repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
agentr13/python-phonenumbers | python/phonenumbers/data/region_MX.py | 8 | 4158 | """Auto-generated file, do not edit by hand. MX metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MX = PhoneMetadata(id='MX', country_code=52, international_prefix='0[09]',
general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{9,10}', possible_number_pattern='\\d{7,11}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:33|55|81)\\d{8}|(?:2(?:0[01]|2[2-9]|3[1-35-8]|4[13-9]|7[1-689]|8[1-578]|9[467])|3(?:1[1-79]|[2458][1-9]|7[1-8]|9[1-5])|4(?:1[1-57-9]|[24-6][1-9]|[37][1-8]|8[1-35-9]|9[2-689])|5(?:88|9[1-79])|6(?:1[2-68]|[234][1-9]|5[1-3689]|6[12457-9]|7[1-7]|8[67]|9[4-8])|7(?:[13467][1-9]|2[1-8]|5[13-9]|8[1-69]|9[17])|8(?:2[13-689]|3[1-6]|4[124-6]|6[1246-9]|7[1-378]|9[12479])|9(?:1[346-9]|2[1-4]|3[2-46-8]|5[1348]|[69][1-9]|7[12]|8[1-8]))\\d{7}', possible_number_pattern='\\d{7,10}', example_number='2221234567'),
mobile=PhoneNumberDesc(national_number_pattern='1(?:(?:33|55|81)\\d{8}|(?:2(?:2[2-9]|3[1-35-8]|4[13-9]|7[1-689]|8[1-578]|9[467])|3(?:1[1-79]|[2458][1-9]|7[1-8]|9[1-5])|4(?:1[1-57-9]|[24-6][1-9]|[37][1-8]|8[1-35-9]|9[2-689])|5(?:88|9[1-79])|6(?:1[2-68]|[2-4][1-9]|5[1-3689]|6[12457-9]|7[1-7]|8[67]|9[4-8])|7(?:[13467][1-9]|2[1-8]|5[13-9]|8[1-69]|9[17])|8(?:2[13-689]|3[1-6]|4[124-6]|6[1246-9]|7[1-378]|9[12479])|9(?:1[346-9]|2[1-4]|3[2-46-8]|5[1348]|[69][1-9]|7[12]|8[1-8]))\\d{7})', possible_number_pattern='\\d{11}', example_number='12221234567'),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|88)\\d{7}', possible_number_pattern='\\d{10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{7}', possible_number_pattern='\\d{10}', example_number='9001234567'),
shared_cost=PhoneNumberDesc(national_number_pattern='300\\d{7}', possible_number_pattern='\\d{10}', example_number='3001234567'),
personal_number=PhoneNumberDesc(national_number_pattern='500\\d{7}', possible_number_pattern='\\d{10}', example_number='5001234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='01',
national_prefix_for_parsing='0[12]|04[45](\\d{10})',
national_prefix_transform_rule='1\\1',
number_format=[NumberFormat(pattern='([358]\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['33|55|81'], national_prefix_formatting_rule='01 \\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2467]|3[0-2457-9]|5[089]|8[02-9]|9[0-35-9]'], national_prefix_formatting_rule='01 \\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(1)([358]\\d)(\\d{4})(\\d{4})', format='044 \\2 \\3 \\4', leading_digits_pattern=['1(?:33|55|81)'], national_prefix_formatting_rule='\\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(1)(\\d{3})(\\d{3})(\\d{4})', format='044 \\2 \\3 \\4', leading_digits_pattern=['1(?:[2467]|3[0-2457-9]|5[089]|8[2-9]|9[1-35-9])'], national_prefix_formatting_rule='\\1', national_prefix_optional_when_formatting=True)],
intl_number_format=[NumberFormat(pattern='([358]\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['33|55|81']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2467]|3[0-2457-9]|5[089]|8[02-9]|9[0-35-9]']),
NumberFormat(pattern='(1)([358]\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['1(?:33|55|81)']),
NumberFormat(pattern='(1)(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['1(?:[2467]|3[0-2457-9]|5[089]|8[2-9]|9[1-35-9])'])],
leading_zero_possible=True,
mobile_number_portable_region=True)
| apache-2.0 |
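For context, this auto-generated table is consumed through the library's public API rather than imported directly. A minimal sketch, assuming the `phonenumbers` package is installed (the number is the fixed-line `example_number` from the metadata above):

```python
import phonenumbers

# Parsing with region=None works because the number carries the +52 country code.
number = phonenumbers.parse("+522221234567", None)
assert phonenumbers.region_code_for_number(number) == "MX"

# Formatting applies the number_format rules from PHONE_METADATA_MX.
print(phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.NATIONAL))
print(phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.INTERNATIONAL))
```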
q1ang/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of a forest of trees to evaluate the importance
of the pixels in an image classification task on the Olivetti faces dataset.
The hotter the pixel, the more important it is.
The code below also illustrates how the construction of the forest and the
computation of the predictions can be parallelized across multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
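The script above pins `n_jobs = 1`; a quick way to observe the parallelism mentioned in its docstring is to time the same fit at several job counts. A minimal sketch, assuming a multi-core machine (speedups depend on hardware):

```python
from time import time

from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier

data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target

for n_jobs in (1, 2, 4):  # job counts to compare; adjust to your core count
    forest = ExtraTreesClassifier(n_estimators=1000, max_features=128,
                                  n_jobs=n_jobs, random_state=0)
    t0 = time()
    forest.fit(X, y)
    print("n_jobs=%d: fit in %0.3fs" % (n_jobs, time() - t0))
```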
bobcyw/django | django/conf/locale/pt_BR/formats.py | 504 | 1434 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
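These constants are not meant to be imported directly; Django's localization layer selects them when the active language is pt-br. A minimal sketch of how they take effect, assuming a configured Django project with localized formatting enabled:

```python
import datetime

from django.utils import formats, translation

with translation.override('pt-br'):
    d = datetime.date(2006, 10, 25)
    # DATE_FORMAT above (r'j \d\e F \d\e Y') -> "25 de Outubro de 2006"
    print(formats.date_format(d, 'DATE_FORMAT'))
    # SHORT_DATE_FORMAT -> "25/10/2006"
    print(formats.date_format(d, 'SHORT_DATE_FORMAT'))
```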
chilleo/ALPHA | module/CalculateGeneralizedDStatisticClass.py | 1 | 11045 | from PyQt4 import QtCore
from sys import *
import os
import subprocess
# if platform == 'win32':
# path.insert(0, "../CommandLineFiles")
# elif platform == 'darwin':
# path.insert(0, "..\\CommandLineFiles")
# import CalculateGeneralizedDStatistic
import sys
from CommandLineFiles.RunDGEN import run_saved_dgen, Create_Network_Helper
sys.path.append('..\\')
from CommandLineFiles import CalculateGeneralizedDStatistic
"""
Functions:
~
Chabrielle Allen
Travis Benedict
Peter Dulworth
"""
class CalculateGeneralizedDStatisticClass(QtCore.QThread):
def __init__(self, parent=None):
super(CalculateGeneralizedDStatisticClass, self).__init__(parent)
def calculate_generalized(self, alignments, species_tree=None, reticulations=None, outgroup=None, window_size=100000000000,
window_offset=100000000000, verbose=False, alpha=0.01, use_inv=False, useDir=False,
directory="", statistic=False, save=False, f="DGenStatistic_", plot=False, meta=False, useAlreadyGeneratedStat=True):
self.emit(QtCore.SIGNAL('GEN_D_10'))
        if not useAlreadyGeneratedStat: #generate a dgen stat
# run the java jar lines goes here
# FOR REFERENCE, THE ARGS AND JAVA COMMAND
# String treeString = args[0];
# String networkString = args[1];
# String outGroupName = args[2];
# String saveStatHere = args[3];
# int numberOfRandomRuns = Integer.parseInt(args[4]);
# GenerateDgenStatistic(treeString, networkString, outGroupName, saveStatHere, numberOfRandomRuns);
# Get the global path name to the jar file
dir_path = os.path.dirname(os.path.realpath(__file__))
jarPath = os.path.join(dir_path, "DGEN2.jar")
# Run PhyloNet dgen maker jar file
numberRandomRuns = 100
networkString = Create_Network_Helper(species_tree, reticulations, 0.9)
#species tree and network string need 's to work properly
species_tree = "'"+species_tree+"'"
networkString = "'"+networkString+"'"
jarRunOutput = subprocess.Popen("java -jar {0} {1} {2} {3} {4} {5}".format(jarPath, species_tree, networkString, outgroup, statistic, numberRandomRuns), stdout=subprocess.PIPE,
shell=True)
# Read output and convert to float
#pgtst = float(p.stdout.readline())
self.emit(QtCore.SIGNAL('GEN_D_50'))
        #and then always run the statistic on data (doing it this way for now to save time; could be changed later to be slightly more user friendly, but all users should want to analyze data anyway)
        #run the dstat. making temp variables just to keep clear on what is named what (because some things are being renamed without rewriting the surrounding code)
runInVerboseMode = use_inv
saveResultsHere = f
resultsString = run_saved_dgen(statistic, alignments, window_size=window_size, window_offset=window_offset, verbose=runInVerboseMode, alpha=alpha)
        #line here to save results string to file saveResultsHere (do we want to do this or just output to screen?)
# If users want to save the statistic and speed up future runs
if len(saveResultsHere) > 0:
num = 0
file_name = saveResultsHere + ".txt"
while os.path.exists(file_name):
file_name = "DGenResults_{0}.txt".format(num)
num += 1
with open(file_name, "w") as text_file:
#output_str = "Taxa: {0}\n".format(taxa)
#text_file.write(output_str)
#output_str = "Statistic: {0}\n".format(generate_statistic_string((increase_resized, decrease_resized)))
#text_file.write(output_str)
text_file.write(resultsString)
text_file.close()
#put a line to either print results or to save em to a file. printing to screen done here
#self.emit(QtCore.SIGNAL('GEN_D_COMPLETE'))
self.emit(QtCore.SIGNAL('GEN_D_100'))
self.emit(QtCore.SIGNAL('DGEN2_FINISHED'), resultsString)
debugHere = 0
#run_saved_dgen(?,
# ['/Users/leo/rice/res/data/cichlid/alignment/cichlid6tax.phylip-sequential.txt'],
# verbose=True, plot='/Users/leo/rice/res/data/dgen/tmp/figC/plot_figCVerbose', meta='Dgen')
# OLD WAY
# alignments_to_d_resized, alignments_to_windows_to_d, standard_o, verbose_o = CalculateGeneralizedDStatistic.calculate_generalized\
# (alignments, species_tree, reticulations, outgroup, window_size, window_offset, verbose, alpha, use_inv,
# useDir, directory, statistic, save, f, plot, meta)
# self.emit(QtCore.SIGNAL("L_FINISHED"), alignments_to_d_resized, alignments_to_windows_to_d, standard_o, verbose_o)
def run(self):
"""
Starts PyQt Thread. Called with "start()".
"""
# try:
# self.window_splitter(self.inputFilename, self.windowSize, self.windowOffset)
# except IOError:
# self.emit(QtCore.SIGNAL('INVALID_ALIGNMENT_FILE'), self.inputFilename)
# return
self.calculate_generalized(self.alignments,
species_tree=self.species_tree,
reticulations=self.r,
outgroup=self.o,
window_size=self.window_size,
window_offset=self.window_offset,
verbose=self.verbose,
alpha=self.alpha,
use_inv=self.use_inv,
useDir=self.useDir,
directory=self.directory,
statistic=self.statistic,
save=self.save,
f=self.save_location,
plot=self.plot,
meta=self.meta,
useAlreadyGeneratedStat=self.useAlreadyGeneratedStat)
#self.emit(QtCore.SIGNAL('GEN_D_COMPLETE'), None)
if __name__ == '__main__':
gd = CalculateGeneralizedDStatisticClass()
species_tree = '((P1,P2),(P3,O));'
# species_tree = '(((P1,P2),(P3,(P4,P5))),O);'
r = [('P3', 'P1')]
alignments = ["exampleFiles/seqfile.txt"]
if platform == "darwin":
alignments = ["/Users/Peter/PycharmProjects/ALPHA/exampleFiles/seqfile.txt"]
else:
alignments = ["C:\\Users\\travi\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim2\\seqfile.txt"]
# gd.calculate_generalized(alignments, species_tree, r, window_size=50000, window_offset=50000, verbose=True, alpha=0.01, save=True)
    gd.calculate_generalized(alignments, species_tree, r, outgroup="O", window_size=50000, window_offset=50000, verbose=True, alpha=0.01, save=True)
# save_file = "C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_35.txt"
# plot_formatting(calculate_generalized(alignments, statistic=save_file))
# print calculate_generalized(alignments, statistic="C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_10.txt", verbose=True)
# calculate_generalized(alignments, statistic="C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_35.txt")
# python - c "from CalculateGeneralizedDStatistic import *; calculate_generalized(['C:\\Users\\travi\\Documents\\PhyloVis\\exampleFiles\\ExampleDFOIL.phylip'], statistic='C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_35.txt')"
# species_tree, r = '(((P1,P2),(P3,(P4,P5))),O);', [('P1', 'P3')]
# alignments = ["C:\\Users\\travi\\Documents\\PhyloVis\\exampleFiles\\ExampleDFOIL.phylip"]
# alignments = ["C:\\Users\\travi\\Desktop\\sixtaxa.txt"]
# i = calculate_generalized(alignments, species_tree, r, 100000, 100000, True, save=True)
# for j in range(10):
# k = calculate_generalized(alignments, species_tree, r, 100000, 100000, True, save=True)
# if i != k:
# print "FAIL"
# print i
# print k
# print j
# print pattern_string_generator(['A', 'A', 'A', 'A', 'A'])
# Inputs for paper
# file = "C:\\Users\\travi\\Desktop\\concatFile.phylip.txt"
# species_tree = '((C,G),(((A,Q),L),R));'
#
# window_size, window_offset = 10000, 1000
# r = [('L', 'R')]
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# window_size, window_offset = 100000, 10000
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
#
# window_size, window_offset = 10000, 1000
# r = [('Q', 'R')]
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# window_size, window_offset = 100000, 10000
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
#
# window_size, window_offset = 10000, 1000
# r = [('Q', 'G')]
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# window_size, window_offset = 100000, 10000
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# concat_directory("/Users/Peter/PycharmProjects/ALPHA/test_phylip_dir")
# print calculate_generalized('/Users/Peter/PycharmProjects/ALPHA/CLFILE', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 50000, 50000, True)
# file = 'C:\\Users\\travi\\Desktop\\clphylipseq.txt'
# # r = [('L', 'R')]
# r = [('Q', 'R')]
# # r = [('Q', 'G')]
# print calculate_generalized(file , '((C,G),(((A,Q),L),R));', r, 100000, 100000, True)
# concat_directory("/Users/Peter/PycharmProjects/ALPHA/travy_test")
# print calculate_generalized('/Users/Peter/PycharmProjects/ALPHA/CLFILE', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 50000, 50000, True)
# plot_formatting(calculate_generalized(alignments, species_tree, r, 1000, 1000, True))
# # lstat, signif, windows_to_l = calculate_generalized(alignment, species_tree, r, 1000, 1000, True, 0.05)
# # plot_formatting((lstat, signif, windows_to_l))
# plot_formatting(calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P3', 'P1')], 1000, 1000, False, 0.99), False)
# print calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 50000, 50000, True)
# python -c"from CalculateGeneralizedDStatistic import *; plot_formatting(calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 100000, 100000, True, 0.01), True)"
| mit |
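Since this QThread reports progress and results through old-style PyQt4 signals instead of return values, a caller connects handlers before starting the thread. A hypothetical sketch (the handler names are illustrative, not part of ALPHA):

```python
from PyQt4 import QtCore

def on_halfway():
    print("statistic ready, scoring alignments...")

def on_finished(results_string):
    print(results_string)

gd = CalculateGeneralizedDStatisticClass()
QtCore.QObject.connect(gd, QtCore.SIGNAL('GEN_D_50'), on_halfway)
QtCore.QObject.connect(gd, QtCore.SIGNAL('DGEN2_FINISHED'), on_finished)

# run() reads its inputs from attributes, so they must be set before start():
gd.alignments = ['exampleFiles/seqfile.txt']
gd.species_tree = '((P1,P2),(P3,O));'
# ... set the remaining attributes (r, o, window_size, statistic, etc.) ...
gd.start()  # executes run() on a background thread
```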
yoki/phantomjs | src/breakpad/src/tools/gyp/test/sibling/gyptest-relocate.py | 151 | 1144 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('build/all.gyp', test.ALL, chdir='relocate/src')
chdir = 'relocate/src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'relocate/src'
if test.format == 'xcode':
chdir = 'relocate/src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'relocate/src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| bsd-3-clause |
pli3/enigma2-pli | lib/python/Plugins/Extensions/DVDBurn/Process.py | 14 | 36761 | from Components.Task import Task, Job, DiskspacePrecondition, Condition, ToolExistsPrecondition
from Components.Harddisk import harddiskmanager
from Screens.MessageBox import MessageBox
import os
class png2yuvTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Creating menu video")
self.setTool("png2yuv")
self.args += ["-n1", "-Ip", "-f25", "-j", inputfile]
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
def processStderr(self, data):
print "[png2yuvTask]", data[:-1]
class mpeg2encTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Encoding menu video")
self.setTool("mpeg2enc")
self.args += ["-f8", "-np", "-a2", "-o", outputfile]
self.inputFile = inputfile
self.weighting = 25
def run(self, callback):
Task.run(self, callback)
self.container.readFromFile(self.inputFile)
def processOutputLine(self, line):
print "[mpeg2encTask]", line[:-1]
class spumuxTask(Task):
def __init__(self, job, xmlfile, inputfile, outputfile):
Task.__init__(self, job, "Muxing buttons into menu")
self.setTool("spumux")
self.args += [xmlfile]
self.inputFile = inputfile
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
self.container.readFromFile(self.inputFile)
def processStderr(self, data):
print "[spumuxTask]", data[:-1]
class MakeFifoNode(Task):
def __init__(self, job, number):
Task.__init__(self, job, "Make FIFO nodes")
self.setTool("mknod")
nodename = self.job.workspace + "/dvd_title_%d" % number + ".mpg"
self.args += [nodename, "p"]
self.weighting = 10
class LinkTS(Task):
def __init__(self, job, sourcefile, link_name):
Task.__init__(self, job, "Creating symlink for source titles")
self.setTool("ln")
self.args += ["-s", sourcefile, link_name]
self.weighting = 10
class CopyMeta(Task):
def __init__(self, job, sourcefile):
Task.__init__(self, job, "Copy title meta files")
self.setTool("cp")
from os import listdir
path, filename = sourcefile.rstrip("/").rsplit("/",1)
tsfiles = listdir(path)
for file in tsfiles:
if file.startswith(filename+"."):
self.args += [path+'/'+file]
self.args += [self.job.workspace]
self.weighting = 15
class DemuxTask(Task):
def __init__(self, job, inputfile):
Task.__init__(self, job, "Demux video into ES")
title = job.project.titles[job.i]
self.global_preconditions.append(DiskspacePrecondition(title.estimatedDiskspace))
self.setTool("projectx")
self.args += [inputfile, "-demux", "-set", "ExportPanel.Streamtype.Subpicture=0", "-set", "ExportPanel.Streamtype.Teletext=0", "-out", self.job.workspace ]
self.end = 300
self.prog_state = 0
self.weighting = 1000
self.cutfile = self.job.workspace + "/cut_%d.Xcl" % (job.i+1)
self.cutlist = title.cutlist
self.currentPID = None
self.relevantAudioPIDs = [ ]
self.getRelevantAudioPIDs(title)
self.generated_files = [ ]
self.mplex_audiofiles = { }
self.mplex_videofile = ""
self.mplex_streamfiles = [ ]
if len(self.cutlist) > 1:
self.args += [ "-cut", self.cutfile ]
def prepare(self):
self.writeCutfile()
def getRelevantAudioPIDs(self, title):
for audiotrack in title.properties.audiotracks:
if audiotrack.active.getValue():
self.relevantAudioPIDs.append(audiotrack.pid.getValue())
def processOutputLine(self, line):
line = line[:-1]
#print "[DemuxTask]", line
MSG_NEW_FILE = "---> new File: "
MSG_PROGRESS = "[PROGRESS] "
MSG_NEW_MP2 = "++> Mpg Audio: PID 0x"
MSG_NEW_AC3 = "++> AC3/DTS Audio: PID 0x"
if line.startswith(MSG_NEW_FILE):
file = line[len(MSG_NEW_FILE):]
if file[0] == "'":
file = file[1:-1]
self.haveNewFile(file)
elif line.startswith(MSG_PROGRESS):
progress = line[len(MSG_PROGRESS):]
self.haveProgress(progress)
elif line.startswith(MSG_NEW_MP2) or line.startswith(MSG_NEW_AC3):
try:
self.currentPID = str(int(line.split(': PID 0x',1)[1].split(' ',1)[0],16))
except ValueError:
print "[DemuxTask] ERROR: couldn't detect Audio PID (projectx too old?)"
def haveNewFile(self, file):
print "[DemuxTask] produced file:", file, self.currentPID
self.generated_files.append(file)
if self.currentPID in self.relevantAudioPIDs:
self.mplex_audiofiles[self.currentPID] = file
elif file.endswith("m2v"):
self.mplex_videofile = file
def haveProgress(self, progress):
#print "PROGRESS [%s]" % progress
MSG_CHECK = "check & synchronize audio file"
MSG_DONE = "done..."
if progress == "preparing collection(s)...":
self.prog_state = 0
elif progress[:len(MSG_CHECK)] == MSG_CHECK:
self.prog_state += 1
else:
try:
p = int(progress)
p = p - 1 + self.prog_state * 100
if p > self.progress:
self.progress = p
except ValueError:
pass
def writeCutfile(self):
f = open(self.cutfile, "w")
f.write("CollectionPanel.CutMode=4\n")
for p in self.cutlist:
s = p / 90000
m = s / 60
h = m / 60
m %= 60
s %= 60
f.write("%02d:%02d:%02d\n" % (h, m, s))
f.close()
def cleanup(self, failed):
print "[DemuxTask::cleanup]"
self.mplex_streamfiles = [ self.mplex_videofile ]
for pid in self.relevantAudioPIDs:
if pid in self.mplex_audiofiles:
self.mplex_streamfiles.append(self.mplex_audiofiles[pid])
print self.mplex_streamfiles
if failed:
import os
for file in self.generated_files:
try:
os.remove(file)
except OSError:
pass
class MplexTaskPostcondition(Condition):
def check(self, task):
if task.error == task.ERROR_UNDERRUN:
return True
return task.error is None
def getErrorMessage(self, task):
return {
task.ERROR_UNDERRUN: ("Can't multiplex source video!"),
task.ERROR_UNKNOWN: ("An unknown error occurred!")
}[task.error]
class MplexTask(Task):
ERROR_UNDERRUN, ERROR_UNKNOWN = range(2)
def __init__(self, job, outputfile, inputfiles=None, demux_task=None, weighting = 500):
Task.__init__(self, job, "Mux ES into PS")
self.weighting = weighting
self.demux_task = demux_task
self.postconditions.append(MplexTaskPostcondition())
self.setTool("mplex")
self.args += ["-f8", "-o", outputfile, "-v1"]
if inputfiles:
self.args += inputfiles
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
# we don't want the ReturncodePostcondition in this case because for right now we're just gonna ignore the fact that mplex fails with a buffer underrun error on some streams (this always at the very end)
def prepare(self):
self.error = None
if self.demux_task:
self.args += self.demux_task.mplex_streamfiles
def processOutputLine(self, line):
print "[MplexTask] ", line[:-1]
if line.startswith("**ERROR:"):
if line.find("Frame data under-runs detected") != -1:
self.error = self.ERROR_UNDERRUN
else:
self.error = self.ERROR_UNKNOWN
class RemoveESFiles(Task):
def __init__(self, job, demux_task):
Task.__init__(self, job, "Remove temp. files")
self.demux_task = demux_task
self.setTool("rm")
self.weighting = 10
def prepare(self):
self.args += ["-f"]
self.args += self.demux_task.generated_files
self.args += [self.demux_task.cutfile]
class ReplexTask(Task):
def __init__(self, job, outputfile, inputfile):
Task.__init__(self, job, "ReMux TS into PS")
self.weighting = 1000
self.setTool("replex")
self.args += ["-t", "DVD", "-j", "-o", outputfile, inputfile]
def processOutputLine(self, line):
print "[ReplexTask] ", line[:-1]
class DVDAuthorTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Authoring DVD")
self.weighting = 20
self.setTool("dvdauthor")
self.CWD = self.job.workspace
self.args += ["-x", self.job.workspace+"/dvdauthor.xml"]
self.menupreview = job.menupreview
def processOutputLine(self, line):
print "[DVDAuthorTask] ", line[:-1]
if not self.menupreview and line.startswith("STAT: Processing"):
self.callback(self, [], stay_resident=True)
elif line.startswith("STAT: VOBU"):
try:
progress = int(line.split("MB")[0].split(" ")[-1])
if progress:
self.job.mplextask.progress = progress
print "[DVDAuthorTask] update mplextask progress:", self.job.mplextask.progress, "of", self.job.mplextask.end
except:
print "couldn't set mux progress"
class DVDAuthorFinalTask(Task):
def __init__(self, job):
Task.__init__(self, job, "dvdauthor finalize")
self.setTool("dvdauthor")
self.args += ["-T", "-o", self.job.workspace + "/dvd"]
class WaitForResidentTasks(Task):
def __init__(self, job):
Task.__init__(self, job, "waiting for dvdauthor to finalize")
def run(self, callback):
print "waiting for %d resident task(s) %s to finish..." % (len(self.job.resident_tasks),str(self.job.resident_tasks))
self.callback = callback
if self.job.resident_tasks == 0:
callback(self, [])
class BurnTaskPostcondition(Condition):
RECOVERABLE = True
def check(self, task):
if task.returncode == 0:
return True
elif task.error is None or task.error is task.ERROR_MINUSRWBUG:
return True
return False
def getErrorMessage(self, task):
return {
task.ERROR_NOTWRITEABLE: _("Medium is not a writeable DVD!"),
task.ERROR_LOAD: _("Could not load medium! No disc inserted?"),
task.ERROR_SIZE: _("Content does not fit on DVD!"),
task.ERROR_WRITE_FAILED: _("Write failed!"),
task.ERROR_DVDROM: _("No (supported) DVDROM found!"),
task.ERROR_ISOFS: _("Medium is not empty!"),
task.ERROR_FILETOOLARGE: _("TS file is too large for ISO9660 level 1!"),
task.ERROR_ISOTOOLARGE: _("ISO file is too large for this filesystem!"),
task.ERROR_UNKNOWN: _("An unknown error occurred!")
}[task.error]
class BurnTask(Task):
ERROR_NOTWRITEABLE, ERROR_LOAD, ERROR_SIZE, ERROR_WRITE_FAILED, ERROR_DVDROM, ERROR_ISOFS, ERROR_FILETOOLARGE, ERROR_ISOTOOLARGE, ERROR_MINUSRWBUG, ERROR_UNKNOWN = range(10)
def __init__(self, job, extra_args=[], tool="growisofs"):
Task.__init__(self, job, job.name)
self.weighting = 500
self.end = 120 # 100 for writing, 10 for buffer flush, 10 for closing disc
self.postconditions.append(BurnTaskPostcondition())
self.setTool(tool)
self.args += extra_args
def prepare(self):
self.error = None
def processOutputLine(self, line):
line = line[:-1]
print "[GROWISOFS] %s" % line
if line[8:14] == "done, ":
self.progress = float(line[:6])
print "progress:", self.progress
elif line.find("flushing cache") != -1:
self.progress = 100
elif line.find("closing disc") != -1:
self.progress = 110
elif line.startswith(":-["):
if line.find("ASC=30h") != -1:
self.error = self.ERROR_NOTWRITEABLE
elif line.find("ASC=24h") != -1:
self.error = self.ERROR_LOAD
elif line.find("SK=5h/ASC=A8h/ACQ=04h") != -1:
self.error = self.ERROR_MINUSRWBUG
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith(":-("):
if line.find("No space left on device") != -1:
self.error = self.ERROR_SIZE
elif self.error == self.ERROR_MINUSRWBUG:
print "*sigh* this is a known bug. we're simply gonna assume everything is fine."
self.postconditions = []
elif line.find("write failed") != -1:
self.error = self.ERROR_WRITE_FAILED
elif line.find("unable to open64(") != -1 and line.find(",O_RDONLY): No such file or directory") != -1:
self.error = self.ERROR_DVDROM
elif line.find("media is not recognized as recordable DVD") != -1:
self.error = self.ERROR_NOTWRITEABLE
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith("FATAL:"):
			if line.find("already carries isofs!") != -1:
self.error = self.ERROR_ISOFS
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.find("-allow-limited-size was not specified. There is no way do represent this file size. Aborting.") != -1:
self.error = self.ERROR_FILETOOLARGE
elif line.startswith("genisoimage: File too large."):
self.error = self.ERROR_ISOTOOLARGE
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
class RemoveDVDFolder(Task):
def __init__(self, job):
Task.__init__(self, job, "Remove temp. files")
self.setTool("rm")
self.args += ["-rf", self.job.workspace]
self.weighting = 10
class CheckDiskspaceTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Checking free space")
		totalsize = 0
maxsize = 0
for title in job.project.titles:
titlesize = title.estimatedDiskspace
if titlesize > maxsize: maxsize = titlesize
totalsize += titlesize
diskSpaceNeeded = totalsize + maxsize
job.estimateddvdsize = totalsize / 1024 / 1024
totalsize += 50*1024*1024 # require an extra safety 50 MB
self.global_preconditions.append(DiskspacePrecondition(diskSpaceNeeded))
self.weighting = 5
def abort(self):
self.finish(aborted = True)
def run(self, callback):
self.callback = callback
failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
if len(failed_preconditions):
callback(self, failed_preconditions)
return
Task.processFinished(self, 0)
class PreviewTask(Task):
def __init__(self, job, path):
Task.__init__(self, job, "Preview")
self.postconditions.append(PreviewTaskPostcondition())
self.job = job
self.path = path
self.weighting = 10
def run(self, callback):
self.callback = callback
if self.job.menupreview:
self.previewProject()
else:
import Screens.Standby
if Screens.Standby.inStandby:
self.previewCB(False)
else:
from Tools import Notifications
Notifications.AddNotificationWithCallback(self.previewCB, MessageBox, _("Do you want to preview this DVD before burning?"), timeout = 60, default = False)
def abort(self):
self.finish(aborted = True)
def previewCB(self, answer):
if answer == True:
self.previewProject()
else:
self.closedCB(True)
def playerClosed(self):
if self.job.menupreview:
self.closedCB(True)
else:
from Tools import Notifications
Notifications.AddNotificationWithCallback(self.closedCB, MessageBox, _("Do you want to burn this collection to DVD medium?") )
def closedCB(self, answer):
if answer == True:
Task.processFinished(self, 0)
else:
Task.processFinished(self, 1)
def previewProject(self):
from Screens.DVD import DVDPlayer
self.job.project.session.openWithCallback(self.playerClosed, DVDPlayer, dvd_filelist= [ self.path ])
class PreviewTaskPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return "Cancel"
class ImagingPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return _("Failed") + ": python-imaging"
class ImagePrepareTask(Task):
def __init__(self, job):
Task.__init__(self, job, _("please wait, loading picture..."))
self.postconditions.append(ImagingPostcondition())
self.weighting = 20
self.job = job
self.Menus = job.Menus
def run(self, callback):
self.callback = callback
# we are doing it this weird way so that the TaskView Screen actually pops up before the spinner comes
from enigma import eTimer
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.conduct)
self.delayTimer.start(10,1)
def conduct(self):
try:
from ImageFont import truetype
from Image import open as Image_open
s = self.job.project.menutemplate.settings
(width, height) = s.dimensions.getValue()
self.Menus.im_bg_orig = Image_open(s.menubg.getValue())
if self.Menus.im_bg_orig.size != (width, height):
self.Menus.im_bg_orig = self.Menus.im_bg_orig.resize((width, height))
self.Menus.fontsizes = [s.fontsize_headline.getValue(), s.fontsize_title.getValue(), s.fontsize_subtitle.getValue()]
self.Menus.fonts = [(truetype(s.fontface_headline.getValue(), self.Menus.fontsizes[0])), (truetype(s.fontface_title.getValue(), self.Menus.fontsizes[1])),(truetype(s.fontface_subtitle.getValue(), self.Menus.fontsizes[2]))]
Task.processFinished(self, 0)
except:
Task.processFinished(self, 1)
class MenuImageTask(Task):
def __init__(self, job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename):
Task.__init__(self, job, "Create Menu %d Image" % menu_count)
self.postconditions.append(ImagingPostcondition())
self.weighting = 10
self.job = job
self.Menus = job.Menus
self.menu_count = menu_count
self.spuxmlfilename = spuxmlfilename
self.menubgpngfilename = menubgpngfilename
self.highlightpngfilename = highlightpngfilename
def run(self, callback):
self.callback = callback
#try:
import ImageDraw, Image, os
s = self.job.project.menutemplate.settings
s_top = s.margin_top.getValue()
s_bottom = s.margin_bottom.getValue()
s_left = s.margin_left.getValue()
s_right = s.margin_right.getValue()
s_rows = s.space_rows.getValue()
s_cols = s.space_cols.getValue()
nr_cols = s.cols.getValue()
nr_rows = s.rows.getValue()
thumb_size = s.thumb_size.getValue()
if thumb_size[0]:
from Image import open as Image_open
(s_width, s_height) = s.dimensions.getValue()
fonts = self.Menus.fonts
im_bg = self.Menus.im_bg_orig.copy()
im_high = Image.new("P", (s_width, s_height), 0)
im_high.putpalette(self.Menus.spu_palette)
draw_bg = ImageDraw.Draw(im_bg)
draw_high = ImageDraw.Draw(im_high)
if self.menu_count == 1:
headlineText = self.job.project.settings.name.getValue().decode("utf-8")
headlinePos = self.getPosition(s.offset_headline.getValue(), 0, 0, s_width, s_top, draw_bg.textsize(headlineText, font=fonts[0]))
draw_bg.text(headlinePos, headlineText, fill=self.Menus.color_headline, font=fonts[0])
spuxml = """<?xml version="1.0" encoding="utf-8"?>
<subpictures>
<stream>
<spu
highlight="%s"
transparent="%02x%02x%02x"
start="00:00:00.00"
force="yes" >""" % (self.highlightpngfilename, self.Menus.spu_palette[0], self.Menus.spu_palette[1], self.Menus.spu_palette[2])
#rowheight = (self.Menus.fontsizes[1]+self.Menus.fontsizes[2]+thumb_size[1]+s_rows)
menu_start_title = (self.menu_count-1)*self.job.titles_per_menu + 1
menu_end_title = (self.menu_count)*self.job.titles_per_menu + 1
nr_titles = len(self.job.project.titles)
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
col = 1
row = 1
for title_no in range( menu_start_title , menu_end_title ):
title = self.job.project.titles[title_no-1]
col_width = ( s_width - s_left - s_right ) / nr_cols
row_height = ( s_height - s_top - s_bottom ) / nr_rows
left = s_left + ( (col-1) * col_width ) + s_cols/2
right = left + col_width - s_cols
top = s_top + ( (row-1) * row_height) + s_rows/2
bottom = top + row_height - s_rows
width = right - left
height = bottom - top
if bottom > s_height:
bottom = s_height
#draw_bg.rectangle((left, top, right, bottom), outline=(255,0,0))
im_cell_bg = Image.new("RGBA", (width, height),(0,0,0,0))
draw_cell_bg = ImageDraw.Draw(im_cell_bg)
im_cell_high = Image.new("P", (width, height), 0)
im_cell_high.putpalette(self.Menus.spu_palette)
draw_cell_high = ImageDraw.Draw(im_cell_high)
if thumb_size[0]:
thumbPos = self.getPosition(s.offset_thumb.getValue(), 0, 0, width, height, thumb_size)
box = (thumbPos[0], thumbPos[1], thumbPos[0]+thumb_size[0], thumbPos[1]+thumb_size[1])
try:
thumbIm = Image_open(title.inputfile.rsplit('.',1)[0] + ".png")
im_cell_bg.paste(thumbIm,thumbPos)
except:
draw_cell_bg.rectangle(box, fill=(64,127,127,127))
border = s.thumb_border.getValue()
if border:
draw_cell_high.rectangle(box, fill=1)
draw_cell_high.rectangle((box[0]+border, box[1]+border, box[2]-border, box[3]-border), fill=0)
titleText = title.formatDVDmenuText(s.titleformat.getValue(), title_no).decode("utf-8")
titlePos = self.getPosition(s.offset_title.getValue(), 0, 0, width, height, draw_bg.textsize(titleText, font=fonts[1]))
draw_cell_bg.text(titlePos, titleText, fill=self.Menus.color_button, font=fonts[1])
draw_cell_high.text(titlePos, titleText, fill=1, font=self.Menus.fonts[1])
subtitleText = title.formatDVDmenuText(s.subtitleformat.getValue(), title_no).decode("utf-8")
subtitlePos = self.getPosition(s.offset_subtitle.getValue(), 0, 0, width, height, draw_cell_bg.textsize(subtitleText, font=fonts[2]))
draw_cell_bg.text(subtitlePos, subtitleText, fill=self.Menus.color_button, font=fonts[2])
del draw_cell_bg
del draw_cell_high
im_bg.paste(im_cell_bg,(left, top, right, bottom), mask=im_cell_bg)
im_high.paste(im_cell_high,(left, top, right, bottom))
spuxml += """
<button name="button%s" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (str(title_no).zfill(2),left,right,top,bottom )
if col < nr_cols:
col += 1
else:
col = 1
row += 1
top = s_height - s_bottom - s_rows/2
if self.menu_count < self.job.nr_menus:
next_page_text = s.next_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(next_page_text, font=fonts[1])
pos = ( s_width-textsize[0]-s_right, top )
draw_bg.text(pos, next_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, next_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_next" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
if self.menu_count > 1:
prev_page_text = s.prev_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(prev_page_text, font=fonts[1])
pos = ( (s_left+s_cols/2), top )
draw_bg.text(pos, prev_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, prev_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_prev" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
del draw_bg
del draw_high
fd=open(self.menubgpngfilename,"w")
im_bg.save(fd,"PNG")
fd.close()
fd=open(self.highlightpngfilename,"w")
im_high.save(fd,"PNG")
fd.close()
spuxml += """
</spu>
</stream>
</subpictures>"""
f = open(self.spuxmlfilename, "w")
f.write(spuxml)
f.close()
Task.processFinished(self, 0)
#except:
#Task.processFinished(self, 1)
def getPosition(self, offset, left, top, right, bottom, size):
pos = [left, top]
if offset[0] != -1:
pos[0] += offset[0]
else:
pos[0] += ( (right-left) - size[0] ) / 2
if offset[1] != -1:
pos[1] += offset[1]
else:
pos[1] += ( (bottom-top) - size[1] ) / 2
return tuple(pos)
class Menus:
def __init__(self, job):
self.job = job
job.Menus = self
s = self.job.project.menutemplate.settings
self.color_headline = tuple(s.color_headline.getValue())
self.color_button = tuple(s.color_button.getValue())
self.color_highlight = tuple(s.color_highlight.getValue())
self.spu_palette = [ 0x60, 0x60, 0x60 ] + s.color_highlight.getValue()
ImagePrepareTask(job)
nr_titles = len(job.project.titles)
job.titles_per_menu = s.cols.getValue()*s.rows.getValue()
job.nr_menus = ((nr_titles+job.titles_per_menu-1)/job.titles_per_menu)
#a new menu_count every 4 titles (1,2,3,4->1 ; 5,6,7,8->2 etc.)
for menu_count in range(1 , job.nr_menus+1):
num = str(menu_count)
spuxmlfilename = job.workspace+"/spumux"+num+".xml"
menubgpngfilename = job.workspace+"/dvd_menubg"+num+".png"
highlightpngfilename = job.workspace+"/dvd_highlight"+num+".png"
MenuImageTask(job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename)
png2yuvTask(job, menubgpngfilename, job.workspace+"/dvdmenubg"+num+".yuv")
menubgm2vfilename = job.workspace+"/dvdmenubg"+num+".mv2"
mpeg2encTask(job, job.workspace+"/dvdmenubg"+num+".yuv", menubgm2vfilename)
menubgmpgfilename = job.workspace+"/dvdmenubg"+num+".mpg"
menuaudiofilename = s.menuaudio.getValue()
MplexTask(job, outputfile=menubgmpgfilename, inputfiles = [menubgm2vfilename, menuaudiofilename], weighting = 20)
menuoutputfilename = job.workspace+"/dvdmenu"+num+".mpg"
spumuxTask(job, spuxmlfilename, menubgmpgfilename, menuoutputfilename)
def CreateAuthoringXML_singleset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n', )
if mode.startswith("menu"):
authorxml.append(' <post> jump titleset 1 menu; </post>\n')
else:
authorxml.append(' <post> jump title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
authorxml.append(' <titleset>\n')
if mode.startswith("menu"):
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
for menu_count in range(1 , job.nr_menus+1):
if menu_count == 1:
authorxml.append(' <pgc entry="root">\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count-1)*job.titles_per_menu + 1
menu_end_title = (menu_count)*job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
for i in range( menu_start_title , menu_end_title ):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump title ' + str(i) +'; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for i in range( nr_titles ):
chapters = ','.join(job.project.titles[i].getChapterMarks())
title_no = i+1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump title %d;" % ( title_no+1 )
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else: post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace+"/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def CreateAuthoringXML_multiset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '" jumppad="yes">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
if mode.startswith("menu"):
for menu_count in range(1 , job.nr_menus+1):
if menu_count == 1:
authorxml.append(' <pgc>\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count-1)*job.titles_per_menu + 1
menu_end_title = (menu_count)*job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
for i in range( menu_start_title , menu_end_title ):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump titleset ' + str(i) +' title 1; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
else:
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n' )
authorxml.append(' <post> jump titleset 1 title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
for i in range( nr_titles ):
title = job.project.titles[i]
authorxml.append(' <titleset>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <pgc entry="root">\n')
authorxml.append(' <pre>\n')
authorxml.append(' jump vmgm menu entry title;\n')
authorxml.append(' </pre>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for audiotrack in title.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
format = audiotrack.format.getValue()
language = audiotrack.language.getValue()
audio_tag = ' <audio format="%s"' % format
if language != "nolang":
audio_tag += ' lang="%s"' % language
audio_tag += ' />\n'
authorxml.append(audio_tag)
aspect = title.properties.aspect.getValue()
video_tag = ' <video aspect="'+aspect+'"'
if title.properties.widescreen.getValue() == "4:3":
video_tag += ' widescreen="'+title.properties.widescreen.getValue()+'"'
video_tag += ' />\n'
authorxml.append(video_tag)
chapters = ','.join(title.getChapterMarks())
title_no = i+1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump titleset %d title 1;" % ( title_no+1 )
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else: post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace+"/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def getISOfilename(isopath, volName):
from Tools.Directories import fileExists
i = 0
filename = isopath+'/'+volName+".iso"
while fileExists(filename):
i = i+1
filename = isopath+'/'+volName + str(i).zfill(3) + ".iso"
return filename
class DVDJob(Job):
def __init__(self, project, menupreview=False):
Job.__init__(self, "DVDBurn Job")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S")
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.menupreview = menupreview
self.conduct()
def conduct(self):
CheckDiskspaceTask(self)
if self.project.settings.authormode.getValue().startswith("menu") or self.menupreview:
Menus(self)
if self.project.settings.titlesetmode.getValue() == "multi":
CreateAuthoringXML_multiset(self)
else:
CreateAuthoringXML_singleset(self)
DVDAuthorTask(self)
nr_titles = len(self.project.titles)
if self.menupreview:
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
else:
hasProjectX = os.path.exists('/usr/bin/projectx')
print "[DVDJob] hasProjectX=", hasProjectX
for self.i in range(nr_titles):
self.title = self.project.titles[self.i]
link_name = self.workspace + "/source_title_%d.ts" % (self.i+1)
title_filename = self.workspace + "/dvd_title_%d.mpg" % (self.i+1)
LinkTS(self, self.title.inputfile, link_name)
if not hasProjectX:
ReplexTask(self, outputfile=title_filename, inputfile=link_name).end = self.estimateddvdsize
else:
demux = DemuxTask(self, link_name)
self.mplextask = MplexTask(self, outputfile=title_filename, demux_task=demux)
self.mplextask.end = self.estimateddvdsize
RemoveESFiles(self, demux)
WaitForResidentTasks(self)
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
if output == "dvd":
self.name = _("Burn DVD")
tool = "growisofs"
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if self.project.size/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
elif output == "iso":
self.name = _("Create DVD-ISO")
tool = "genisoimage"
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = [ "-o", isopathfile ]
burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, self.workspace + "/dvd" ]
BurnTask(self, burnargs, tool)
RemoveDVDFolder(self)
class DVDdataJob(Job):
def __init__(self, project):
Job.__init__(self, "Data DVD Burn")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S") + "/dvd/"
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.conduct()
def conduct(self):
if self.project.settings.output.getValue() == "iso":
CheckDiskspaceTask(self)
nr_titles = len(self.project.titles)
for self.i in range(nr_titles):
title = self.project.titles[self.i]
filename = title.inputfile.rstrip("/").rsplit("/",1)[1]
link_name = self.workspace + filename
LinkTS(self, title.inputfile, link_name)
CopyMeta(self, title.inputfile)
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
tool = "growisofs"
if output == "dvd":
self.name = _("Burn DVD")
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if self.project.size/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
elif output == "iso":
tool = "genisoimage"
self.name = _("Create DVD-ISO")
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = [ "-o", isopathfile ]
if self.project.settings.dataformat.getValue() == "iso9660_1":
burnargs += ["-iso-level", "1" ]
elif self.project.settings.dataformat.getValue() == "iso9660_4":
burnargs += ["-iso-level", "4", "-allow-limited-size" ]
elif self.project.settings.dataformat.getValue() == "udf":
burnargs += ["-udf", "-allow-limited-size" ]
burnargs += [ "-publisher", "Dreambox", "-V", volName, "-follow-links", self.workspace ]
BurnTask(self, burnargs, tool)
RemoveDVDFolder(self)
class DVDisoJob(Job):
def __init__(self, project, imagepath):
Job.__init__(self, _("Burn DVD"))
self.project = project
self.menupreview = False
from Tools.Directories import getSize
if imagepath.endswith(".iso"):
PreviewTask(self, imagepath)
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD() + '='+imagepath, "-dvd-compat" ]
if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
else:
PreviewTask(self, imagepath + "/VIDEO_TS/")
volName = self.project.settings.name.getValue()
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, imagepath ]
tool = "growisofs"
BurnTask(self, burnargs, tool)
| gpl-2.0 |
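Every class above follows the same enigma2 pattern: subclass Task, point it at an external tool with setTool, extend args, and parse the tool's output in processOutputLine. A minimal hypothetical task in that style (md5sum as a stand-in tool; not part of the plugin):

```python
class ChecksumTask(Task):
	def __init__(self, job, inputfile):
		Task.__init__(self, job, "Checksumming title")
		self.setTool("md5sum")  # setTool also registers a ToolExistsPrecondition
		self.args += [inputfile]
		self.weighting = 5  # this task's share of the job's progress bar

	def processOutputLine(self, line):
		print "[ChecksumTask]", line[:-1]
```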
wolverineav/neutron | neutron/tests/fullstack/base.py | 1 | 3133 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db.sqlalchemy import test_base
from neutron.db.migration import cli as migration
from neutron.tests.common import base
from neutron.tests.fullstack.resources import client as client_resource
from neutron.tests import tools
class BaseFullStackTestCase(base.MySQLTestCase):
"""Base test class for full-stack tests."""
def setUp(self, environment):
super(BaseFullStackTestCase, self).setUp()
# NOTE(ihrachys): seed should be reset before environment fixture below
# since the latter starts services that may rely on generated port
# numbers
tools.reset_random_seed()
self.create_db_tables()
self.environment = environment
self.environment.test_name = self.get_name()
self.useFixture(self.environment)
self.client = self.environment.neutron_server.client
self.safe_client = self.useFixture(
client_resource.ClientFixture(self.client))
def get_name(self):
class_name, test_name = self.id().split(".")[-2:]
return "%s.%s" % (class_name, test_name)
def create_db_tables(self):
"""Populate the new database.
MySQLTestCase creates a new database for each test, but these need to
be populated with the appropriate tables. Before we can do that, we
must change the 'connection' option which the Neutron code knows to
look at.
Currently, the username and password options are hard-coded by
oslo.db and neutron/tests/functional/contrib/gate_hook.sh. Also,
we only support MySQL for now, but the groundwork for adding Postgres
is already laid.
"""
conn = ("mysql+pymysql://%(username)s:%(password)s"
"@127.0.0.1/%(db_name)s" % {
'username': test_base.DbFixture.USERNAME,
'password': test_base.DbFixture.PASSWORD,
'db_name': self.engine.url.database})
alembic_config = migration.get_neutron_config()
alembic_config.neutron_config = cfg.CONF
self.original_conn = cfg.CONF.database.connection
self.addCleanup(self._revert_connection_address)
cfg.CONF.set_override('connection', conn, group='database')
migration.do_alembic_command(alembic_config, 'upgrade', 'heads')
def _revert_connection_address(self):
cfg.CONF.set_override('connection',
self.original_conn,
group='database')
| apache-2.0 |
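A concrete full-stack test subclasses this base and passes an environment fixture into setUp(). A hypothetical sketch of that shape (the description arguments are simplified; real tests configure network types, agents, and hosts):

```python
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment

class TestConnectivity(base.BaseFullStackTestCase):
    def setUp(self):
        env = environment.Environment(
            environment.EnvironmentDescription(),
            [environment.HostDescription()])
        super(TestConnectivity, self).setUp(env)

    def test_create_network(self):
        # safe_client cleans up created resources when the test ends
        network = self.safe_client.create_network(tenant_id='some-tenant-id')
        self.assertIsNotNone(network['id'])
```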
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_router_rip.py | 14 | 25268 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_rip
short_description: Configure RIP in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and rip category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
router_rip:
description:
- Configure RIP.
default: null
type: dict
suboptions:
default_information_originate:
description:
- Enable/disable generation of default route.
type: str
choices:
- enable
- disable
default_metric:
description:
- Default metric.
type: int
distance:
description:
- distance
type: list
suboptions:
access_list:
description:
- Access list for route destination. Source router.access-list.name.
type: str
distance:
description:
- Distance (1 - 255).
type: int
id:
description:
- Distance ID.
required: true
type: int
prefix:
description:
- Distance prefix.
type: str
distribute_list:
description:
- Distribute list.
type: list
suboptions:
direction:
description:
- Distribute list direction.
type: str
choices:
- in
- out
id:
description:
- Distribute list ID.
required: true
type: int
interface:
description:
- Distribute list interface name. Source system.interface.name.
type: str
listname:
description:
- Distribute access/prefix list name. Source router.access-list.name router.prefix-list.name.
type: str
status:
description:
- status
type: str
choices:
- enable
- disable
garbage_timer:
description:
- Garbage timer in seconds.
type: int
interface:
description:
- RIP interface configuration.
type: list
suboptions:
auth_keychain:
description:
- Authentication key-chain name. Source router.key-chain.name.
type: str
auth_mode:
description:
- Authentication mode.
type: str
choices:
- none
- text
- md5
auth_string:
description:
- Authentication string/password.
type: str
flags:
description:
- flags
type: int
name:
description:
- Interface name. Source system.interface.name.
required: true
type: str
receive_version:
description:
- Receive version.
type: str
choices:
- 1
- 2
send_version:
description:
- Send version.
type: str
choices:
- 1
- 2
send_version2_broadcast:
description:
- Enable/disable broadcast version 1 compatible packets.
type: str
choices:
- disable
- enable
split_horizon:
description:
- Enable/disable split horizon.
type: str
choices:
- poisoned
- regular
split_horizon_status:
description:
- Enable/disable split horizon.
type: str
choices:
- enable
- disable
max_out_metric:
description:
- Maximum metric allowed to output(0 means 'not set').
type: int
neighbor:
description:
- neighbor
type: list
suboptions:
id:
description:
- Neighbor entry ID.
required: true
type: int
ip:
description:
- IP address.
type: str
network:
description:
- network
type: list
suboptions:
id:
description:
- Network entry ID.
required: true
type: int
prefix:
description:
- Network prefix.
type: str
offset_list:
description:
- Offset list.
type: list
suboptions:
access_list:
description:
- Access list name. Source router.access-list.name.
type: str
direction:
description:
- Offset list direction.
type: str
choices:
- in
- out
id:
description:
- Offset-list ID.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
offset:
description:
- offset
type: int
status:
description:
- status
type: str
choices:
- enable
- disable
passive_interface:
description:
- Passive interface configuration.
type: list
suboptions:
name:
description:
- Passive interface name. Source system.interface.name.
required: true
type: str
recv_buffer_size:
description:
- Receiving buffer size.
type: int
redistribute:
description:
- Redistribute configuration.
type: list
suboptions:
metric:
description:
- Redistribute metric setting.
type: int
name:
description:
- Redistribute name.
required: true
type: str
routemap:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- status
type: str
choices:
- enable
- disable
timeout_timer:
description:
- Timeout timer in seconds.
type: int
update_timer:
description:
- Update timer in seconds.
type: int
version:
description:
- RIP version.
type: str
choices:
- 1
- 2
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure RIP.
fortios_router_rip:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_rip:
default_information_originate: "enable"
default_metric: "4"
distance:
-
access_list: "<your_own_value> (source router.access-list.name)"
distance: "7"
id: "8"
prefix: "<your_own_value>"
distribute_list:
-
direction: "in"
id: "12"
interface: "<your_own_value> (source system.interface.name)"
listname: "<your_own_value> (source router.access-list.name router.prefix-list.name)"
status: "enable"
garbage_timer: "16"
interface:
-
auth_keychain: "<your_own_value> (source router.key-chain.name)"
auth_mode: "none"
auth_string: "<your_own_value>"
flags: "21"
name: "default_name_22 (source system.interface.name)"
receive_version: "1"
send_version: "1"
send_version2_broadcast: "disable"
split_horizon: "poisoned"
split_horizon_status: "enable"
max_out_metric: "28"
neighbor:
-
id: "30"
ip: "<your_own_value>"
network:
-
id: "33"
prefix: "<your_own_value>"
offset_list:
-
access_list: "<your_own_value> (source router.access-list.name)"
direction: "in"
id: "38"
interface: "<your_own_value> (source system.interface.name)"
offset: "40"
status: "enable"
passive_interface:
-
name: "default_name_43 (source system.interface.name)"
recv_buffer_size: "44"
redistribute:
-
metric: "46"
name: "default_name_47"
routemap: "<your_own_value> (source router.route-map.name)"
status: "enable"
timeout_timer: "50"
update_timer: "51"
version: "1"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_router_rip_data(json):
option_list = ['default_information_originate', 'default_metric', 'distance',
'distribute_list', 'garbage_timer', 'interface',
'max_out_metric', 'neighbor', 'network',
'offset_list', 'passive_interface', 'recv_buffer_size',
'redistribute', 'timeout_timer', 'update_timer',
'version']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        # Write converted elements back by index; rebinding the loop
        # variable alone would leave the list unchanged.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
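# For example (illustrative), ``underscore_to_hyphen({'default_metric': 4})``
# returns ``{'default-metric': 4}``; nested dicts and list elements are
# converted recursively.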
def router_rip(data, fos):
vdom = data['vdom']
router_rip_data = data['router_rip']
filtered_data = underscore_to_hyphen(filter_router_rip_data(router_rip_data))
return fos.set('router',
'rip',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
    # A DELETE that returns 404 also counts as success: the object is
    # already absent.
    return status['status'] == "success" or \
        (status['http_method'] == "DELETE" and status['http_status'] == 404)
def fortios_router(data, fos):
if data['router_rip']:
resp = router_rip(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"router_rip": {
"required": False, "type": "dict", "default": None,
"options": {
"default_information_originate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"default_metric": {"required": False, "type": "int"},
"distance": {"required": False, "type": "list",
"options": {
"access_list": {"required": False, "type": "str"},
"distance": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"}
}},
"distribute_list": {"required": False, "type": "list",
"options": {
"direction": {"required": False, "type": "str",
"choices": ["in", "out"]},
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"listname": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"garbage_timer": {"required": False, "type": "int"},
"interface": {"required": False, "type": "list",
"options": {
"auth_keychain": {"required": False, "type": "str"},
"auth_mode": {"required": False, "type": "str",
"choices": ["none", "text", "md5"]},
"auth_string": {"required": False, "type": "str"},
"flags": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"receive_version": {"required": False, "type": "str",
"choices": ["1", "2"]},
"send_version": {"required": False, "type": "str",
"choices": ["1", "2"]},
"send_version2_broadcast": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"split_horizon": {"required": False, "type": "str",
"choices": ["poisoned", "regular"]},
"split_horizon_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"max_out_metric": {"required": False, "type": "int"},
"neighbor": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"ip": {"required": False, "type": "str"}
}},
"network": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"}
}},
"offset_list": {"required": False, "type": "list",
"options": {
"access_list": {"required": False, "type": "str"},
"direction": {"required": False, "type": "str",
"choices": ["in", "out"]},
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"offset": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"passive_interface": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"recv_buffer_size": {"required": False, "type": "int"},
"redistribute": {"required": False, "type": "list",
"options": {
"metric": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"routemap": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"timeout_timer": {"required": False, "type": "int"},
"update_timer": {"required": False, "type": "int"},
"version": {"required": False, "type": "str",
"choices": ["1", "2"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
rspavel/spack | var/spack/repos/builtin.mock/packages/garply/package.py | 3 | 4269 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import sys
class Garply(Package):
"""Toy package for testing dependencies"""
homepage = "https://www.example.com"
url = "https://github.com/gartung/garply/archive/v3.0.0.tar.gz"
version('3.0.0',
sha256='534ac8ba7a6fed7e8bbb543bd43ca04999e65337445a531bd296939f5ac2f33d')
def install(self, spec, prefix):
        garply_h = '''#ifndef GARPLY_H_
#define GARPLY_H_
class Garply
{
private:
static const int version_major;
static const int version_minor;
public:
Garply();
int get_version() const;
int garplinate() const;
};
#endif // GARPLY_H_
'''
garply_cc = '''#include "garply.h"
#include "garply_version.h"
#include <iostream>
const int Garply::version_major = garply_version_major;
const int Garply::version_minor = garply_version_minor;
Garply::Garply() {}
int
Garply::get_version() const
{
return 10 * version_major + version_minor;
}
int
Garply::garplinate() const
{
std::cout << "Garply::garplinate version " << get_version()
<< " invoked" << std::endl;
std::cout << "Garply config dir = %s" << std::endl;
return get_version();
}
'''
garplinator_cc = '''#include "garply.h"
#include <iostream>
int
main()
{
Garply garply;
garply.garplinate();
return 0;
}
'''
garply_version_h = '''const int garply_version_major = %s;
const int garply_version_minor = %s;
'''
mkdirp('%s/garply' % prefix.include)
mkdirp('%s/garply' % self.stage.source_path)
with open('%s/garply_version.h' % self.stage.source_path, 'w') as f:
f.write(garply_version_h % (self.version[0], self.version[1:]))
with open('%s/garply/garply.h' % self.stage.source_path, 'w') as f:
f.write(garply_h)
with open('%s/garply/garply.cc' % self.stage.source_path, 'w') as f:
f.write(garply_cc % prefix.config)
with open('%s/garply/garplinator.cc' %
self.stage.source_path, 'w') as f:
f.write(garplinator_cc)
gpp = which('/usr/bin/g++')
if sys.platform == 'darwin':
gpp = which('/usr/bin/clang++')
gpp('-Dgarply_EXPORTS',
'-I%s' % self.stage.source_path,
'-O2', '-g', '-DNDEBUG', '-fPIC',
'-o', 'garply.cc.o',
'-c', '%s/garply/garply.cc' % self.stage.source_path)
gpp('-Dgarply_EXPORTS',
'-I%s' % self.stage.source_path,
'-O2', '-g', '-DNDEBUG', '-fPIC',
'-o', 'garplinator.cc.o',
'-c', '%s/garply/garplinator.cc' % self.stage.source_path)
if sys.platform == 'darwin':
gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-dynamiclib',
'-Wl,-headerpad_max_install_names', '-o', 'libgarply.dylib',
'-install_name', '@rpath/libgarply.dylib',
'garply.cc.o')
gpp('-O2', '-g', '-DNDEBUG', '-Wl,-search_paths_first',
'-Wl,-headerpad_max_install_names',
'garplinator.cc.o', '-o', 'garplinator',
'-Wl,-rpath,%s' % prefix.lib64,
'libgarply.dylib')
mkdirp(prefix.lib64)
copy('libgarply.dylib', '%s/libgarply.dylib' % prefix.lib64)
else:
gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
'-Wl,-soname,libgarply.so',
'-o', 'libgarply.so', 'garply.cc.o')
gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
'garplinator.cc.o', '-o', 'garplinator',
'-Wl,-rpath,%s' % prefix.lib64,
'libgarply.so')
mkdirp(prefix.lib64)
copy('libgarply.so', '%s/libgarply.so' % prefix.lib64)
copy('garplinator', '%s/garplinator' % prefix.lib64)
copy('%s/garply/garply.h' % self.stage.source_path,
'%s/garply/garply.h' % prefix.include)
mkdirp(prefix.bin)
copy('garply_version.h', '%s/garply_version.h' % prefix.bin)
os.symlink('%s/garplinator' % prefix.lib64,
'%s/garplinator' % prefix.bin)
| lgpl-2.1 |
foursquare/pants | tests/python/pants_test/engine/legacy/test_graph_integration.py | 1 | 5480 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.option.scope import GLOBAL_SCOPE_CONFIG_SECTION
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class GraphIntegrationTest(PantsRunIntegrationTest):
_SOURCES_TARGET_BASE = 'testprojects/src/python/sources'
_SOURCES_ERR_MSGS = {
'missing-globs': ("globs('*.a')", ['*.a']),
'missing-rglobs': ("rglobs('*.a')", ['**/*.a']),
'missing-zglobs': ("zglobs('**/*.a')", ['**/*.a']),
'missing-literal-files': (
"['nonexistent_test_file.txt', 'another_nonexistent_file.txt']", [
'nonexistent_test_file.txt',
'another_nonexistent_file.txt',
]),
'some-missing-some-not': ("globs('*.txt', '*.rs')", ['*.rs']),
'overlapping-globs': ("globs('sources.txt', '*.txt')", ['*.txt']),
}
_WARN_FMT = "WARN] In target {base}:{name} with {desc}={glob}: glob pattern '{as_zsh_glob}' did not match any files."
def _list_target_check_warnings_sources(self, target_name):
target_full = '{}:{}'.format(self._SOURCES_TARGET_BASE, target_name)
glob_str, expected_globs = self._SOURCES_ERR_MSGS[target_name]
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
for as_zsh_glob in expected_globs:
warning_msg = self._WARN_FMT.format(
base=self._SOURCES_TARGET_BASE,
name=target_name,
desc='sources',
glob=glob_str,
as_zsh_glob=as_zsh_glob)
self.assertIn(warning_msg, pants_run.stderr_data)
_ERR_TARGETS = {
'testprojects/src/python/sources:some-missing-some-not': [
"globs('*.txt', '*.rs')",
"Snapshot(PathGlobs(include=(u\'testprojects/src/python/sources/*.txt\', u\'testprojects/src/python/sources/*.rs\'), exclude=(), glob_match_error_behavior<=GlobMatchErrorBehavior>=GlobMatchErrorBehavior(failure_behavior=error)))",
"Globs did not match. Excludes were: []. Unmatched globs were: [\"testprojects/src/python/sources/*.rs\"].",
],
'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-bundle-fileset': [
"['a/b/file1.txt']",
"RGlobs('*.aaaa', '*.bbbb')",
"Globs('*.aaaa')",
"ZGlobs('**/*.abab')",
"['file1.aaaa', 'file2.aaaa']",
"Snapshot(PathGlobs(include=(u\'testprojects/src/java/org/pantsbuild/testproject/bundle/*.aaaa\',), exclude=(), glob_match_error_behavior<=GlobMatchErrorBehavior>=GlobMatchErrorBehavior(failure_behavior=error)))",
"Globs did not match. Excludes were: []. Unmatched globs were: [\"testprojects/src/java/org/pantsbuild/testproject/bundle/*.aaaa\"].",
]
}
def _list_target_check_error(self, target_name):
expected_excerpts = self._ERR_TARGETS[target_name]
pants_run = self.run_pants(['list', target_name], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'error',
},
})
self.assert_failure(pants_run)
self.assertIn(AddressLookupError.__name__, pants_run.stderr_data)
for excerpt in expected_excerpts:
self.assertIn(excerpt, pants_run.stderr_data)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_missing_sources_warnings(self):
for target_name in self._SOURCES_ERR_MSGS.keys():
self._list_target_check_warnings_sources(target_name)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_existing_sources(self):
target_full = '{}:text'.format(self._SOURCES_TARGET_BASE)
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
self.assertNotIn("WARN]", pants_run.stderr_data)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_missing_bundles_warnings(self):
target_full = '{}:{}'.format(self._BUNDLE_TARGET_BASE, self._BUNDLE_TARGET_NAME)
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
for glob_str, expected_globs in self._BUNDLE_ERR_MSGS:
for as_zsh_glob in expected_globs:
warning_msg = self._WARN_FMT.format(
base=self._BUNDLE_TARGET_BASE,
name=self._BUNDLE_TARGET_NAME,
desc='fileset',
glob=glob_str,
as_zsh_glob=as_zsh_glob)
self.assertIn(warning_msg, pants_run.stderr_data)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_existing_bundles(self):
target_full = '{}:mapper'.format(self._BUNDLE_TARGET_BASE)
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
self.assertNotIn("WARN]", pants_run.stderr_data)
def test_error_message(self):
self._list_target_check_error('testprojects/src/python/sources:some-missing-some-not')
self._list_target_check_error(
'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-bundle-fileset')
| apache-2.0 |
maK-/weevely3 | testsuite/test_file_enum.py | 14 | 4630 | from testfixtures import log_capture
from testsuite.base_fs import BaseFilesystem
from testsuite import config
from core.sessions import SessionURL
from core import modules
import utils
from core import messages
import subprocess
import tempfile
import os
class FileEnum(BaseFilesystem):
def setUp(self):
self.session = SessionURL(
self.url,
self.password,
volatile = True
)
modules.load_modules(self.session)
# Create the folder tree
self.folders_abs, self.folders_rel = self.populate_folders()
self.files_abs, self.files_rel = self.populate_files(
self.folders_abs,
[ 'executable', 'writable', 'write-executable', 'readable' ]
)
# Change mode of the first file to ---x--x--x 0111 execute
self.check_call(
config.cmd_env_chmod_s_s % ('0111', self.files_abs[0]),
shell=True)
# Change mode of the second file to --w--w--w- 0222 write
self.check_call(
config.cmd_env_chmod_s_s % ('0222', self.files_abs[1]),
shell=True)
# Change mode of the third file to 0000
self.check_call(
config.cmd_env_chmod_s_s % ('0000', self.files_abs[2]),
shell=True)
self.run_argv = modules.loaded['file_enum'].run_argv
def tearDown(self):
# Reset recursively all the permissions to 0777
self.check_call(
config.cmd_env_chmod_s_s % ('-R 0777', self.folders_abs[0]),
shell=True)
for folder in reversed(self.folders_abs):
self.check_call(
config.cmd_env_remove_s % (self.files_abs.pop()),
shell=True)
self.check_call(
config.cmd_env_rmdir_s % (folder),
shell=True)
def test_file_enum(self):
        # Enum self.files_rel[:3] passed with arguments
self.assertItemsEqual(self.run_argv( self.files_rel[:3] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
        # Enum self.files_rel[:3] + bogus passed with arguments
self.assertItemsEqual(self.run_argv( self.files_rel[:3] + [ 'bogus' ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
        # Enum self.files_rel[:3] + bogus passed with arguments and -print
self.assertItemsEqual(self.run_argv( self.files_rel[:3] + [ 'bogus', '-print' ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ],
'bogus' : []
})
def test_file_enum_lpath(self):
        # Enum self.files_rel[:3] passed with lfile
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('\n'.join(self.files_rel[:3]))
temp_file.flush()
self.assertItemsEqual(self.run_argv( [ '-lpath-list', temp_file.name ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
temp_file.close()
        # Enum self.files_rel[:3] + bogus passed with lfile
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('\n'.join(self.files_rel[:3] + [ 'bogus' ]))
temp_file.flush()
self.assertItemsEqual(self.run_argv( [ '-lpath-list', temp_file.name ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
temp_file.close()
        # Enum self.files_rel[:3] + bogus passed with lfile and -print
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('\n'.join(self.files_rel[:3] + [ 'bogus' ]))
temp_file.flush()
self.assertItemsEqual(self.run_argv( [ '-lpath-list', temp_file.name, '-print' ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ],
'bogus' : []
})
temp_file.close()
@log_capture()
def test_err(self, log_captured):
self.assertIsNone(self.run_argv( [ '-lpath-list', 'bogus' ] ))
self.assertEqual(messages.generic.error_loading_file_s_s[:19],
log_captured.records[-1].msg[:19])
| gpl-3.0 |
yao-matrix/mLearning | kaggle/distracted_driver/app/train.py | 2 | 3468 | #!/usr/bin/env python
# coding=utf-8
import os
import cv2
import glob
import datetime
import logging
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from ml_utils import split_cv, save_model
current_dir = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(current_dir, os.path.pardir, 'log', datetime.date.today().strftime('%Y%m%d') + '.log')
logger = logging.getLogger('train')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)
ch = logging.StreamHandler()
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
def load_img(img_path, img_rows, img_cols):
# read image to a grayscale buffer
# print img_path
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
rows, cols = img.shape
# print rows, cols
# print img_rows, img_cols
    resized = cv2.resize(img, (img_cols, img_rows), interpolation=cv2.INTER_CUBIC)
return resized
def load_train(img_rows, img_cols):
X_train = []
Y_train = []
i = 0
for j in range(10):
path = os.path.join('/workshop2/data/driver-distraction', 'train', 'c' + str(j), '*.jpg')
files = glob.glob(path)
for fl in files:
i += 1
# print fl
img = load_img(fl, img_rows, img_cols)
X_train.append(img)
Y_train.append(j)
logger.info("%d samples in total" % (i))
return X_train, Y_train
img_rows = 96
img_cols = 128
batch_size = 64
nb_classes = 10
nb_epoch = 2
nb_filters = 32
nb_pool = 2
nb_conv = 3
if __name__ == "__main__":
logger.info("start training")
# read training data
train_data, train_labels = load_train(img_rows, img_cols)
train_data = np.array(train_data, dtype = np.uint8)
train_labels = np.array(train_labels, dtype = np.uint8)
train_data = train_data.reshape(train_data.shape[0], 1, img_rows, img_cols)
train_labels = np_utils.to_categorical(train_labels, nb_classes)
train_data = train_data.astype('float32')
    # scale pixel values from [0, 255] into [-1.0, 1.0]
    train_data /= 127.5
    train_data -= 1.0
logger.info("read training data complete")
# split for cross validation
train, train_label, validation, validation_label = split_cv(train_data, train_labels)
logger.info("data split complete")
# build stacking layers
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode = 'valid', input_shape = (1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adadelta')
model.fit(train, train_label, batch_size = batch_size, nb_epoch = nb_epoch, verbose = 1, validation_data = (validation, validation_label))
logger.info("model training complete")
score = model.evaluate(validation, validation_label, verbose = 0)
logger.info("validation score: %f" % (score))
save_model(model)
logger.info("model saved")
| apache-2.0 |
hidenori-t/chainer | chainer/optimizer.py | 7 | 8978 | import math
import numpy
from chainer import cuda
# TODO(delta2323): Make it public function and move it to common directory.
def _sqnorm(x):
with cuda.get_device(x):
x = x.ravel()
return float(x.dot(x))
class Optimizer(object):
"""Base class of all numerical optimizers.
Optimizer is set up with references to parameters and gradients, and
then on every call of :meth:`update`, it updates parameters based on
corresponding gradients. Optimizer implementations must override
:meth:`update_one` method, which updates one parameter array using the
corresponding gradient array.
Optimizer can optionally use state for each parameter/gradient pair. It is
    initialized by the :meth:`init_state` method at setup.
Attributes:
t (int): Number of update steps. It can be used in :meth:`update_one`
implementation, where :attr:`t` is incremented beforehand.
"""
def setup(self, params_grads):
"""Prepares states for all given parameter/gradient pairs.
Args:
params_grads: FunctionSet or tuple (pair) of two tuples.
For tuple, the first element is a tuple of parameter arrays,
and the second is a tuple of corresponding gradient arrays.
"""
if hasattr(params_grads, 'parameters') and \
hasattr(params_grads, 'gradients'):
params = getattr(params_grads, 'parameters')
grads = getattr(params_grads, 'gradients')
elif isinstance(params_grads, tuple):
params = params_grads[0]
grads = params_grads[1]
else:
msg = ("'params_grads' must have 'parameters' and 'gradients'"
" attributes or tuples, {0} is given")
raise ValueError(msg)
self.t = 0
self.tuples = []
for p, g in zip(params, grads):
with cuda.get_device(p):
state = self.init_state(p, g)
self.tuples.append((p, g, state))
def init_state(self, param, grad):
"""Returns the initial state for given parameter and gradient.
Default implementation delegates the procedure to
:meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type
of ``param``.
Args:
param: Parameter array.
grad: Gradient array corresponding to ``param``.
Returns:
Initial state value.
.. warning::
Note that, on every call of :meth:`update_one`, the state value
is passed by value and then the method updates its content, so
            the state must be a reference. Especially, one cannot use a
value of built-in numeric type. If the state is one scalar
value, it is recommended to use a zero-dimensional array, i.e.
:class:`numpy.ndarray` with shape ``()``.
"""
if isinstance(param, cuda.ndarray):
return self.init_state_gpu(param, grad)
return self.init_state_cpu(param, grad)
def init_state_cpu(self, param, grad):
"""Returns the initial state for given parameter and gradient on GPU.
Args:
param (numpy.ndarray): Parameter array.
grad (numpy.ndarray): Gradient array.
Returns:
Initial state value.
.. seealso:: :meth:`init_state`, :meth:`init_state_gpu`
"""
return None
def init_state_gpu(self, param, grad):
"""Returns the initial state for given parameter and gradient on CPU.
Args:
param (cupy.ndarray): Parameter array.
grad (cupy.ndarray): Gradient array.
Returns:
Initial state value.
        .. seealso:: :meth:`init_state`, :meth:`init_state_cpu`
"""
return None
def zero_grads(self):
"""Fills all gradient arrays by zeros.
        This method should be called before backprop takes place, since
gradients are accumulated on backprop.
"""
for _, g, _ in self.tuples:
if isinstance(g, cuda.ndarray):
with cuda.get_device(g):
g.fill(0)
else:
g.fill(0)
def compute_grads_norm(self):
"""Computes the norm of whole gradients.
Returns:
            float: L2 norm of whole gradients, i.e. the square root of the
                sum of squares of all gradient elements.
.. warning::
This method returns a CPU-computed value, which means that this
method synchronizes between CPU and GPU if at least one of the
            gradients resides on the GPU.
"""
# TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU
sqnorm = 0
for _, g, _ in self.tuples:
sqnorm += _sqnorm(g)
return math.sqrt(sqnorm)
def clip_grads(self, maxnorm):
"""Clips the norm of whole gradients up to given threshold.
Args:
maxnorm (float): Threshold of gradient L2 norm.
.. seealso::
:meth:`compute_grads_norm`
It uses this method to compute the gradient norm to be clipped.
"""
norm = self.compute_grads_norm()
if norm > maxnorm:
ratio = maxnorm / norm
for _, g, _ in self.tuples:
with cuda.get_device(g):
g *= ratio
def weight_decay(self, decay):
"""Applies weight decay to the parameter/gradient pairs.
Args:
decay (float): Coefficient of weight decay
"""
for p, g, _ in self.tuples:
if isinstance(p, cuda.ndarray):
with cuda.get_device(p):
cuda.elementwise('T p, T decay', 'T g',
'g += decay * p',
'weight_decay')(p, decay, g)
else:
g += decay * p
def accumulate_grads(self, grads):
"""Accumulates gradients from other source.
This method just adds given gradient arrays to gradients that this
optimizer holds. It is typically used in data-parallel optimization,
where gradients for different shards are computed in parallel and
aggregated by this method. This method correctly treats multiple GPU
devices.
Args:
grads (Iterable): Iterable of gradient arrays to be accumulated.
"""
for (_, g_dst, _), g_src in zip(self.tuples, grads):
if isinstance(g_dst, numpy.ndarray):
g_dst += cuda.to_cpu(g_src)
continue
with cuda.get_device(g_dst):
if (isinstance(g_src, cuda.ndarray) and
g_dst.gpudata.device != g_src.gpudata.device):
g_dst += cuda.copy(g_src, out_device=g_dst.gpudata.device)
else:
g_dst += cuda.to_gpu(g_src)
def update(self):
"""Updates all parameters and states using corresponding gradients.
This method iteratively calls :meth:`update_one` for each parameter/
gradient/state tuple. Beforehand, :attr:`t` attribute is incremented.
"""
self.t += 1
for p, g, s in self.tuples:
with cuda.get_device(p):
self.update_one(p, g, s)
def update_one(self, param, grad, state):
"""Updates a parameter array and its state using given gradient.
The default implementation delegates the procedure to
:meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type
        of the parameter array. Optimizer implementations must override these
type-specific methods or this :meth:`update_one` method directly.
Args:
param: Parameter array.
grad: Gradient array.
state: State value.
.. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu`
"""
if isinstance(param, cuda.ndarray):
self.update_one_gpu(param, grad, state)
else:
self.update_one_cpu(param, grad, state)
def update_one_cpu(self, param, grad, state):
"""Updates a parameter array and its state using given gradient on CPU.
Args:
param (numpy.ndarray): Parameter array.
grad (numpy.ndarray): Gradient array.
state: State value.
.. seealso:: :meth:`update_one`, :meth:`update_one_gpu`
"""
raise NotImplementedError()
def update_one_gpu(self, param, grad, state):
"""Updates a parameter array and its state using given gradient on GPU.
Args:
param (cupy.ndarray): Parameter array.
grad (cupy.ndarray): Gradient array.
state: State value.
.. seealso:: :meth:`update_one`, :meth:`update_one_cpu`
"""
raise NotImplementedError()
| mit |
jayme-github/headphones | lib/unidecode/x1d6.py | 248 | 3974 | data = (
's', # 0x00
't', # 0x01
'u', # 0x02
'v', # 0x03
'w', # 0x04
'x', # 0x05
'y', # 0x06
'z', # 0x07
'A', # 0x08
'B', # 0x09
'C', # 0x0a
'D', # 0x0b
'E', # 0x0c
'F', # 0x0d
'G', # 0x0e
'H', # 0x0f
'I', # 0x10
'J', # 0x11
'K', # 0x12
'L', # 0x13
'M', # 0x14
'N', # 0x15
'O', # 0x16
'P', # 0x17
'Q', # 0x18
'R', # 0x19
'S', # 0x1a
'T', # 0x1b
'U', # 0x1c
'V', # 0x1d
'W', # 0x1e
'X', # 0x1f
'Y', # 0x20
'Z', # 0x21
'a', # 0x22
'b', # 0x23
'c', # 0x24
'd', # 0x25
'e', # 0x26
'f', # 0x27
'g', # 0x28
'h', # 0x29
'i', # 0x2a
'j', # 0x2b
'k', # 0x2c
'l', # 0x2d
'm', # 0x2e
'n', # 0x2f
'o', # 0x30
'p', # 0x31
'q', # 0x32
'r', # 0x33
's', # 0x34
't', # 0x35
'u', # 0x36
'v', # 0x37
'w', # 0x38
'x', # 0x39
'y', # 0x3a
'z', # 0x3b
'A', # 0x3c
'B', # 0x3d
'C', # 0x3e
'D', # 0x3f
'E', # 0x40
'F', # 0x41
'G', # 0x42
'H', # 0x43
'I', # 0x44
'J', # 0x45
'K', # 0x46
'L', # 0x47
'M', # 0x48
'N', # 0x49
'O', # 0x4a
'P', # 0x4b
'Q', # 0x4c
'R', # 0x4d
'S', # 0x4e
'T', # 0x4f
'U', # 0x50
'V', # 0x51
'W', # 0x52
'X', # 0x53
'Y', # 0x54
'Z', # 0x55
'a', # 0x56
'b', # 0x57
'c', # 0x58
'd', # 0x59
'e', # 0x5a
'f', # 0x5b
'g', # 0x5c
'h', # 0x5d
'i', # 0x5e
'j', # 0x5f
'k', # 0x60
'l', # 0x61
'm', # 0x62
'n', # 0x63
'o', # 0x64
'p', # 0x65
'q', # 0x66
'r', # 0x67
's', # 0x68
't', # 0x69
'u', # 0x6a
'v', # 0x6b
'w', # 0x6c
'x', # 0x6d
'y', # 0x6e
'z', # 0x6f
'A', # 0x70
'B', # 0x71
'C', # 0x72
'D', # 0x73
'E', # 0x74
'F', # 0x75
'G', # 0x76
'H', # 0x77
'I', # 0x78
'J', # 0x79
'K', # 0x7a
'L', # 0x7b
'M', # 0x7c
'N', # 0x7d
'O', # 0x7e
'P', # 0x7f
'Q', # 0x80
'R', # 0x81
'S', # 0x82
'T', # 0x83
'U', # 0x84
'V', # 0x85
'W', # 0x86
'X', # 0x87
'Y', # 0x88
'Z', # 0x89
'a', # 0x8a
'b', # 0x8b
'c', # 0x8c
'd', # 0x8d
'e', # 0x8e
'f', # 0x8f
'g', # 0x90
'h', # 0x91
'i', # 0x92
'j', # 0x93
'k', # 0x94
'l', # 0x95
'm', # 0x96
'n', # 0x97
'o', # 0x98
'p', # 0x99
'q', # 0x9a
'r', # 0x9b
's', # 0x9c
't', # 0x9d
'u', # 0x9e
'v', # 0x9f
'w', # 0xa0
'x', # 0xa1
'y', # 0xa2
'z', # 0xa3
'i', # 0xa4
'j', # 0xa5
'', # 0xa6
'', # 0xa7
'Alpha', # 0xa8
'Beta', # 0xa9
'Gamma', # 0xaa
'Delta', # 0xab
'Epsilon', # 0xac
'Zeta', # 0xad
'Eta', # 0xae
'Theta', # 0xaf
'Iota', # 0xb0
'Kappa', # 0xb1
'Lamda', # 0xb2
'Mu', # 0xb3
'Nu', # 0xb4
'Xi', # 0xb5
'Omicron', # 0xb6
'Pi', # 0xb7
'Rho', # 0xb8
'Theta', # 0xb9
'Sigma', # 0xba
'Tau', # 0xbb
'Upsilon', # 0xbc
'Phi', # 0xbd
'Chi', # 0xbe
'Psi', # 0xbf
'Omega', # 0xc0
'nabla', # 0xc1
'alpha', # 0xc2
'beta', # 0xc3
'gamma', # 0xc4
'delta', # 0xc5
'epsilon', # 0xc6
'zeta', # 0xc7
'eta', # 0xc8
'theta', # 0xc9
'iota', # 0xca
'kappa', # 0xcb
'lamda', # 0xcc
'mu', # 0xcd
'nu', # 0xce
'xi', # 0xcf
'omicron', # 0xd0
'pi', # 0xd1
'rho', # 0xd2
'sigma', # 0xd3
'sigma', # 0xd4
'tau', # 0xd5
'upsilon', # 0xd6
'phi', # 0xd7
'chi', # 0xd8
'psi', # 0xd9
'omega', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
| gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.py | 11 | 50827 | """
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanprod` -- product of non-NaN values
- `nancumsum` -- cumulative sum of non-NaN values
- `nancumprod` -- cumulative product of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.lib.function_base import _ureduce as _ureduce
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
'nancumsum', 'nancumprod'
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
a = np.array(a, subok=True, copy=True)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
mask = a != a
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
np.copyto(a, val, where=mask)
return a, mask
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
    output. If out is None, then `a` is used instead so that the
division is in place. Note that this is only called with `a` an inexact
type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore', divide='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
Nan is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `min` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
raised and NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `max` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned. The same dtype as `a` is returned.
See Also
--------
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amin, fmin, minimum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
res = np.amax(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
cannot be trusted if a slice contains only NaNs and Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
a, mask = _replace_nan(a, np.inf)
res = np.argmin(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nanargmax(a, axis=None):
"""
Return the indices of the maximum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
results cannot be trusted if a slice contains only NaNs and -Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
a, mask = _replace_nan(a, -np.inf)
res = np.argmax(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
In NumPy versions <= 1.8.0 Nan is returned for slices that are all-NaN or
empty. In later versions zero is returned.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute the
sum of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
.. versionadded:: 1.8.0
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `mean` or `sum` methods
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nansum : ndarray.
A new array holding the result is returned unless `out` is
        specified, in which case it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.
See Also
--------
numpy.sum : Sum across array propagating NaNs.
isnan : Show which elements are NaN.
isfinite: Show which elements are not NaN or +/-inf.
Notes
-----
If both positive and negative infinity are present, the sum will be Not
A Number (NaN).
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
nan
"""
a, mask = _replace_nan(a, 0)
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis treating Not a
Numbers (NaNs) as ones.
One is returned for slices that are all-NaN or empty.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
the product of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
Returns
-------
nanprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.
See Also
--------
numpy.prod : Product across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nanprod(1)
1
>>> np.nanprod([1])
1
>>> np.nanprod([1, np.nan])
1.0
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanprod(a)
6.0
>>> np.nanprod(a, axis=0)
array([ 3., 2.])
"""
a, mask = _replace_nan(a, 1)
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are
encountered and leading NaNs are replaced by zeros.
Zeros are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
nancumsum : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.
See Also
--------
numpy.cumsum : Cumulative sum across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumsum(1)
array([1])
>>> np.nancumsum([1])
array([1])
>>> np.nancumsum([1, np.nan])
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumsum(a)
array([ 1., 3., 6., 6.])
>>> np.nancumsum(a, axis=0)
array([[ 1., 2.],
[ 4., 2.]])
>>> np.nancumsum(a, axis=1)
array([[ 1., 3.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 0)
return np.cumsum(a, axis=axis, dtype=dtype, out=out)
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis,
treating Not a Number (NaN) values as one. The cumulative product does
not change when NaNs are encountered and leading NaNs are replaced by ones.
Ones are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
nancumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.
See Also
--------
numpy.cumprod : Cumulative product across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumprod(1)
array([1])
>>> np.nancumprod([1])
array([1])
>>> np.nancumprod([1, np.nan])
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a)
array([ 1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0)
array([[ 1., 2.],
[ 3., 2.]])
>>> np.nancumprod(a, axis=1)
array([[ 1., 2.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 1)
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for inexact inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `mean` or `sum` methods
of sub-classes of `ndarray`. If a sub-class's method does not
implement `keepdims`, an exception will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned. NaN is
returned for slices that contain only NaNs.
See Also
--------
average : Weighted average
mean : Arithmetic mean taken while not ignoring NaNs
var, nanvar
Notes
-----
The arithmetic mean is the sum of the non-NaN elements along the axis
divided by the number of non-NaN elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32`. Specifying a
higher-precision accumulator using the `dtype` keyword can alleviate
this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanmean(a)
2.6666666666666665
>>> np.nanmean(a, axis=0)
array([ 2., 4.])
>>> np.nanmean(a, axis=1)
array([ 1., 3.5])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
avg = _divide_by_count(tot, cnt, out=out)
isbad = (cnt == 0)
if isbad.any():
warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
def _nanmedian1d(arr1d, overwrite_input=False):
"""
Private function for rank 1 arrays. Compute the median ignoring NaNs.
See nanmedian for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
return np.nan
elif s.size == 0:
return np.median(arr1d, overwrite_input=overwrite_input)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.median(x[:-s.size], overwrite_input=True)
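# Editor's illustrative note: for arr1d = [nan, 2., nan, 5., 4.] the NaN
# indices are s = [0, 2]; the last s.size slots hold [5., 4.], whose
# non-NaN values overwrite the leading NaN positions, giving
# [5., 2., 4., 5., 4.]; the median is then taken over the first
# arr1d.size - s.size elements [5., 2., 4.], i.e. 4.0.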
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanmedian for parameter usage
"""
if axis is None or a.ndim == 1:
part = a.ravel()
if out is None:
return _nanmedian1d(part, overwrite_input)
else:
out[...] = _nanmedian1d(part, overwrite_input)
return out
else:
# for small medians use sort + indexing which is still faster than
# apply_along_axis
# benchmarked with shuffled (50, 50, x) containing a few NaN
if a.shape[axis] < 600:
return _nanmedian_small(a, axis, out, overwrite_input)
result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
"""
sort + indexing median, faster for small medians along multiple
dimensions due to the high overhead of apply_along_axis
see nanmedian for parameter usage
"""
a = np.ma.masked_array(a, np.isnan(a))
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
if out is not None:
out[...] = m.filled(np.nan)
return out
return m.filled(np.nan)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
"""
Compute the median along the specified axis, while ignoring NaNs.
Returns the median of the array elements.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If this is anything but the default value it will be passed
through (in the special case of an empty array) to the
`mean` function of the underlying array. If the array is
a sub-class and `mean` does not have the kwarg `keepdims` this
will raise a RuntimeError.
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
>>> a[0, 1] = np.nan
>>> a
array([[ 10., nan, 4.],
[ 3., 2., 1.]])
>>> np.median(a)
nan
>>> np.nanmedian(a)
3.0
>>> np.nanmedian(a, axis=0)
array([ 6.5, 2., 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> b = a.copy()
>>> np.nanmedian(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.nanmedian(b, axis=None, overwrite_input=True)
3.0
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
# apply_along_axis in _nanmedian doesn't handle empty arrays well,
# so deal with them up front
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims and keepdims is not np._NoValue:
return r.reshape(k)
else:
return r
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
Compute the qth percentile of the data along the specified axis,
while ignoring nan values.
Returns the qth percentile(s) of the array elements.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100
inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
If this is anything but the default value it will be passed
through (in the special case of an empty array) to the
`mean` function of the underlying array. If the array is
a sub-class and `mean` does not have the kwarg `keepdims` this
will raise a RuntimeError.
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
nanmean, nanmedian, percentile, median, mean
Notes
-----
Given a vector ``V`` of length ``N``, the ``q``-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
>>> a[0][1] = np.nan
>>> a
array([[ 10., nan, 4.],
[ 3., 2., 1.]])
>>> np.percentile(a, 50)
nan
>>> np.nanpercentile(a, 50)
3.5
>>> np.nanpercentile(a, 50, axis=0)
array([ 6.5, 2., 2.5])
>>> np.nanpercentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.nanpercentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.nanpercentile(a, 50, axis=0, out=out)
array([ 6.5, 2., 2.5])
>>> m
array([ 6.5, 2. , 2.5])
>>> b = a.copy()
>>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
q = np.asanyarray(q)
# apply_along_axis in _nanpercentile doesn't handle empty arrays well,
# so deal with them up front
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims and keepdims is not np._NoValue:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear'):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanpercentile for parameter usage
"""
if axis is None or a.ndim == 1:
part = a.ravel()
result = _nanpercentile1d(part, q, overwrite_input, interpolation)
else:
result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
overwrite_input, interpolation)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.rollaxis(result, axis)
if out is not None:
out[...] = result
return result
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
Private function for rank 1 arrays. Compute percentile ignoring
NaNs.
See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
if q.ndim == 0:
return np.nan
else:
return np.nan * np.ones((len(q),))
elif s.size == 0:
return np.percentile(arr1d, q, overwrite_input=overwrite_input,
interpolation=interpolation)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.percentile(x[:-s.size], q, overwrite_input=True,
interpolation=interpolation)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
Returns the variance of the array elements, a measure of the spread of
a distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
Returns
-------
variance : ndarray, see dtype parameter above
If `out` is None, return a new array containing the variance,
otherwise return a reference to the output array. If ddof is >= the
number of non-NaN elements in a slice or the slice contains only
NaNs, then the result for that slice is NaN.
See Also
--------
std : Standard deviation
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite
population. ``ddof=0`` provides a maximum likelihood estimate of the
variance for normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
For this function to work on sub-classes of ndarray, they must define
`sum` with the kwarg `keepdims`.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.var(a)
1.5555555555555554
>>> np.nanvar(a, axis=0)
array([ 1., 0.])
>>> np.nanvar(a, axis=1)
array([ 0., 0.25])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
# Compute mean
if type(arr) is np.matrix:
_keepdims = np._NoValue
else:
_keepdims = True
# we need to special case matrix for reverse compatibility
# in order for this to work, these sums need to be called with
# keepdims=True, however matrix now raises an error in this case, but
# the reason that it drops the keepdims kwarg is to force keepdims=True
# so this used to work by serendipity.
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
avg = _divide_by_count(avg, cnt)
# Compute squared deviation from mean.
np.subtract(arr, avg, out=arr, casting='unsafe')
arr = _copyto(arr, 0, mask)
if issubclass(arr.dtype.type, np.complexfloating):
sqr = np.multiply(arr, arr.conj(), out=arr).real
else:
sqr = np.multiply(arr, arr, out=arr)
# Compute variance.
var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if var.ndim < cnt.ndim:
# Subclasses of ndarray may ignore keepdims, so check here.
cnt = cnt.squeeze(axis)
dof = cnt - ddof
var = _divide_by_count(var, dof)
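# Editor's illustrative note: for a = [[1, nan], [3, 4]] with axis=1 and
# ddof=0, cnt = [1, 2] and the per-row means are [1., 3.5]; the masked
# squared deviations sum to [0., 0.5], and dividing by dof = cnt gives
# [0., 0.25], matching the docstring example.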
isbad = (dof <= 0)
if np.any(isbad):
warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
return var
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
ignoring NaNs.
Returns the standard deviation, a measure of the spread of a
distribution, of the non-NaN array elements. The standard deviation is
computed for the flattened array by default, otherwise over the
specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Calculate the standard deviation of the non-NaN values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it
is the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the
calculated values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If this value is anything but the default it is passed through
as-is to the relevant functions of the sub-classes. If these
functions do not have a `keepdims` kwarg, a RuntimeError will
be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard
deviation, otherwise return a reference to the output array. If
ddof is >= the number of non-NaN elements in a slice or the slice
contains only NaNs, then the result for that slice is NaN.
See Also
--------
var, mean, std
nanvar, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
specified, the divisor ``N - ddof`` is used instead. In standard
statistical practice, ``ddof=1`` provides an unbiased estimator of the
variance of the infinite population. ``ddof=0`` provides a maximum
likelihood estimate of the variance for normally distributed variables.
The standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute value before
squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example
below). Specifying a higher-accuracy accumulator using the `dtype`
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanstd(a)
1.247219128924647
>>> np.nanstd(a, axis=0)
array([ 1., 0.])
>>> np.nanstd(a, axis=1)
array([ 0., 0.5])
"""
var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(var, np.ndarray):
std = np.sqrt(var, out=var)
else:
std = var.dtype.type(np.sqrt(var))
return std
| apache-2.0 |
kevinrigney/PlaylistDatabase | ReplaceVideoUrl.py | 1 | 4628 | #!/usr/bin/env python3
# Find videos with duplicates: SELECT Track.track_name, Artist.artist_name,Track.youtube_link FROM Track JOIN Artist WHERE Track.artist_id = Artist.id GROUP BY Track.youtube_link HAVING count(*) >=2
from PlaylistDatabase import PlaylistDatabase
db = PlaylistDatabase(config_file='PlaylistDatabaseConfig.ini')
video = input('Enter the video ID: ')
if video.startswith('https://youtu.be/'):
pass
elif video.startswith('https://www.youtube.com/watch?v='):
video = video.replace('https://www.youtube.com/watch?v=','https://youtu.be/')
else:
video = 'https://youtu.be/'+video
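# Editor's note: with a hypothetical ID 'abc123XYZ', both the bare ID and
# 'https://www.youtube.com/watch?v=abc123XYZ' normalize to
# 'https://youtu.be/abc123XYZ' here.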
db._cur.execute('''SELECT Track.id, Track.track_name, Artist.artist_name, Album.album_name, Track.artist_id, Track.album_id, Track.youtube_link from Track JOIN Artist JOIN Album WHERE Track.youtube_link=%s AND Track.album_id=Album.id AND Track.artist_id=Artist.id''',(video,))
track = db._cur.fetchall()
if len(track) > 1:
print('\nWARNING: More than one track has the same video.\n')
for ii,t in enumerate(track):
print()
track_id,track_name,artist_name,album_name,artist_id,album_id,youtube_link = track[ii]
print('Track '+str(ii)+' is: ',track_id,track_name,artist_name,album_name,artist_id,album_id, youtube_link)
ii=int(input('\nWhat track do you want to use? '))
else:
ii=0
track_id,track_name,artist_name,album_name,artist_id,album_id,youtube_link = track[ii]
print('Track '+str(ii)+' is: ',track_id,track_name,artist_name,album_name,artist_id,album_id, youtube_link)
#yesorno = input('Do you want to delete this track and add it to the ignore lists? (yes/no): ')
yesorno='no'
if yesorno.lower()=='yes':
db._cur.execute('''SELECT Playlist.*,Station.* FROM Playlist JOIN Station WHERE Playlist.track_id=%s AND Playlist.station_id=Station.id''',(track_id,))
stations = db._cur.fetchall()
unique_station = {}
for s in stations:
playlist_entry_id, track_id, pl_station_id,playtime,station_id,station_name,station_url,ignore_artists,ignore_titles,playlist_url = s
unique_station[station_id] = (station_name,station_url,ignore_artists,ignore_titles,playlist_url)
print(unique_station)
for id in unique_station:
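# Editor's note: the ignore lists are stored as Python list literals in
# the database; exec() rebuilds them as lists here. ast.literal_eval()
# would be a safer alternative for untrusted input.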
exec('ignore_artists = ' + unique_station[id][2])
exec('ignore_titles = ' + unique_station[id][3])
if artist_name not in ignore_artists:
ignore_artists.append(artist_name)
if track_name not in ignore_titles:
ignore_titles.append(track_name)
unique_station[id] = unique_station[id][0],unique_station[id][1],str(ignore_artists),str(ignore_titles),unique_station[id][4]
db._cur.execute('''
UPDATE Station
SET ignore_artists=%s, ignore_titles=%s
WHERE Station.id=%s
''',(str(ignore_artists),str(ignore_titles), id))
db._conn.commit()
print(unique_station)
# Get all tracks with the matching artist id and album id
all_tracks = []
db._cur.execute('''SELECT Track.id FROM Track WHERE Track.album_id=%s AND Track.artist_id=%s''',(album_id,artist_id))
for id in db._cur.fetchall():
if id not in all_tracks:
all_tracks.append(id[0])
for id in all_tracks:
# Remove the station entries
db._cur.execute('''DELETE FROM Playlist WHERE Playlist.track_id=%s''',(id,))
# Remove the track entries
db._cur.execute('''DELETE FROM Track WHERE Track.id=%s''',(id,))
# Remove the album entries
db._cur.execute('''DELETE FROM Album WHERE Album.id=%s''',(album_id,))
# Remove the artist entries
db._cur.execute('''DELETE FROM Artist WHERE Artist.id=%s''',(artist_id,))
db._conn.commit()
#Tracks = db._cur.fetchall()
else:
#yesorno = input('Do you want to update the youtube URL for this track? (yes/no): ')
yesorno='yes'
if yesorno.lower() == 'yes':
url = input('Enter the new youtube url: ')
if url == '':
print('No URL Specified... Exiting.')
else:
if url.startswith('https://youtu.be/'):
pass
elif url.startswith('https://www.youtube.com/watch?v='):
url = url.replace('https://www.youtube.com/watch?v=','https://youtu.be/')
else:
url = 'https://youtu.be/'+url
db._cur.execute('''
UPDATE Track
SET youtube_link=%s
WHERE Track.id=%s
''',(url,track_id))
db._conn.commit()
else:
print('Not modifying database.')
| mit |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/textwrap.py | 53 | 17265 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z' # end of chunk
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
# recompile the regexes for Unicode mode -- done in this clumsy way for
# backwards compatibility because it's rather common to monkey-patch
# the TextWrapper class' wordsep_re attribute.
self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
self.wordsep_simple_re_uni = re.compile(
self.wordsep_simple_re.pattern, re.U)
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if isinstance(text, unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
else:
if self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = filter(None, chunks) # remove empty chunks
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
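# Editor's illustrative note: with width=10 and break_long_words left at
# its default of True, wrap('antidisestablishmentarianism') yields
# ['antidisest', 'ablishment', 'arianism'].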
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
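# Editor's illustrative note: dedent("    a\n      b\n    c") finds the
# common margin "    " and returns "a\n  b\nc".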
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print dedent("Hello there.\n This is indented.")
| bsd-2-clause |
1st/django | django/contrib/flatpages/templatetags/flatpages.py | 472 | 3632 | from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
if 'request' in context:
site_pk = get_current_site(context['request']).pk
else:
site_pk = settings.SITE_ID
flatpages = FlatPage.objects.filter(sites__id=site_pk)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context))
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated():
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ''
@register.tag
def get_flatpages(parser, token):
"""
Retrieves all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause can be used to control the user whose
permissions are to be used in determining which flatpages are visible.
An optional argument, ``starts_with``, can be applied to limit the
returned flatpages to those beginning with a particular base URL.
This argument can be passed as a variable or a string, as it resolves
from the template context.
Syntax::
{% get_flatpages ['url_starts_with'] [for user] as context_name %}
Example usage::
{% get_flatpages as flatpages %}
{% get_flatpages for someuser as flatpages %}
{% get_flatpages '/about/' as about_pages %}
{% get_flatpages prefix as about_pages %}
{% get_flatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" %
dict(tag_name=bits[0]))
# Must have 3-6 bits in the tag
if len(bits) >= 3 and len(bits) <= 6:
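# Editor's illustrative note: for
# {% get_flatpages '/about/' for someuser as about_pages %}
# bits == ['get_flatpages', "'/about/'", 'for', 'someuser', 'as',
# 'about_pages'] -- six bits, so the prefix is bits[1], the user is
# bits[-3] and the context name is bits[-1].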
# If there's an even number of bits, there's no prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, there is a user defined
if len(bits) >= 5:
if bits[-4] != 'for':
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FlatpageNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
| bsd-3-clause |
araisrobo/machinekit | src/hal/user_comps/hal_temp_ads7828.py | 12 | 3854 | #!/usr/bin/python
# encoding: utf-8
"""
Temperature.py
Created by Alexander Rössler on 2014-03-24.
"""
from drivers.ADS7828 import ADS7828
from fdm.r2temp import R2Temp
import argparse
import time
import sys
import hal
class Pin:
def __init__(self):
self.pin = 0
self.r2temp = None
self.halValuePin = 0
self.halRawPin = 0
self.filterSamples = []
self.filterSize = 10
self.rawValue = 0.0
def addSample(self, value):
self.filterSamples.append(value)
if (len(self.filterSamples) > self.filterSize):
self.filterSamples.pop(0)
sampleSum = 0.0
for sample in self.filterSamples:
sampleSum += sample
self.rawValue = sampleSum / len(self.filterSamples)
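# Editor's illustrative note: with filterSize = 3, feeding the samples
# 1.0, 2.0, 3.0, 4.0 leaves filterSamples == [2.0, 3.0, 4.0] and
# rawValue == 3.0 -- a simple moving average.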
def getHalName(pin):
return "ch-" + '{0:02d}'.format(pin.pin)
def adc2Temp(pin):
R1 = 4700.0
R2 = R1 / max(4095.0 / pin.rawValue - 1.0, 0.000001)
return round(pin.r2temp.r2t(R2) * 10.0) / 10.0
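# Editor's note: this appears to assume the thermistor is the lower leg
# of a voltage divider with a 4.7k pull-up, so R2 = R1 / (4095/raw - 1);
# a mid-scale reading of raw = 2048 gives R2 of roughly 4702 ohms, which
# r2t() converts to a temperature.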
parser = argparse.ArgumentParser(description='HAL component to read Temperature values over I2C')
parser.add_argument('-n', '--name', help='HAL component name', required=True)
parser.add_argument('-b', '--bus_id', help='I2C bus id', default=2)
parser.add_argument('-a', '--address', help='I2C device address', default=0x20)
parser.add_argument('-i', '--interval', help='I2C update interval', default=0.05)
parser.add_argument('-c', '--channels', help='Comma-separated list of channels and thermistors to use e.g. 01:semitec_103GT_2,02:epcos_B57560G1104', required=True)
parser.add_argument('-f', '--filter_size', help='Size of the low pass filter to use', default=10)
parser.add_argument('-d', '--delay', help='Delay before the first I2C update', default=0.0)
args = parser.parse_args()
updateInterval = float(args.interval)
delayInterval = float(args.delay)
filterSize = int(args.filter_size)
error = True
watchdog = True
adc = ADS7828(busId=int(args.bus_id),
address=int(args.address))
# Create pins
pins = []
if (args.channels != ""):
channelsRaw = args.channels.split(',')
for channel in channelsRaw:
pinRaw = channel.split(':')
if (len(pinRaw) != 2):
print(("wrong input"))
sys.exit(1)
pin = Pin()
pin.pin = int(pinRaw[0])
if ((pin.pin > 7) or (pin.pin < 0)):
print(("Pin not available"))
sys.exit(1)
if (pinRaw[1] != "none"):
pin.r2temp = R2Temp(pinRaw[1])
pin.filterSize = filterSize
pins.append(pin)
# Initialize HAL
h = hal.component(args.name)
for pin in pins:
pin.halRawPin = h.newpin(getHalName(pin) + ".raw", hal.HAL_FLOAT, hal.HAL_OUT)
if (pin.r2temp is not None):
pin.halValuePin = h.newpin(getHalName(pin) + ".value", hal.HAL_FLOAT, hal.HAL_OUT)
halErrorPin = h.newpin("error", hal.HAL_BIT, hal.HAL_OUT)
halNoErrorPin = h.newpin("no-error", hal.HAL_BIT, hal.HAL_OUT)
halWatchdogPin = h.newpin("watchdog", hal.HAL_BIT, hal.HAL_OUT)
h.ready()
halErrorPin.value = error
halNoErrorPin.value = not error
halWatchdogPin.value = watchdog
try:
time.sleep(delayInterval)
while (True):
try:
for pin in pins:
value = float(adc.readChannel(pin.pin))
pin.addSample(value)
pin.halRawPin.value = pin.rawValue
if (pin.r2temp is not None):
pin.halValuePin.value = adc2Temp(pin)
error = False
except IOError as e:
error = True
halErrorPin.value = error
halNoErrorPin.value = not error
watchdog = not watchdog
halWatchdogPin.value = watchdog
time.sleep(updateInterval)
except:
print(("exiting HAL component " + args.name))
h.exit()
| lgpl-2.1 |
tensorflow/tensorflow | tensorflow/python/keras/tests/model_subclassing_compiled_test.py | 6 | 14360 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compiled Model subclassing."""
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.tests import model_subclassing_test_util as model_util
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
@keras_parameterized.run_all_keras_modes
class ModelSubclassCompiledTest(keras_parameterized.TestCase):
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = testing_utils.SmallSubclassMLP(
num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc', keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_datasets(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = testing_utils.SmallSubclassMLP(
num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim), dtype=np.float32)
y = np.zeros((num_samples, num_classes), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(dataset, steps=10, verbose=0)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
model = BNNet()
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
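# One training batch updates the batch-norm parameters and moving
# statistics, so predictions on the same input should shift noticeably.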
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
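# Dropout is inactive at inference time and the all-ones kernel just
# sums each input row, so the totals of x and y match.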
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
def test_saving(self):
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
if h5py is not None:
with self.assertRaises(ValueError):
model.load_weights(hdf5_format_name)
model.load_weights(tf_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
if h5py is not None:
model.load_weights(hdf5_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = model_util.NestedTestModel1(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = model_util.NestedTestModel2(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = model_util.get_nested_model_3(
input_dim=input_dim, num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
def test_subclass_nested_in_sequential(self):
num_classes = 2
num_samples = 100
input_dim = 50
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
model = keras.Sequential([Inner()])
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.trainable_weights), 6)
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
    # case it defaults to the value corresponding to the Model method being used
# (fit -> True, predict -> False, etc).
# If the user writes their model `call` method to take
# an explicit `training` argument, we must check that the correct value
# is being passed to the model for each method call.
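    # For example (illustrative note, not from the original test):
    # model.predict(x) runs call() with training=False, while model.fit(...)
    # and model.train_on_batch(...) run it with training=True, so the Dropout
    # layer below is active only during training.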
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
zhengwsh/InplusTrader_Linux | InplusTrader/backtestEngine/api/ext.py | 1 | 1329 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from ..execution_context import ExecutionContext
from .api_base import instruments
def get_current_bar_dict():
bar_dict = ExecutionContext.get_current_bar_dict()
return bar_dict
def price_change(stock):
bar_dict = get_current_bar_dict()
return bar_dict[stock].close / bar_dict[stock].prev_close - 1
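# Worked example (sketch): with close=11.0 and prev_close=10.0 the function
# returns 11.0 / 10.0 - 1 = 0.10, i.e. a 10% gain over the previous close.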
def symbol(order_book_id, split=", "):
if isinstance(order_book_id, six.string_types):
return "{}[{}]".format(order_book_id, instruments(order_book_id).symbol)
else:
s = split.join(symbol(item) for item in order_book_id)
return s
def now_time_str(str_format="%H:%M:%S"):
dt = ExecutionContext.get_current_trading_dt()
return dt.strftime(str_format)
| mit |
matt-jordan/mjmud | tests/game/commands/standard/test_standard_set.py | 1 | 2831 | #
# mjmud - The neverending MUD project
#
# Copyright (c) 2014, Matt Jordan
#
# See https://github.com/matt-jordan/mjmud for more information about the
# project. Please do not contact the maintainers of the project for information
# or assistance. The project uses Github for these purposes.
#
# This program is free software, distributed under the conditions of the MIT
# License (MIT). See the LICENSE file at the top of the source tree for
# details.
import unittest
from lib.commands.command_set import CommandParserError
from game.commands.standard.quit_command import QuitCommand
from game.commands.standard.say_command import SayCommand
from game.commands.standard.standard_set import StandardCommandSet, \
UnknownResponse
class TestStandardCommandSet(unittest.TestCase):
"""Test the StandardCommandSet"""
def test_parse_invalid_json(self):
"""Test providing an invalid JSON object to the command set"""
exception_raised = False
result = None
command_set = StandardCommandSet()
try:
result = command_set.parse(None)
except CommandParserError:
exception_raised = True
self.assertTrue(exception_raised)
self.assertIsNone(result)
def test_parse_no_command(self):
"""Test providing a valid JSON object that contains no command"""
exception_raised = False
result = None
command_set = StandardCommandSet()
try:
result = command_set.parse({})
except CommandParserError:
exception_raised = True
self.assertTrue(exception_raised)
self.assertIsNone(result)
def test_parse_unknown(self):
"""Test the parse function with an unknown command"""
exception_raised = False
result = None
command_set = StandardCommandSet()
try:
result = command_set.parse({'mudcommand': 'test_I_dont_exist'})
except CommandParserError:
exception_raised = True
self.assertTrue(exception_raised)
self.assertIsNone(result)
def test_parse_quit(self):
"""Test parsing a quit command"""
command_set = StandardCommandSet()
command = command_set.parse({'mudcommand': 'quit'})
self.assertTrue(isinstance(command, QuitCommand))
def test_parse_say(self):
"""Test parsing of a say command"""
command_set = StandardCommandSet()
command = command_set.parse({'mudcommand': 'say'})
self.assertTrue(isinstance(command, SayCommand))
command = command_set.parse({'mudcommand': 'say',
'text': 'hello there'})
self.assertTrue(isinstance(command, SayCommand))
self.assertEqual('hello there', command.text)
if __name__ == "__main__":
unittest.main()
| mit |
pselle/calibre | src/calibre/ptempfile.py | 17 | 7583 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""
Provides platform independent temporary files that persist even after
being closed.
"""
import tempfile, os, atexit
from future_builtins import map
from calibre.constants import (__version__, __appname__, filesystem_encoding,
get_unicode_windows_env_var, iswindows, get_windows_temp_path)
def cleanup(path):
try:
import os as oss
if oss.path.exists(path):
oss.remove(path)
except:
pass
_base_dir = None
def remove_dir(x):
try:
import shutil
shutil.rmtree(x, ignore_errors=True)
except:
pass
def determined_remove_dir(x):
for i in range(10):
try:
import shutil
shutil.rmtree(x)
return
except:
import os # noqa
if os.path.exists(x):
# In case some other program has one of the temp files open.
import time
time.sleep(0.1)
else:
return
try:
import shutil
shutil.rmtree(x, ignore_errors=True)
except:
pass
def app_prefix(prefix):
if iswindows:
return '%s_'%__appname__
return '%s_%s_%s'%(__appname__, __version__, prefix)
def reset_temp_folder_permissions():
# There are some broken windows installs where the permissions for the temp
# folder are set to not be executable, which means chdir() into temp
# folders fails. Try to fix that by resetting the permissions on the temp
# folder.
global _base_dir
if iswindows and _base_dir:
import subprocess
from calibre import prints
parent = os.path.dirname(_base_dir)
retcode = subprocess.Popen(['icacls.exe', parent, '/reset', '/Q', '/T']).wait()
prints('Trying to reset permissions of temp folder', parent, 'return code:', retcode)
def base_dir():
global _base_dir
if _base_dir is not None and not os.path.exists(_base_dir):
# Some people seem to think that running temp file cleaners that
# delete the temp dirs of running programs is a good idea!
_base_dir = None
if _base_dir is None:
td = os.environ.get('CALIBRE_WORKER_TEMP_DIR', None)
if td is not None:
import cPickle, binascii
try:
td = cPickle.loads(binascii.unhexlify(td))
except:
td = None
if td and os.path.exists(td):
_base_dir = td
else:
base = os.environ.get('CALIBRE_TEMP_DIR', None)
if base is not None and iswindows:
base = get_unicode_windows_env_var('CALIBRE_TEMP_DIR')
prefix = app_prefix(u'tmp_')
if base is None and iswindows:
# On windows, if the TMP env var points to a path that
# cannot be encoded using the mbcs encoding, then the
# python 2 tempfile algorithm for getting the temporary
# directory breaks. So we use the win32 api to get a
# unicode temp path instead. See
# https://bugs.launchpad.net/bugs/937389
base = get_windows_temp_path()
_base_dir = tempfile.mkdtemp(prefix=prefix, dir=base)
atexit.register(determined_remove_dir if iswindows else remove_dir, _base_dir)
try:
tempfile.gettempdir()
except:
            # Windows temp vars set to a path not encodable in mbcs
# Use our temp dir
tempfile.tempdir = _base_dir
return _base_dir
def reset_base_dir():
global _base_dir
_base_dir = None
base_dir()
def force_unicode(x):
# Cannot use the implementation in calibre.__init__ as it causes a circular
# dependency
if isinstance(x, bytes):
x = x.decode(filesystem_encoding)
return x
def _make_file(suffix, prefix, base):
suffix, prefix = map(force_unicode, (suffix, prefix))
return tempfile.mkstemp(suffix, prefix, dir=base)
def _make_dir(suffix, prefix, base):
suffix, prefix = map(force_unicode, (suffix, prefix))
return tempfile.mkdtemp(suffix, prefix, base)
class PersistentTemporaryFile(object):
"""
A file-like object that is a temporary file that is available even after being closed on
all platforms. It is automatically deleted on normal program termination.
"""
_file = None
def __init__(self, suffix="", prefix="", dir=None, mode='w+b'):
if prefix is None:
prefix = ""
if dir is None:
dir = base_dir()
fd, name = _make_file(suffix, prefix, dir)
self._file = os.fdopen(fd, mode)
self._name = name
self._fd = fd
atexit.register(cleanup, name)
def __getattr__(self, name):
if name == 'name':
return self.__dict__['_name']
return getattr(self.__dict__['_file'], name)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __del__(self):
try:
self.close()
except:
pass
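# Minimal usage sketch (not part of the original module): the file persists
# after close() and is only deleted by the atexit cleanup hook at exit.
#
#   pt = PersistentTemporaryFile(suffix='.epub')
#   pt.write(b'payload')
#   pt.close()
#   assert os.path.exists(pt.name)  # still on disk until program termination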
def PersistentTemporaryDirectory(suffix='', prefix='', dir=None):
'''
Return the path to a newly created temporary directory that will
be automatically deleted on application exit.
'''
if dir is None:
dir = base_dir()
tdir = _make_dir(suffix, prefix, dir)
atexit.register(remove_dir, tdir)
return tdir
class TemporaryDirectory(object):
'''
A temporary directory to be used in a with statement.
'''
def __init__(self, suffix='', prefix='', dir=None, keep=False):
self.suffix = suffix
self.prefix = prefix
if dir is None:
dir = base_dir()
self.dir = dir
self.keep = keep
def __enter__(self):
if not hasattr(self, 'tdir'):
self.tdir = _make_dir(self.suffix, self.prefix, self.dir)
return self.tdir
def __exit__(self, *args):
if not self.keep and os.path.exists(self.tdir):
remove_dir(self.tdir)
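# Usage sketch (assumed typical use): the directory lives for the duration of
# the with block and is removed on exit unless keep=True was passed.
#
#   with TemporaryDirectory(prefix='conv_') as tdir:
#       work_path = os.path.join(tdir, 'out.tmp')
#       # ... use work_path ...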
class TemporaryFile(object):
def __init__(self, suffix="", prefix="", dir=None, mode='w+b'):
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
if dir is None:
dir = base_dir()
self.prefix, self.suffix, self.dir, self.mode = prefix, suffix, dir, mode
self._file = None
def __enter__(self):
fd, name = _make_file(self.suffix, self.prefix, self.dir)
self._file = os.fdopen(fd, self.mode)
self._name = name
self._file.close()
return name
def __exit__(self, *args):
cleanup(self._name)
class SpooledTemporaryFile(tempfile.SpooledTemporaryFile):
def __init__(self, max_size=0, suffix="", prefix="", dir=None, mode='w+b',
bufsize=-1):
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
if dir is None:
dir = base_dir()
tempfile.SpooledTemporaryFile.__init__(self, max_size=max_size,
suffix=suffix, prefix=prefix, dir=dir, mode=mode,
bufsize=bufsize)
def truncate(self, *args):
# The stdlib SpooledTemporaryFile implementation of truncate() doesn't
# allow specifying a size.
self._file.truncate(*args)
def better_mktemp(*args, **kwargs):
fd, path = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
return path
| gpl-3.0 |
mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py | 15 | 3297 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""""Tests code paths not covered by the regular unit tests."""
import unittest
from webkitpy.layout_tests.layout_package.test_failures import *
class Test(unittest.TestCase):
def assert_loads(self, cls):
failure_obj = cls()
s = failure_obj.dumps()
new_failure_obj = TestFailure.loads(s)
self.assertTrue(isinstance(new_failure_obj, cls))
self.assertEqual(failure_obj, new_failure_obj)
# Also test that != is implemented.
self.assertFalse(failure_obj != new_failure_obj)
def test_crash(self):
FailureCrash()
def test_hash_incorrect(self):
FailureImageHashIncorrect()
def test_missing(self):
FailureMissingResult()
def test_missing_image(self):
FailureMissingImage()
def test_missing_image_hash(self):
FailureMissingImageHash()
def test_timeout(self):
FailureTimeout()
def test_unknown_failure_type(self):
class UnknownFailure(TestFailure):
pass
failure_obj = UnknownFailure()
self.assertRaises(ValueError, determine_result_type, [failure_obj])
self.assertRaises(NotImplementedError, failure_obj.message)
def test_loads(self):
for c in ALL_FAILURE_CLASSES:
self.assert_loads(c)
def test_equals(self):
self.assertEqual(FailureCrash(), FailureCrash())
self.assertNotEqual(FailureCrash(), FailureTimeout())
crash_set = set([FailureCrash(), FailureCrash()])
self.assertEqual(len(crash_set), 1)
# The hash happens to be the name of the class, but sets still work:
crash_set = set([FailureCrash(), "FailureCrash"])
self.assertEqual(len(crash_set), 2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
javier-ruiz-b/docker-rasppi-images | raspberry-google-home/env/lib/python3.7/site-packages/requests_oauthlib/compliance_fixes/facebook.py | 6 | 1119 | from json import dumps
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from oauthlib.common import to_unicode
def facebook_compliance_fix(session):
def _compliance_fix(r):
# if Facebook claims to be sending us json, let's trust them.
if "application/json" in r.headers.get("content-type", {}):
return r
# Facebook returns a content-type of text/plain when sending their
# x-www-form-urlencoded responses, along with a 200. If not, let's
# assume we're getting JSON and bail on the fix.
if "text/plain" in r.headers.get("content-type", {}) and r.status_code == 200:
token = dict(parse_qsl(r.text, keep_blank_values=True))
else:
return r
expires = token.get("expires")
if expires is not None:
token["expires_in"] = expires
token["token_type"] = "Bearer"
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
session.register_compliance_hook("access_token_response", _compliance_fix)
return session
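# Typical wiring (hedged sketch; client_id and redirect_uri are placeholder
# values): apply the fix to an OAuth2Session before the token exchange so the
# form-encoded token response is rewritten into the JSON shape oauthlib
# expects.
#
#   from requests_oauthlib import OAuth2Session
#   session = facebook_compliance_fix(
#       OAuth2Session(client_id, redirect_uri=redirect_uri))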
| apache-2.0 |
mkrupcale/ansible | hacking/metadata-tool.py | 14 | 20756 | #!/usr/bin/env python
import ast
import csv
import os
import sys
from collections import defaultdict
from distutils.version import StrictVersion
from pprint import pformat, pprint
import yaml
from ansible.module_utils._text import to_text
from ansible.plugins import module_loader
# There's a few files that are not new-style modules. Have to blacklist them
NONMODULE_PY_FILES = frozenset(('async_wrapper.py',))
NONMODULE_MODULE_NAMES = frozenset(os.path.splitext(p)[0] for p in NONMODULE_PY_FILES)
# Default metadata
DEFAULT_METADATA = {'version': '1.0', 'status': ['preview'], 'supported_by': 'community'}
class ParseError(Exception):
"""Thrown when parsing a file fails"""
pass
class MissingModuleError(Exception):
"""Thrown when unable to find a plugin"""
pass
def usage():
print("""Usage:
metadata-tester.py report [--version X]
metadata-tester.py add [--version X] [--overwrite] CSVFILE
metadata-tester.py add-default [--version X] [--overwrite]""")
sys.exit(1)
def parse_args(arg_string):
if len(arg_string) < 1:
usage()
action = arg_string[0]
version = None
if '--version' in arg_string:
version_location = arg_string.index('--version')
arg_string.pop(version_location)
version = arg_string.pop(version_location)
overwrite = False
if '--overwrite' in arg_string:
overwrite = True
arg_string.remove('--overwrite')
csvfile = None
if len(arg_string) == 2:
csvfile = arg_string[1]
elif len(arg_string) > 2:
usage()
return action, {'version': version, 'overwrite': overwrite, 'csvfile': csvfile}
def seek_end_of_dict(module_data, start_line, start_col, next_node_line, next_node_col):
"""Look for the end of a dict in a set of lines
We know the starting position of the dict and we know the start of the
next code node but in between there may be multiple newlines and comments.
There may also be multiple python statements on the same line (separated
by semicolons)
Examples::
ANSIBLE_METADATA = {[..]}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} # Optional comments with confusing junk => {}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {
[..]
}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} ; DOCUMENTATION = [..]
ANSIBLE_METADATA = {}EOF
"""
    if next_node_line is None:
# The dict is the last statement in the file
snippet = module_data.splitlines()[start_line:]
next_node_col = 0
# Include the last line in the file
last_line_offset = 0
else:
# It's somewhere in the middle so we need to separate it from the rest
snippet = module_data.splitlines()[start_line:next_node_line]
# Do not include the last line because that's where the next node
# starts
last_line_offset = 1
if next_node_col == 0:
# This handles all variants where there are only comments and blank
# lines between the dict and the next code node
# Step backwards through all the lines in the snippet
for line_idx, line in tuple(reversed(tuple(enumerate(snippet))))[last_line_offset:]:
end_col = None
# Step backwards through all the characters in the line
for col_idx, char in reversed(tuple(enumerate(c for c in line))):
if char == '}' and end_col is None:
# Potentially found the end of the dict
end_col = col_idx
elif char == '#' and end_col is not None:
# The previous '}' was part of a comment. Keep trying
end_col = None
if end_col is not None:
# Found the end!
end_line = start_line + line_idx
break
else:
# Harder cases involving multiple statements on one line
# Good Ansible Module style doesn't do this so we're just going to
# treat this as an error for now:
        raise ParseError('Multiple statements per line confuse the module metadata parser.')
return end_line, end_col
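# Worked example (sketch). Note the mixed conventions used by the caller in
# extract_metadata(): start_line is 0-based, while next_node_line is the AST's
# 1-based lineno of the statement after the dict.
#
#   src = ("ANSIBLE_METADATA = {\n"
#          "    'version': '1.0'\n"
#          "}\n"
#          "DOCUMENTATION = ''\n")
#   seek_end_of_dict(src, 0, 0, 4, 0)  # -> (2, 0): the '}' on line 2, col 0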
def seek_end_of_string(module_data, start_line, start_col, next_node_line, next_node_col):
"""
This is much trickier than finding the end of a dict. A dict has only one
ending character, "}". Strings have four potential ending characters. We
have to parse the beginning of the string to determine what the ending
character will be.
Examples:
ANSIBLE_METADATA = '''[..]''' # Optional comment with confusing chars '''
# Optional comment with confusing chars '''
DOCUMENTATION = [..]
ANSIBLE_METADATA = '''
[..]
'''
DOCUMENTATIONS = [..]
ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
SHORT_NAME = ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
String marker variants:
* '[..]'
* "[..]"
* '''[..]'''
* \"\"\"[..]\"\"\"
Each of these come in u, r, and b variants:
* '[..]'
* u'[..]'
* b'[..]'
* r'[..]'
* ur'[..]'
* ru'[..]'
* br'[..]'
* b'[..]'
* rb'[..]'
"""
raise NotImplementedError('Finding end of string not yet implemented')
def extract_metadata(module_data):
"""Extract the metadata from a module
:arg module_data: Byte string containing a module's code
:returns: a tuple of metadata (a dict), line the metadata starts on,
column the metadata starts on, line the metadata ends on, column the
metadata ends on, and the names the metadata is assigned to. One of
the names the metadata is assigned to will be ANSIBLE_METADATA If no
metadata is found, the tuple will be (None, -1, -1, -1, -1, None)
"""
metadata = None
start_line = -1
start_col = -1
end_line = -1
end_col = -1
targets = None
mod_ast_tree = ast.parse(module_data)
for root_idx, child in enumerate(mod_ast_tree.body):
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'ANSIBLE_METADATA':
if isinstance(child.value, ast.Dict):
metadata = ast.literal_eval(child.value)
try:
# Determine where the next node starts
next_node = mod_ast_tree.body[root_idx+1]
next_lineno = next_node.lineno
next_col_offset = next_node.col_offset
except IndexError:
# Metadata is defined in the last node of the file
next_lineno = None
next_col_offset = None
# Determine where the current metadata ends
end_line, end_col = seek_end_of_dict(module_data,
child.lineno - 1, child.col_offset, next_lineno,
next_col_offset)
elif isinstance(child.value, ast.Str):
metadata = yaml.safe_load(child.value.s)
end_line = seek_end_of_string(module_data)
elif isinstance(child.value, ast.Bytes):
metadata = yaml.safe_load(to_text(child.value.s, errors='surrogate_or_strict'))
end_line = seek_end_of_string(module_data)
else:
# Example:
# ANSIBLE_METADATA = 'junk'
# ANSIBLE_METADATA = { [..the real metadata..] }
continue
# Do these after the if-else so we don't pollute them in
# case this was a false positive
start_line = child.lineno - 1
start_col = child.col_offset
targets = [t.id for t in child.targets]
break
if metadata is not None:
# Once we've found the metadata we're done
break
return metadata, start_line, start_col, end_line, end_col, targets
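# Sketch: for a module consisting of a single assignment, the whole dict is
# recovered together with its coordinates and assignment targets:
#
#   extract_metadata(b"ANSIBLE_METADATA = {'version': '1.0'}\n")
#   # -> ({'version': '1.0'}, 0, 0, 0, <column of '}'>, ['ANSIBLE_METADATA'])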
def find_documentation(module_data):
"""Find the DOCUMENTATION metadata for a module file"""
start_line = -1
mod_ast_tree = ast.parse(module_data)
for child in mod_ast_tree.body:
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'DOCUMENTATION':
start_line = child.lineno - 1
break
return start_line
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
"""Remove a section of a module file"""
lines = module_data.split('\n')
new_lines = lines[:start_line]
if start_col != 0:
new_lines.append(lines[start_line][:start_col])
next_line = lines[end_line]
if len(next_line) - 1 != end_col:
new_lines.append(next_line[end_col:])
if len(lines) > end_line:
new_lines.extend(lines[end_line + 1:])
return '\n'.join(new_lines)
def insert_metadata(module_data, new_metadata, insertion_line, targets=('ANSIBLE_METADATA',)):
"""Insert a new set of metadata at a specified line"""
assignments = ' = '.join(targets)
pretty_metadata = pformat(new_metadata, width=1).split('\n')
new_lines = []
new_lines.append('{} = {}'.format(assignments, pretty_metadata[0]))
if len(pretty_metadata) > 1:
for line in pretty_metadata[1:]:
new_lines.append('{}{}'.format(' ' * (len(assignments) - 1 + len(' = {')), line))
old_lines = module_data.split('\n')
lines = old_lines[:insertion_line] + new_lines + [''] + old_lines[insertion_line:]
return '\n'.join(lines)
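# Sketch: inserting a one-key dict at line 0 of "DOCUMENTATION = ''" yields
# the assignment followed by a blank separator line:
#
#   insert_metadata("DOCUMENTATION = ''", {'version': '1.0'}, 0)
#   # -> "ANSIBLE_METADATA = {'version': '1.0'}\n\nDOCUMENTATION = ''"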
def parse_assigned_metadata_initial(csvfile):
"""
Fields:
:0: Module name
:1: Core (x if so)
:2: Extras (x if so)
:3: Category
:4: Supported/SLA
:5: Committer
:6: Stable
:7: Deprecated
:8: Notes
:9: Team Notes
:10: Notes 2
:11: final supported_by field
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
if record[12] == 'core':
supported_by = 'core'
elif record[12] == 'curated':
supported_by = 'committer'
elif record[12] == 'community':
supported_by = 'community'
            else:
                print('Module %s has no supported_by field. Using community' % record[0])
                supported_by = 'community'
status = []
if record[6]:
status.append('stableinterface')
if record[7]:
status.append('deprecated')
if not status:
status.extend(DEFAULT_METADATA['status'])
yield (module, {'version': DEFAULT_METADATA['version'], 'supported_by': supported_by, 'status': status})
def parse_assigned_metadata(csvfile):
"""
Fields:
:0: Module name
:1: supported_by string. One of the valid support fields
core, community, unmaintained, committer
:2: stableinterface
:3: preview
:4: deprecated
:5: removed
:6: tested
https://github.com/ansible/proposals/issues/30
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
supported_by = record[1]
status = []
if record[2]:
status.append('stableinterface')
if record[4]:
status.append('deprecated')
if record[5]:
status.append('removed')
if record[6]:
status.append('tested')
if not status or record[3]:
status.append('preview')
yield (module, {'version': '1.0', 'supported_by': supported_by, 'status': status})
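# Example row (sketch) and the record it yields -- column 2 marks the module
# stableinterface, columns 3-6 are empty:
#
#   copy,community,x,,,,
#   -> ('copy', {'version': '1.0', 'supported_by': 'community',
#                'status': ['stableinterface']})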
def write_metadata(filename, new_metadata, version=None, overwrite=False):
with open(filename, 'rb') as f:
module_data = f.read()
try:
current_metadata, start_line, start_col, end_line, end_col, targets = extract_metadata(module_data)
except SyntaxError:
if filename.endswith('.py'):
raise
# Probably non-python modules. These should all have python
# documentation files where we can place the data
raise ParseError('Could not add metadata to {}'.format(filename))
if current_metadata is None:
# No curent metadata so we can just add it
start_line = find_documentation(module_data)
if start_line < 0:
if os.path.basename(filename) in NONMODULE_PY_FILES:
# These aren't new-style modules
return
raise Exception('Module file {} had no ANSIBLE_METADATA or DOCUMENTATION'.format(filename))
module_data = insert_metadata(module_data, new_metadata, start_line, targets=('ANSIBLE_METADATA',))
elif overwrite or (version is not None and ('version' not in current_metadata or StrictVersion(current_metadata['version']) < StrictVersion(version))):
# Current metadata that we do not want. Remove the current
# metadata and put the new version in its place
module_data = remove_metadata(module_data, start_line, start_col, end_line, end_col)
module_data = insert_metadata(module_data, new_metadata, start_line, targets=targets)
else:
# Current metadata and we don't want to overwrite it
return
# Save the new version of the module
with open(filename, 'wb') as f:
f.write(module_data)
def return_metadata(plugins):
metadata = {}
for name, filename in plugins:
# There may be several files for a module (if it is written in another
# language, for instance) but only one of them (the .py file) should
# contain the metadata.
if name not in metadata or metadata[name] is not None:
with open(filename, 'rb') as f:
module_data = f.read()
metadata[name] = extract_metadata(module_data)[0]
return metadata
def metadata_summary(plugins, version=None):
"""Compile information about the metadata status for a list of modules
:arg plugins: List of plugins to look for. Each entry in the list is
a tuple of (module name, full path to module)
:kwarg version: If given, make sure the modules have this version of
metadata or higher.
:returns: A tuple consisting of a list of modules with no metadata at the
required version and a list of files that have metadata at the
required version.
"""
no_metadata = {}
has_metadata = {}
supported_by = defaultdict(set)
status = defaultdict(set)
plugins = list(plugins)
all_mods_metadata = return_metadata(plugins)
for name, filename in plugins:
# Does the module have metadata?
if name not in no_metadata and name not in has_metadata:
metadata = all_mods_metadata[name]
if metadata is None:
no_metadata[name] = filename
elif version is not None and ('version' not in metadata or StrictVersion(metadata['version']) < StrictVersion(version)):
no_metadata[name] = filename
else:
has_metadata[name] = filename
# What categories does the plugin belong in?
if all_mods_metadata[name] is None:
# No metadata for this module. Use the default metadata
supported_by[DEFAULT_METADATA['supported_by']].add(filename)
status[DEFAULT_METADATA['status'][0]].add(filename)
else:
supported_by[all_mods_metadata[name]['supported_by']].add(filename)
for one_status in all_mods_metadata[name]['status']:
status[one_status].add(filename)
return list(no_metadata.values()), list(has_metadata.values()), supported_by, status
#
# Subcommands
#
def add_from_csv(csv_file, version=None, overwrite=False):
"""Implement the subcommand to add metadata from a csv file
"""
# Add metadata for everything from the CSV file
diagnostic_messages = []
for module_name, new_metadata in parse_assigned_metadata_initial(csv_file):
filename = module_loader.find_plugin(module_name, mod_type='.py')
if filename is None:
diagnostic_messages.append('Unable to find the module file for {}'.format(module_name))
continue
try:
write_metadata(filename, new_metadata, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def add_default(version=None, overwrite=False):
"""Implement the subcommand to add default metadata to modules
Add the default metadata to any plugin which lacks it.
:kwarg version: If given, the metadata must be at least this version.
Otherwise, treat the module as not having existing metadata.
:kwarg overwrite: If True, overwrite any existing metadata. Otherwise,
do not modify files which have metadata at an appropriate version
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
# Iterate through each plugin
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
try:
write_metadata(filename, DEFAULT_METADATA, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def report(version=None):
"""Implement the report subcommand
Print out all the modules that have metadata and all the ones that do not.
:kwarg version: If given, the metadata must be at least this version.
Otherwise return it as not having metadata
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = list(plugins)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
    plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
plugins = list(plugins)
no_metadata, has_metadata, support, status = metadata_summary(plugins, version=version)
print('== Has metadata ==')
pprint(sorted(has_metadata))
print('')
print('== Has no metadata ==')
pprint(sorted(no_metadata))
print('')
print('== Supported by core ==')
pprint(sorted(support['core']))
print('== Supported by committers ==')
pprint(sorted(support['committer']))
print('== Supported by community ==')
pprint(sorted(support['community']))
print('')
print('== Status: stableinterface ==')
pprint(sorted(status['stableinterface']))
print('== Status: preview ==')
pprint(sorted(status['preview']))
print('== Status: deprecated ==')
pprint(sorted(status['deprecated']))
print('== Status: removed ==')
pprint(sorted(status['removed']))
print('')
print('== Summary ==')
print('No Metadata: {0} Has Metadata: {1}'.format(len(no_metadata), len(has_metadata)))
print('Supported by core: {0} Supported by community: {1} Supported by committer: {2}'.format(len(support['core']), len(support['community']), len(support['committer'])))
print('Status StableInterface: {0} Status Preview: {1} Status Deprecated: {2} Status Removed: {3}'.format(len(status['stableinterface']), len(status['preview']), len(status['deprecated']), len(status['removed'])))
return 0
if __name__ == '__main__':
action, args = parse_args(sys.argv[1:])
### TODO: Implement upgrade metadata and upgrade metadata from csvfile
if action == 'report':
rc = report(version=args['version'])
elif action == 'add':
rc = add_from_csv(args['csvfile'], version=args['version'], overwrite=args['overwrite'])
elif action == 'add-default':
rc = add_default(version=args['version'], overwrite=args['overwrite'])
sys.exit(rc)
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/doc/mpl_toolkits/axes_grid/examples/demo_parasite_axes2.py | 16 | 1208 | from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
| mit |
fheeger/gost | PyQtTachyEmulator.py | 1 | 14872 | import sys, random, time
from glob import glob
import numpy
from PyQt5.QtCore import QObject, QTimer, QIODevice
from PyQt5.QtWidgets import *
from PyQt5 import QtSerialPort
class TachyConnection(QObject):
    def __init__(self, dev=None, baud=4800, lineend="\r\n", timeout=3, log=sys.stderr):
super(TachyConnection, self).__init__()
self.log = log
self.lineend = lineend
if dev is None:
self.port = None
else:
            self.connect(dev, baud)
self.buffer = ""
self.lineBuffer = []
self.timeout = timeout
    def connect(self, dev, baud=4800):
        self.port = QtSerialPort.QSerialPort(dev)
        self.port.open(QIODevice.ReadWrite)
        self.port.setBaudRate(baud)
def readline(self):
if self.port is None:
raise NotConnectedError
if self.port.waitForReadyRead(self.timeout*1000):
line = self.port.readLine().decode("ascii")
self.log.write("READ LINE: %s" % line)
return line
raise TimeoutError("time out while reading line")
def readLines(self, n=2):
self.buffer += bytes(self.port.readAll()).decode("ascii")
#print("addinf data to buffer: %s" % repr(self.buffer))
pos = self.buffer.find(self.lineend)
while pos > 0:
self.lineBuffer.append(self.buffer[:pos])
print("adding data to line buffer: %s" % repr(self.buffer[:pos]))
self.buffer = self.buffer[pos+len(self.lineend):]
pos = self.buffer.find(self.lineend)
if len(self.lineBuffer) == n:
tmp = self.lineBuffer
self.lineBuffer = []
print("returning: %s" % tmp)
return tmp
return None
def write(self, data):
if self.port is None:
raise NotConnectedError
self.log.write("WRITE: %s\n" % repr(data))
self.port.write(bytes(data, "ascii"))
self.port.flush()
if not self.port.waitForBytesWritten(self.timeout*1000):
raise TimeoutError("time out while writing")
def read(self, bytes=1, timeout=None):
if self.port is None:
raise NotConnectedError
        if timeout is not None:
self.port.timeout = timeout
if self.port.waitForReadyRead(self.timeout*1000):
data = self.port.read(bytes).decode("ascii")
self.log.write("READ: %s\n" % data)
return data
raise TimeoutError("time out while reading")
class MeassureWindow(QDialog):
def __init__(self, parent):
super(MeassureWindow, self).__init__(parent)
self.xField = QLineEdit()
self.xField.setText("0")
self.yField = QLineEdit()
self.yField.setText("0")
self.zField = QLineEdit()
self.zField.setText("0")
self.hzField = QLineEdit()
self.hzField.setText("0")
self.vertField = QLineEdit()
self.vertField.setText("0")
self.distField = QLineEdit()
self.distField.setText("0")
mainLayout = QGridLayout()
mainLayout.addWidget(QLabel("x:"), 0, 0)
mainLayout.addWidget(self.xField , 0, 1)
mainLayout.addWidget(QLabel("y:"), 1, 0)
mainLayout.addWidget(self.yField, 1, 1)
mainLayout.addWidget(QLabel("z:"), 2, 0)
mainLayout.addWidget(self.zField, 2, 1)
mainLayout.addWidget(QLabel("horizontal Angle:"), 3, 0)
mainLayout.addWidget(self.hzField, 3, 1)
mainLayout.addWidget(QLabel("vertical Angle:"), 4, 0)
mainLayout.addWidget(self.vertField, 4, 1)
mainLayout.addWidget(QLabel("Distance:"), 5, 0)
mainLayout.addWidget(self.distField, 5, 1)
self.okButton = QPushButton("Ok")
self.cancleButton = QPushButton("Cancel")
mainLayout.addWidget(self.okButton)
mainLayout.addWidget(self.cancleButton)
self.setLayout(mainLayout)
self.setWindowTitle("Meassure Point")
self.okButton.clicked.connect(self.accept)
self.cancleButton.clicked.connect(self.reject)
def accept(self):
self.parent().anyPoint(float(self.xField.text()),
float(self.yField.text()),
float(self.zField.text()),
float(self.hzField.text()),
float(self.vertField.text()),
float(self.distField.text())
)
super(MeassureWindow, self).accept()
class RandomCircleWindow(QDialog):
def __init__(self, parent):
super(RandomCircleWindow, self).__init__(parent)
self.xField = QLineEdit()
self.xField.setText("0")
self.yField = QLineEdit()
self.yField.setText("0")
self.zField = QLineEdit()
self.zField.setText("0")
self.rField = QLineEdit()
self.rField.setText("3")
self.nField = QLineEdit()
self.nField.setText("20")
self.hField = QLineEdit()
self.hField.setText("0")
self.meassureButton = QPushButton("Meassure")
mainLayout = QGridLayout()
mainLayout.addWidget(QLabel("Circle center x:"), 0, 0)
mainLayout.addWidget(self.xField , 0, 1)
mainLayout.addWidget(QLabel("Circle center y:"), 1, 0)
mainLayout.addWidget(self.yField , 1, 1)
mainLayout.addWidget(QLabel("Circle center z:"), 2, 0)
mainLayout.addWidget(self.zField , 2, 1)
mainLayout.addWidget(QLabel("Circle radius:"), 3, 0)
mainLayout.addWidget(self.rField , 3, 1)
mainLayout.addWidget(QLabel("Number of points:"), 4, 0)
mainLayout.addWidget(self.nField , 4, 1)
mainLayout.addWidget(QLabel("Circle height:"), 5, 0)
mainLayout.addWidget(self.hField , 5, 1)
self.okButton = QPushButton("Ok")
self.cancleButton = QPushButton("Cancel")
mainLayout.addWidget(self.okButton)
mainLayout.addWidget(self.cancleButton)
self.setLayout(mainLayout)
self.okButton.clicked.connect(self.accept)
self.cancleButton.clicked.connect(self.reject)
def accept(self):
x = float(self.xField.text())
y = float(self.yField.text())
z = float(self.zField.text())
r = float(self.rField.text())
n = int(self.nField.text())
h = float(self.hField.text())
        self.measureRandomPolyCircle(x, y, z, r, n, h)
super(RandomCircleWindow, self).accept()
def measureRandomPolyCircle(self, x0=0, y0=0, z0=0, r=3, n=20, h=2):
angles = []
for i in range(n):
angles.append(random.uniform(0, 2*numpy.pi))
angles.sort()
for a in angles:
x = x0 + r*numpy.cos(a)
y = y0 + r*numpy.sin(a)
z = z0 + random.uniform(0, h)
self.parentWidget().anyPoint(x, y, z, a, 0, r)
time.sleep(0.5)
class TachyEmulator(QWidget):
def __init__(self, dev, parent=None):
super(TachyEmulator, self).__init__(parent)
self.x = 1.0
self.y = 2.0
self.z = 3.0
self.hzAngle = 4.0
self.vertAngle = 0.0
self.instrumentHeight = 1.7
self.reflectorHeight = 1.5
self.ptNr = 0
self.selectPort = QComboBox(self)
for port in self.avail_ports():
self.selectPort.addItem(port)
#display current state
self.xLabel = QLabel("")
self.yLabel = QLabel("")
self.zLabel = QLabel("")
self.hzAngleLabel = QLabel("")
self.vertAngleLabel = QLabel("")
self.reflectorHeightLabel = QLabel("")
self.instrumentHeightLabel = QLabel("")
stateLayout = QGridLayout()
stateLayout.addWidget(QLabel("x:"), 0, 0)
stateLayout.addWidget(self.xLabel, 0, 1)
stateLayout.addWidget(QLabel("y:"), 1, 0)
stateLayout.addWidget(self.yLabel, 1, 1)
stateLayout.addWidget(QLabel("z:"), 2, 0)
stateLayout.addWidget(self.zLabel, 2, 1)
stateLayout.addWidget(QLabel("horizontal Angle:"), 3, 0)
stateLayout.addWidget(self.hzAngleLabel, 3, 1)
stateLayout.addWidget(QLabel("vertical Angle:"), 4, 0)
stateLayout.addWidget(self.vertAngleLabel, 4, 1)
stateLayout.addWidget(QLabel("reflector Height:"), 5, 0)
stateLayout.addWidget(self.reflectorHeightLabel, 5, 1)
stateLayout.addWidget(QLabel("instrument Height:"), 6, 0)
stateLayout.addWidget(self.instrumentHeightLabel, 6, 1)
self.meassureButton = QPushButton("Meassure Point")
self.circleButton = QPushButton("Meassure random circle")
self.meassureButton.setEnabled(False)
self.circleButton.setEnabled(False)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.selectPort)
mainLayout.addLayout(stateLayout)
mainLayout.addWidget(self.meassureButton)
mainLayout.addWidget(self.circleButton)
self.setLayout(mainLayout)
self.setWindowTitle("Tachy Emulator")
self.updateStateDisplay()
self.connection = TachyConnection()
self.meassureButton.clicked.connect(self.meassurePoint)
self.circleButton.clicked.connect(self.measureRandomPolyCircle)
self.selectPort.activated[str].connect(self.connect)
def updateStateDisplay(self):
self.xLabel.setText(str(self.x))
self.yLabel.setText(str(self.y))
self.zLabel.setText(str(self.z))
self.hzAngleLabel.setText(str(self.hzAngle))
self.vertAngleLabel.setText(str(self.vertAngle))
self.reflectorHeightLabel.setText(str(self.reflectorHeight))
self.instrumentHeightLabel.setText(str(self.instrumentHeight))
def processData(self):
print("processing data")
data = self.connection.readLines(1)[0]
print(data)
if not data is None:
comArr = data.strip().split("/")
if comArr[0] == "GET":
if comArr[2] == "WI21":
self.connection.write("*21.322%0+17.d\r\n" % (self.hzAngle * 10**5))
elif comArr[2] == "WI84":
self.connection.write("*84.322%0+17.d\r\n" % (self.x * 10**3))
elif comArr[2] == "WI85":
self.connection.write("*85.322%0+17.d\r\n" % (self.y * 10**3))
elif comArr[2] == "WI86":
self.connection.write("*86.322%0+17.d\r\n" % (self.z * 10**3))
elif comArr[2] == "WI87":
self.connection.write("*87.322%0+17.d\r\n" % (self.reflectorHeight * 10**3))
elif comArr[2] == "WI88":
self.connection.write("*88.322%0+17.d\r\n" % (self.instrumentHeight * 10**3))
else:
self.connection.write("@W127\r\n")
elif comArr[0] == "PUT":
if comArr[1][:2] == "21":
self.hzAngle = float(comArr[1][-17:]) / 10**5
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "84":
self.x = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "85":
self.y = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "86":
self.z = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "87":
self.reflectorHeight = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "88":
self.instrumentHeight = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
else:
print("could not process data: " + data)
self.connection.write("@W127\r\n")
else:
print("could not process data: " + data)
self.connection.write("@W127\r\n")
print("done processing data")
def anyPoint(self, x, y, z, hz, vert, dist, reflectorH=0):
self.connection.port.readyRead.disconnect()
data = "110006%+017.f 21.322%+017.f 22.322%+017.f 31..00%+017.f 81..00%+017.f 82..00%+017.f 83..00%+017.f 87..10%+017.f" % (self.ptNr, hz*10**5, vert*10**5, dist*10**3, x*10**3, y*10**3, z*10**3, reflectorH*10**3)
self.connection.write("*%s\r\n" % data)
self.connection.write("w\r\n")
lines = None
while lines is None:
self.connection.port.waitForReadyRead(500)
lines = self.connection.readLines(1)
answer = lines[0]
self.connection.port.readyRead.connect(self.processData)
if answer.strip() != "OK":
QMessageBox.critical(self, "Unexpected Answer from Blender", "Blender answered: %s" % answer)
else:
self.ptNr += 1
print("Messung erfolgreich\n")
def meassurePoint(self):
meassureWindow = MeassureWindow(self)
meassureWindow.exec_()
def measureRandomPolyCircle(self):
circleWindow = RandomCircleWindow(self)
circleWindow.exec_()
def avail_ports(self):
return [p.portName() for p in QtSerialPort.QSerialPortInfo.availablePorts() if not p.isBusy()]
def connect(self, port):
print("connecting to port: %s" % port)
self.connection.connect(port)
self.meassureButton.setEnabled(True)
self.circleButton.setEnabled(True)
self.connection.port.readyRead.connect(self.processData)
class NotConnectedError(IOError):
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
screen = TachyEmulator("COM3")
screen.show()
sys.exit(app.exec_())
| gpl-3.0 |
bspink/django | tests/template_tests/filter_tests/test_iriencode.py | 388 | 1603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
"""
Ensure iriencode keeps safe strings.
"""
@setup({'iriencode01': '{{ url|iriencode }}'})
def test_iriencode01(self):
output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode02(self):
output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode03': '{{ url|iriencode }}'})
def test_iriencode03(self):
output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode04(self):
output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
def test_unicode(self):
self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')
def test_urlencoded(self):
self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
| bsd-3-clause |
Ecogenomics/CheckM | scripts/simMarkerGenesVsMarkerSet.py | 3 | 6013 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
"""
Perform a simulation to show that marker sets give better completeness
estimates than individual marker genes.
"""
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2013'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '1.0.0'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import argparse
import random
from lib.img import IMG
from lib.taxonomyUtils import ranksByLabel
from lib.plots.boxplot import BoxPlot
class SimMarkerGenesVsMarkerSets(object):
def __init__(self):
pass
def run(self, taxonomyStr, ubiquityThreshold, singleCopyThreshold, percentCompletion, numReplicates, numGenomes, contigLen):
img = IMG()
genomeIds = img.genomeIdsByTaxonomy(taxonomyStr, 'Final')
print '\nLineage ' + taxonomyStr + ' contains ' + str(len(genomeIds)) + ' genomes.'
# build marker genes and colocated marker sets
countTable = img.countTable(genomeIds)
markerGenes = img.markerGenes(genomeIds, countTable, ubiquityThreshold*len(genomeIds), singleCopyThreshold*len(genomeIds))
print ' Marker genes: ' + str(len(markerGenes))
geneDistTable = img.geneDistTable(genomeIds, markerGenes, spacingBetweenContigs=1e6)
colocatedGenes = img.colocatedGenes(geneDistTable)
colocatedSets = img.colocatedSets(colocatedGenes, markerGenes)
print ' Co-located gene sets: ' + str(len(colocatedSets))
# random sample genomes
if numGenomes == -1:
rndGenomeIds = genomeIds
else:
rndGenomeIds = random.sample(genomeIds, numGenomes)
# estimate completion for each genome using both the marker genes and marker sets
metadata = img.genomeMetadata('Final')
plotLabels = []
plotData = []
for genomeId in rndGenomeIds:
mgCompletion = []
msCompletion = []
for _ in xrange(0, numReplicates):
startPartialGenomeContigs = img.sampleGenome(metadata[genomeId]['genome size'], percentCompletion, contigLen)
# calculate completion with marker genes
containedMarkerGenes = img.containedMarkerGenes(markerGenes, geneDistTable[genomeId], startPartialGenomeContigs, contigLen)
mgCompletion.append(float(len(containedMarkerGenes))/len(markerGenes) - percentCompletion)
# calculate completion with marker set
comp = 0.0
for cs in colocatedSets:
present = 0
for contigId in cs:
if contigId in containedMarkerGenes:
present += 1
comp += float(present) / len(cs)
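                # Worked micro-example (sketch): with co-located sets {a, b}
                # and {c}, recovering only gene a gives comp = 1/2 + 0/1, so
                # the marker-set estimate below is 0.5 / 2 = 0.25.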
msCompletion.append(comp / len(colocatedSets) - percentCompletion)
plotData.append(mgCompletion)
plotData.append(msCompletion)
species = ' '.join(metadata[genomeId]['taxonomy'][ranksByLabel['Genus']:])
plotLabels.append(species + ' (' + genomeId + ')')
plotLabels.append('')
# plot data
boxPlot = BoxPlot()
plotFilename = './images/sim.MGvsMS.' + taxonomyStr.replace(';','_') + '.' + str(percentCompletion) + '.errorbar.png'
title = taxonomyStr.replace(';', '; ') + '\n' + 'Percent completion = %.2f' % percentCompletion
boxPlot.plot(plotFilename, plotData, plotLabels, r'$\Delta$' + ' Percent Completion', '', False, title)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-T', '--taxonomy', help='IMG taxonomy string indicating lineage of interest', default = 'prokaryotes')
parser.add_argument('-u', '--ubiquity', help='Ubiquity threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-s', '--single_copy', help='Single-copy threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-p', '--percent_complete', help='Percent completion to simulate', type=float, default = 0.75)
parser.add_argument('-r', '--replicates', help='Replicates per genome.', type=int, default = 100)
parser.add_argument('-g', '--num_genomes', help='Number of random genomes to consider (-1 for all)', type=int, default = 20)
parser.add_argument('-c', '--contig_len', help='Length of contigs to simulate', type=int, default = 5000)
args = parser.parse_args()
simMarkerGenesVsMarkerSets = SimMarkerGenesVsMarkerSets()
simMarkerGenesVsMarkerSets.run(args.taxonomy, args.ubiquity, args.single_copy, args.percent_complete, args.replicates, args.num_genomes, args.contig_len)
| gpl-3.0 |
woozzu/pylearn2 | pylearn2/scripts/tests/test_print_monitor_cv.py | 48 | 1927 | """
Test print_monitor_cv.py by training on a short TrainCV YAML file and
analyzing the output pickle.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.scripts import print_monitor_cv
from pylearn2.testing.skip import skip_if_no_sklearn
def test_print_monitor_cv():
"""Test print_monitor_cv.py."""
skip_if_no_sklearn()
handle, filename = tempfile.mkstemp()
trainer = yaml_parse.load(test_print_monitor_cv_yaml %
{'filename': filename})
trainer.main_loop()
# run print_monitor_cv.py main
print_monitor_cv.main(filename)
# run print_monitor_cv.py main with all=True
print_monitor_cv.main(filename, all=True)
# cleanup
os.remove(filename)
test_print_monitor_cv_yaml = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 8,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.05,
},
],
nvis: 10,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
save_path: %(filename)s,
}
"""
| bsd-3-clause |
jianglu/mojo | build/android/pylib/perf/surface_stats_collector.py | 47 | 6781 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import Queue
import datetime
import logging
import re
import threading
from pylib import android_commands
from pylib.device import device_utils
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
class SurfaceStatsCollector(object):
"""Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
Args:
device: A DeviceUtils instance.
"""
def __init__(self, device):
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, android_commands.AndroidCommands):
device = device_utils.DeviceUtils(device)
self._device = device
self._collector_thread = None
self._surface_before = None
self._get_data_event = None
self._data_queue = None
self._stop_event = None
self._warn_about_empty_data = True
def DisableWarningAboutEmptyData(self):
self._warn_about_empty_data = False
def Start(self):
assert not self._collector_thread
if self._ClearSurfaceFlingerLatencyData():
self._get_data_event = threading.Event()
self._stop_event = threading.Event()
self._data_queue = Queue.Queue()
self._collector_thread = threading.Thread(target=self._CollectorThread)
self._collector_thread.start()
else:
raise Exception('SurfaceFlinger not supported on this device.')
def Stop(self):
assert self._collector_thread
(refresh_period, timestamps) = self._GetDataFromThread()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
return (refresh_period, timestamps)
def _CollectorThread(self):
last_timestamp = 0
timestamps = []
retries = 0
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
        if refresh_period is None or new_timestamps is None:
retries += 1
if retries < 3:
continue
if last_timestamp:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
timestamps += [timestamp for timestamp in new_timestamps
if timestamp > last_timestamp]
if len(timestamps):
last_timestamp = timestamps[-1]
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put((refresh_period, timestamps))
timestamps = []
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() infinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
self._get_data_event.set()
ret = self._data_queue.get()
if isinstance(ret, Exception):
raise ret
return ret
def _ClearSurfaceFlingerLatencyData(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency-clear SurfaceView')
return not len(results)
def GetSurfaceFlingerPid(self):
results = self._device.RunShellCommand('ps | grep surfaceflinger')
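    # On Android, `ps` reports the PID in the second whitespace-separated
    # column (USER PID PPID ...), hence results[0].split()[1] below.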
if not results:
raise Exception('Unable to get surface flinger process id')
pid = results[0].split()[1]
return pid
def _GetSurfaceFlingerFrameData(self):
"""Returns collected SurfaceFlinger frame timing data.
Returns:
A tuple containing:
- The display's nominal refresh period in milliseconds.
- A list of timestamps signifying frame presentation times in
milliseconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# adb shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms), it is followed
# by 128 lines w/ 3 timestamps in nanosecond each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data is when the frame latency crosses a refresh period
# boundary, this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps in average.
#
# We use the special "SurfaceView" window name because the statistics for
# the activity's main window are not updated when the main web content is
# composited into a SurfaceView.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency SurfaceView')
if not len(results):
return (None, None)
timestamps = []
nanoseconds_per_millisecond = 1e6
refresh_period = long(results[0]) / nanoseconds_per_millisecond
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
pending_fence_timestamp = (1 << 63) - 1
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = long(fields[1])
if timestamp == pending_fence_timestamp:
continue
timestamp /= nanoseconds_per_millisecond
timestamps.append(timestamp)
return (refresh_period, timestamps)
| bsd-3-clause |
jaruba/chromium.src | tools/telemetry/telemetry/value/list_of_scalar_values_unittest.py | 12 | 6051 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import page as page_module
from telemetry import value
from telemetry.page import page_set
from telemetry.value import list_of_scalar_values
from telemetry.value import none_values
class TestBase(unittest.TestCase):
def setUp(self):
ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
ps.AddUserStory(page_module.Page('http://www.baz.com/', ps, ps.base_dir))
ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
self.page_set = ps
@property
def pages(self):
return self.page_set.pages
class ValueTest(TestBase):
def testListSamePageMergingWithSamePageConcatenatePolicy(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[3, 4], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2, 3, 4], vM.values)
def testListSamePageMergingWithPickFirstPolicy(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.PICK_FIRST)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[3, 4], same_page_merge_policy=value.PICK_FIRST)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.PICK_FIRST, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
def testListDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page1, 'x', 'unit',
[3, 4], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromDifferentPages([v0, v1]))
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2, 3, 4], vM.values)
def testListWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
None, same_page_merge_policy=value.CONCATENATE, none_value_reason='n')
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(None, vM.values)
self.assertEquals(none_values.MERGE_FAILURE_REASON,
vM.none_value_reason)
def testListWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit', None))
def testListWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit', [1, 2],
none_value_reason='n'))
def testAsDict(self):
v = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'unit', [1, 2],
same_page_merge_policy=value.PICK_FIRST, important=False)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'values': [1, 2]
})
def testNoneValueAsDict(self):
v = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'unit', None, same_page_merge_policy=value.PICK_FIRST,
important=False, none_value_reason='n')
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'values': None,
'none_value_reason': 'n'
})
def testFromDictInts(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': [1, 2]
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, [1, 2])
def testFromDictFloats(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': [1.3, 2.7]
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, [1.3, 2.7])
def testFromDictNoneValue(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': None,
'none_value_reason': 'n'
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, None)
self.assertEquals(v.none_value_reason, 'n')
| bsd-3-clause |
Yannig/ansible | lib/ansible/modules/network/netvisor/pn_vrouterlbif.py | 29 | 10152 | #!/usr/bin/python
""" PN CLI vrouter-loopback-interface-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouterlbif
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove vrouter-loopback-interface.
description:
- Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove
commands.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add vrouter loopback
interface and 'absent' to remove vrouter loopback interface.
required: True
choices: ['present', 'absent']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: True
pn_index:
description:
- Specify the interface index from 1 to 255.
pn_interface_ip:
description:
- Specify the IP address.
required: True
"""
EXAMPLES = """
- name: add vrouter-loopback-interface
pn_vrouterlbif:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
- name: remove vrouter-loopback-interface
pn_vrouterlbif:
state: 'absent'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the vrouterlb command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterlb command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
LB_INTERFACE_EXISTS = None
# Index range
MIN_INDEX = 1
MAX_INDEX = 255
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the
vrouter-loopback-interface-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If a loopback interface with the given ip exists on the given vRouter,
return LB_INTERFACE_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, LB_INTERFACE_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_interface_ip']
# Global flags
global VROUTER_EXISTS, LB_INTERFACE_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for loopback interface
show = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip '
'no-show-headers' % vrouter_name)
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if interface_ip in out:
LB_INTERFACE_EXISTS = True
else:
LB_INTERFACE_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-loopback-interface-add'
if state == 'absent':
command = 'vrouter-loopback-interface-remove'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent']),
pn_vrouter_name=dict(required=True, type='str'),
pn_interface_ip=dict(type='str'),
pn_index=dict(type='int')
),
required_if=(
["state", "present",
["pn_vrouter_name", "pn_interface_ip"]],
["state", "absent",
["pn_vrouter_name", "pn_interface_ip"]]
)
)
# Accessing the arguments
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_interface_ip']
index = module.params['pn_index']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if index:
if not MIN_INDEX <= index <= MAX_INDEX:
module.exit_json(
msg="Index must be between 1 and 255",
changed=False
)
index = str(index)
if command == 'vrouter-loopback-interface-remove':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if LB_INTERFACE_EXISTS is False:
module.exit_json(
skipped=True,
msg=('Loopback interface with IP %s does not exist on %s'
% (interface_ip, vrouter_name))
)
if not index:
# To remove loopback interface, we need the index.
# If index is not specified, get the Loopback interface index
# using the given interface ip.
get_index = cli
get_index += (' vrouter-loopback-interface-show vrouter-name %s ip '
'%s ' % (vrouter_name, interface_ip))
get_index += 'format index no-show-headers'
get_index = shlex.split(get_index)
out = module.run_command(get_index)[1]
index = out.split()[1]
cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index)
if command == 'vrouter-loopback-interface-add':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg=('vRouter %s does not exist' % vrouter_name)
)
if LB_INTERFACE_EXISTS is True:
module.exit_json(
skipped=True,
msg=('Loopback interface with IP %s already exists on %s'
% (interface_ip, vrouter_name))
)
cli += (' %s vrouter-name %s ip %s'
% (command, vrouter_name, interface_ip))
if index:
cli += ' index %s ' % index
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
edgedb/edgedb | edb/testbase/serutils.py | 1 | 2605 | # mypy: ignore-errors
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import datetime
import decimal
import functools
import uuid
import edgedb
@functools.singledispatch
def serialize(o):
    raise TypeError(f'cannot serialize type {type(o)}')
@serialize.register
def _tuple(o: edgedb.Tuple):
return [serialize(el) for el in o]
@serialize.register
def _namedtuple(o: edgedb.NamedTuple):
return {attr: serialize(getattr(o, attr)) for attr in dir(o)}
@serialize.register
def _linkset(o: edgedb.LinkSet):
return [serialize(el) for el in o]
@serialize.register
def _link(o: edgedb.Link):
ret = {}
for lprop in dir(o):
if lprop in {'source', 'target'}:
continue
ret[f'@{lprop}'] = serialize(getattr(o, lprop))
ret.update(_object(o.target))
return ret
@serialize.register
def _object(o: edgedb.Object):
ret = {}
for attr in dir(o):
try:
link = o[attr]
except (KeyError, TypeError):
link = None
if link:
ret[attr] = serialize(link)
else:
ret[attr] = serialize(getattr(o, attr))
return ret
@serialize.register(edgedb.Set)
@serialize.register(edgedb.Array)
def _set(o):
return [serialize(el) for el in o]
@serialize.register(uuid.UUID)
def _stringify(o):
return str(o)
@serialize.register(int)
@serialize.register(float)
@serialize.register(str)
@serialize.register(bool)
@serialize.register(type(None))
@serialize.register(decimal.Decimal)
@serialize.register(datetime.timedelta)
@serialize.register(edgedb.RelativeDuration)
def _scalar(o):
return o
@serialize.register
def _datetime(o: datetime.datetime):
return o.isoformat()
@serialize.register
def _date(o: datetime.date):
return o.isoformat()
@serialize.register
def _time(o: datetime.time):
return o.isoformat()
@serialize.register
def _enum(o: edgedb.EnumValue):
return str(o)
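# Illustrative usage (the query and the `conn` client are stand-ins, not part
# of this module):
#     import json
#     result = conn.query('SELECT User { name }')
#     print(json.dumps(serialize(result)))
# serialize() recurses through the result and returns plain dicts, lists and
# scalars that json.dumps can handle directly.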
| apache-2.0 |
openprivacy/.emacs.d | elpy/rpc-venv/lib/python3.8/site-packages/setuptools/_distutils/command/build.py | 35 | 5767 | """distutils.command.build
Implements the Distutils 'build' command."""
import sys, os
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build(Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('build-lib=', None,
"build directory for all distribution (defaults to either " +
"build-purelib or build-platlib"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
('compiler=', 'c',
"specify the compiler type"),
('parallel=', 'j',
"number of parallel build jobs"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
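    # Typical invocation (illustrative): "python setup.py build
    # --build-base=/tmp/build"; the directories are resolved in
    # finalize_options() below.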
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_base = 'build'
# these are decided only after 'build_base' has its final value
# (unless overridden by the user or client)
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.plat_name = None
self.debug = None
self.force = 0
self.executable = None
self.parallel = None
def finalize_options(self):
if self.plat_name is None:
self.plat_name = get_platform()
else:
# plat-name only supported for windows (other platforms are
# supported via ./configure flags, if at all). Avoid misleading
# other platforms.
if os.name != 'nt':
raise DistutilsOptionError(
"--plat-name only supported on Windows (try "
"using './configure --help' on your platform)")
plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2])
# Make it so Python 2.x and Python 2.x with --with-pydebug don't
# share the same build directories. Doing so confuses the build
# process for C modules
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
# them for a given distribution, though --
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
# 'build_lib' is the actual directory that we will use for this
# particular module distribution -- if user didn't supply it, pick
# one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
# 'build_temp' -- temporary directory for compiler turds,
# "build/temp.<plat>"
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-%d.%d' % sys.version_info[:2])
if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
if isinstance(self.parallel, str):
try:
self.parallel = int(self.parallel)
except ValueError:
raise DistutilsOptionError("parallel should be an integer")
def run(self):
# Run all relevant sub-commands. This will be some subset of:
# - build_py - pure Python modules
# - build_clib - standalone C libraries
# - build_ext - Python extensions
# - build_scripts - (Python) scripts
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# -- Predicates for the sub-command list ---------------------------
def has_pure_modules(self):
return self.distribution.has_pure_modules()
def has_c_libraries(self):
return self.distribution.has_c_libraries()
def has_ext_modules(self):
return self.distribution.has_ext_modules()
def has_scripts(self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
| gpl-2.0 |
dimartiro/gspread | gspread/urls.py | 41 | 2820 | # -*- coding: utf-8 -*-
"""
gspread.urls
~~~~~~~~~~~~
This module stores Google API URL patterns.
"""
import re
from .exceptions import UnsupportedFeedTypeError, UrlParameterMissing
SPREADSHEETS_SERVER = 'spreadsheets.google.com'
SPREADSHEETS_FEED_URL = 'https://%s/%s/' % (SPREADSHEETS_SERVER, 'feeds')
# General pattern
# /feeds/feedType/key/worksheetId/visibility/projection
#
# Spreadsheet metafeed
# /feeds/spreadsheets/private/full
# /feeds/spreadsheets/private/full/key
#
# Worksheet
# /feeds/worksheets/key/visibility/projection
# /feeds/worksheets/key/visibility/projection/worksheetId
#
# Cell-based feed
# /feeds/cells/key/worksheetId/visibility/projection
# /feeds/cells/key/worksheetId/visibility/projection/cellId
_feed_types = {'spreadsheets': 'spreadsheets/{visibility}/{projection}',
'worksheets': 'worksheets/{spreadsheet_id}/{visibility}/{projection}',
'worksheet': 'worksheets/{spreadsheet_id}/{visibility}/{projection}/{worksheet_id}/{version}',
'cells': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}',
'cells_batch': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}/batch',
'cells_cell_id': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}/{cell_id}'}
_fields_cache = {}
_field_re = re.compile(r'{(\w+)}')
def _extract_fields(patternstr):
return _field_re.findall(patternstr)
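# Illustrative result (with a made-up spreadsheet key 'abc123'):
#     construct_url('worksheets', spreadsheet_id='abc123')
# returns
#     'https://spreadsheets.google.com/feeds/worksheets/abc123/private/full'
# using the default visibility ('private') and projection ('full').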
def construct_url(feedtype=None,
obj=None,
visibility='private',
projection='full',
spreadsheet_id=None,
worksheet_id=None,
cell_id=None,
worksheet_version=None):
"""Constructs URL to be used for API request.
"""
try:
urlpattern = _feed_types[feedtype]
fields = _fields_cache.get(feedtype)
if fields is None:
fields = _extract_fields(urlpattern)
_fields_cache[feedtype] = fields
except KeyError as e:
raise UnsupportedFeedTypeError(e)
obj_fields = obj.get_id_fields() if obj is not None else {}
params = {'visibility': visibility,
'projection': projection,
'spreadsheet_id': (spreadsheet_id if spreadsheet_id
else obj_fields.get('spreadsheet_id')),
'worksheet_id': (worksheet_id if worksheet_id
else obj_fields.get('worksheet_id')),
'cell_id': cell_id,
'version': worksheet_version}
params = dict((k, v) for k, v in params.items() if v is not None)
try:
return '%s%s' % (SPREADSHEETS_FEED_URL,
urlpattern.format(**params))
except KeyError as e:
raise UrlParameterMissing(e)
| mit |
paulcoiffier/Mobissime-Liberta | vendor/justinrainbow/json-schema/docs/conf.py | 74 | 7837 | # -*- coding: utf-8 -*-
#
# JsonSchema documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 10 15:34:44 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JsonSchema'
copyright = u'2011, Justin Rainbow, Bruno Prieto Reis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'JsonSchemadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'JsonSchema.tex', u'JsonSchema Documentation',
u'Justin Rainbow, Bruno Prieto Reis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jsonschema', u'JsonSchema Documentation',
[u'Justin Rainbow, Bruno Prieto Reis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'JsonSchema', u'JsonSchema Documentation', u'Justin Rainbow, Bruno Prieto Reis',
'JsonSchema', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 |
minhphung171093/OpenERP_V8 | openerp/tools/which.py | 456 | 6884 | #!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
if not '' in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
return iter(which_files(file, mode, path, pathext)).next()
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
if __name__ == '__main__':
import doctest
doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
emilroz/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/index.py | 5 | 2494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from webclient.controller import BaseController
class BaseIndex(BaseController):
def __init__(self, conn):
BaseController.__init__(self, conn)
def loadMostRecent(self):
self.mostRecentSharesComments = list(self.conn.listMostRecentShareComments())
self.mostRecentSharesComments.sort(key=lambda x: x.creationEventDate(), reverse=True)
self.mostRecentShares = list()
for sh in list(self.conn.listMostRecentShares()):
flag = True
for s in self.mostRecentShares:
if sh.id == s.id:
flag = False
if flag:
self.mostRecentShares.append(sh)
self.mostRecentShares.sort(key=lambda x: x.started, reverse=True)
def loadTagCloud(self):
tags = dict()
for ann in list(self.conn.listMostRecentTags()):
try:
if tags[ann.id]['count'] > 0:
tags[ann.id]['count'] = tags[ann.id]['count'] + 1
else:
tags[ann.id]['count'] = 1
except:
tags[ann.id] = {'obj':ann, 'count':1}
if len(tags) == 20:
break
font = {'max': 0, 'min': 1}
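        # font tracks the smallest and largest tag counts seen, so a template
        # can scale tag-cloud font sizes between those two bounds.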
for key, value in tags.items():
if value['count'] < font['min']:
font['min'] = value['count']
if value['count'] > font['max']:
font['max'] = value['count']
self.font = font
self.mostRecentTags = tags
def loadLastAcquisitions(self):
self.lastAcquiredImages = list(self.conn.listLastImportedImages())
| gpl-2.0 |
h3biomed/ansible | lib/ansible/modules/network/bigswitch/bigmon_policy.py | 44 | 6499 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ted Elhourani <ted@bigswitch.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible module to manage Big Monitoring Fabric service chains
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigmon_policy
author: "Ted (@tedelhourani)"
short_description: Create and remove a bigmon out-of-band policy.
description:
- Create and remove a bigmon out-of-band policy.
version_added: "2.3"
options:
name:
description:
- The name of the policy.
required: true
policy_description:
description:
- Description of policy.
action:
description:
      - What to do with matching packets. Forward them to delivery interfaces,
        drop them (measure the rate of matching packets but do not forward),
        capture them and write to a PCAP file, or enable NetFlow generation.
    default: forward
    choices: ['forward', 'drop', 'capture', 'flow-gen']
priority:
description:
- A priority associated with this policy. The higher priority policy takes precedence over a lower priority.
default: 100
duration:
description:
- Run policy for duration duration or until delivery_packet_count packets are delivered, whichever comes first.
default: 0
start_time:
description:
- Date the policy becomes active
default: ansible_date_time.iso8601
delivery_packet_count:
description:
- Run policy until delivery_packet_count packets are delivered.
default: 0
state:
description:
- Whether the policy should be present or absent.
default: present
choices: ['present', 'absent']
controller:
description:
- The controller address.
required: true
validate_certs:
description:
- If C(false), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: true
type: bool
access_token:
description:
- Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
'''
EXAMPLES = '''
- name: policy to aggregate filter and deliver data center (DC) 1 traffic
bigmon_policy:
name: policy1
policy_description: DC 1 traffic policy
action: drop
controller: '{{ inventory_hostname }}'
state: present
validate_certs: false
'''
RETURN = ''' # '''
import datetime
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.bigswitch.bigswitch import Rest
from ansible.module_utils._text import to_native
def policy(module):
try:
access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN']
except KeyError as e:
        module.fail_json(msg='Unable to load %s' % to_native(e), exception=traceback.format_exc())
name = module.params['name']
policy_description = module.params['policy_description']
action = module.params['action']
priority = module.params['priority']
duration = module.params['duration']
start_time = module.params['start_time']
delivery_packet_count = module.params['delivery_packet_count']
state = module.params['state']
controller = module.params['controller']
rest = Rest(module,
{'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token},
'https://' + controller + ':8443/api/v1/data/controller/applications/bigtap')
if name is None:
module.fail_json(msg='parameter `name` is missing')
response = rest.get('policy?config=true', data={})
if response.status_code != 200:
module.fail_json(msg="failed to obtain existing policy config: {0}".format(response.json['description']))
config_present = False
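    # Idempotency check: the policy only counts as present if every tracked
    # field matches the requested configuration.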
matching = [policy for policy in response.json
if policy['name'] == name and
policy['duration'] == duration and
policy['delivery-packet-count'] == delivery_packet_count and
policy['policy-description'] == policy_description and
policy['action'] == action and
policy['priority'] == priority]
if matching:
config_present = True
if state in ('present') and config_present:
module.exit_json(changed=False)
if state in ('absent') and not config_present:
module.exit_json(changed=False)
if state in ('present'):
data = {'name': name, 'action': action, 'policy-description': policy_description,
'priority': priority, 'duration': duration, 'start-time': start_time,
'delivery-packet-count': delivery_packet_count}
response = rest.put('policy[name="%s"]' % name, data=data)
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error creating policy '{0}': {1}".format(name, response.json['description']))
if state in ('absent'):
response = rest.delete('policy[name="%s"]' % name, data={})
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error deleting policy '{0}': {1}".format(name, response.json['description']))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
policy_description=dict(type='str', default=''),
action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'),
priority=dict(type='int', default=100),
duration=dict(type='int', default=0),
start_time=dict(type='str', default=datetime.datetime.now().isoformat() + '+00:00'),
delivery_packet_count=dict(type='int', default=0),
controller=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present'),
validate_certs=dict(type='bool', default='True'),
access_token=dict(type='str', no_log=True)
)
)
try:
policy(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
vortex-ape/scikit-learn | examples/bicluster/plot_bicluster_newsgroups.py | 39 | 5911 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
"""
from __future__ import print_function
from collections import defaultdict
import operator
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
print(__doc__)
def number_normalizer(tokens):
""" Map all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
return ("#NUMBER" if token[0].isdigit() else token for token in tokens)
class NumberNormalizingVectorizer(TfidfVectorizer):
def build_tokenizer(self):
tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()
return lambda doc: list(number_normalizer(tokenize(doc)))
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = NumberNormalizingVectorizer(stop_words='english', min_df=5)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
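# The normalized cut of a bicluster compares the total weight of edges leaving
# the bicluster (the cut) with the weight inside it; lower values indicate a
# more self-contained bicluster.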
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
Altazon97/WolfsDen | modules/twython-3.1.0/twython/streaming/types.py | 9 | 2825 | # -*- coding: utf-8 -*-
"""
twython.streaming.types
~~~~~~~~~~~~~~~~~~~~~~~
This module contains classes and methods for :class:`TwythonStreamer` to use.
"""
class TwythonStreamerTypes(object):
"""Class for different stream endpoints
Not all streaming endpoints have nested endpoints.
User Streams and Site Streams are single streams with no nested endpoints
Status Streams include filter, sample and firehose endpoints
"""
def __init__(self, streamer):
self.streamer = streamer
self.statuses = TwythonStreamerTypesStatuses(streamer)
def user(self, **params):
"""Stream user
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/user
"""
url = 'https://userstream.twitter.com/%s/user.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def site(self, **params):
"""Stream site
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/site
"""
url = 'https://sitestream.twitter.com/%s/site.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
class TwythonStreamerTypesStatuses(object):
"""Class for different statuses endpoints
    Exists so that TwythonStreamer.statuses.filter() is available.
Just a bit cleaner than TwythonStreamer.statuses_filter(),
statuses_sample(), etc. all being single methods in TwythonStreamer
"""
def __init__(self, streamer):
self.streamer = streamer
def filter(self, **params):
"""Stream statuses/filter
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/post/statuses/filter
"""
url = 'https://stream.twitter.com/%s/statuses/filter.json' \
% self.streamer.api_version
self.streamer._request(url, 'POST', params=params)
def sample(self, **params):
"""Stream statuses/sample
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/sample
"""
url = 'https://stream.twitter.com/%s/statuses/sample.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def firehose(self, **params):
"""Stream statuses/firehose
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
"""
url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
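# Hedged usage sketch (credentials are placeholders; subclassing
# TwythonStreamer and overriding on_success follows the Twython docs):
#
#     from twython import TwythonStreamer
#
#     class MyStreamer(TwythonStreamer):
#         def on_success(self, data):
#             if 'text' in data:
#                 print(data['text'])
#
#     stream = MyStreamer(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
#     stream.statuses.filter(track='python')  # dispatches to the class above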
| gpl-3.0 |
jessefeinman/FintechHackathon | venv/Lib/site-packages/setuptools/msvc.py | 89 | 37091 | """
Improved support for Microsoft Visual C++ compilers.
Known supported compilers:
--------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64);
Microsoft Windows SDK 7.0 (x86, x64, ia64);
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Microsoft Visual C++ 14.0:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
"""
import os
import sys
import platform
import itertools
import distutils.errors
from pkg_resources.extern.packaging.version import LegacyVersion
from setuptools.extern.six.moves import filterfalse
from .monkey import get_unpatched
if platform.system() == 'Windows':
from setuptools.extern.six.moves import winreg
safe_env = os.environ
else:
"""
Mock winreg and environ so the module can be imported
on this platform.
"""
class winreg:
HKEY_USERS = None
HKEY_CURRENT_USER = None
HKEY_LOCAL_MACHINE = None
HKEY_CLASSES_ROOT = None
safe_env = dict()
try:
from distutils.msvc9compiler import Reg
except ImportError:
pass
def msvc9_find_vcvarsall(version):
"""
Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
compiler build for Python (VCForPython). Fall back to original behavior
when the standalone compiler is not available.
Redirect the path of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
Parameters
----------
version: float
Required Microsoft Visual C++ version.
Return
------
vcvarsall.bat path: str
"""
VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = VC_BASE % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = VC_BASE % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
vcvarsall = os.path.os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
return get_unpatched(msvc9_find_vcvarsall)(version)
def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
"""
Patched "distutils.msvc9compiler.query_vcvarsall" for support standalones
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64);
Microsoft Windows SDK 7.0 (x86, x64, ia64);
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Parameters
----------
ver: float
Required Microsoft Visual C++ version.
arch: str
Target architecture.
Return
------
environment: dict
"""
    # Try to get environment from vcvarsall.bat (classical way)
try:
orig = get_unpatched(msvc9_query_vcvarsall)
return orig(ver, arch, *args, **kwargs)
except distutils.errors.DistutilsPlatformError:
# Pass error if Vcvarsall.bat is missing
pass
except ValueError:
# Pass error if environment not set after executing vcvarsall.bat
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(arch, ver).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, ver, arch)
raise
def msvc14_get_vc_env(plat_spec):
"""
Patched "distutils._msvccompiler._get_vc_env" for support standalones
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 14.0:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
Parameters
----------
plat_spec: str
Target architecture.
Return
------
environment: dict
"""
# Try to get environment from vcvarsall.bat (Classical way)
try:
return get_unpatched(msvc14_get_vc_env)(plat_spec)
except distutils.errors.DistutilsPlatformError:
        # Pass error if vcvarsall.bat is missing
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, 14.0)
raise
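# Hedged usage sketch ('x86_amd64' is an assumed cross-compile platform
# string; the returned keys mirror EnvironmentInfo.return_env() below):
#
#     env = msvc14_get_vc_env('x86_amd64')
#     env['include'], env['lib'], env['libpath'], env['path']
#     # each value is an os.pathsep-joined string of existing directories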
def msvc14_gen_lib_options(*args, **kwargs):
"""
Patched "distutils._msvccompiler.gen_lib_options" for fix
compatibility between "numpy.distutils" and "distutils._msvccompiler"
(for Numpy < 1.11.2)
"""
if "numpy.distutils" in sys.modules:
import numpy as np
if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
def _augment_exception(exc, version, arch=''):
"""
Add details to the exception message to help guide the user
as to what action will resolve it.
"""
# Error if MSVC++ directory not found or environment not set
message = exc.args[0]
if "vcvarsall" in message.lower() or "visual c" in message.lower():
# Special error message if MSVC++ not installed
tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
message = tmpl.format(**locals())
msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
if version == 9.0:
if arch.lower().find('ia64') > -1:
# For VC++ 9.0, if IA64 support is needed, redirect user
# to Windows SDK 7.0
message += ' Get it with "Microsoft Windows SDK 7.0": '
message += msdownload % 3138
else:
# For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
# This redirection link is maintained by Microsoft.
# Contact vspython@microsoft.com if it needs updating.
message += ' Get it from http://aka.ms/vcpython27'
elif version == 10.0:
# For VC++ 10.0 Redirect user to Windows SDK 7.1
message += ' Get it with "Microsoft Windows SDK 7.1": '
message += msdownload % 8279
elif version >= 14.0:
# For VC++ 14.0 Redirect user to Visual C++ Build Tools
message += (' Get it with "Microsoft Visual C++ Build Tools": '
r'http://landinghub.visualstudio.com/'
'visual-cpp-build-tools')
exc.args = (message, )
class PlatformInfo:
"""
    Current and target architecture information.
Parameters
----------
arch: str
Target architecture.
"""
current_cpu = safe_env.get('processor_architecture', '').lower()
def __init__(self, arch):
self.arch = arch.lower().replace('x64', 'amd64')
@property
def target_cpu(self):
return self.arch[self.arch.find('_') + 1:]
def target_is_x86(self):
return self.target_cpu == 'x86'
def current_is_x86(self):
return self.current_cpu == 'x86'
def current_dir(self, hidex86=False, x64=False):
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
            return '' instead of '\x86' if architecture is x86.
        x64: bool
            return '\x64' instead of '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\target', or '' (see hidex86 parameter)
"""
return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
)
def target_dir(self, hidex86=False, x64=False):
"""
Target platform specific subfolder.
Parameters
----------
hidex86: bool
            return '' instead of '\x86' if architecture is x86.
        x64: bool
            return '\x64' instead of '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\current', or '' (see hidex86 parameter)
"""
return (
'' if (self.target_cpu == 'x86' and hidex86) else
r'\x64' if (self.target_cpu == 'amd64' and x64) else
r'\%s' % self.target_cpu
)
def cross_dir(self, forcex86=False):
"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
            Use 'x86' as current architecture even if current architecture is
not x86.
Return
------
subfolder: str
'' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
'' if self.target_cpu == current else
self.target_dir().replace('\\', '\\%s_' % current)
)
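# Illustrative values (a sketch assuming the current CPU reported by the
# environment is x86):
#
#     pi = PlatformInfo('x86_amd64')
#     pi.target_cpu            # 'amd64'
#     pi.target_dir()          # '\\amd64'
#     pi.target_dir(x64=True)  # '\\x64'
#     pi.cross_dir()           # '\\x86_amd64'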
class RegistryInfo:
"""
    Microsoft Visual Studio related registry information.
Parameters
----------
platform_info: PlatformInfo
"PlatformInfo" instance.
"""
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
def __init__(self, platform_info):
self.pi = platform_info
@property
def visualstudio(self):
"""
Microsoft Visual Studio root registry key.
"""
return 'VisualStudio'
@property
def sxs(self):
"""
Microsoft Visual Studio SxS registry key.
"""
return os.path.join(self.visualstudio, 'SxS')
@property
def vc(self):
"""
Microsoft Visual C++ VC7 registry key.
"""
return os.path.join(self.sxs, 'VC7')
@property
def vs(self):
"""
Microsoft Visual Studio VS7 registry key.
"""
return os.path.join(self.sxs, 'VS7')
@property
def vc_for_python(self):
"""
Microsoft Visual C++ for Python registry key.
"""
return r'DevDiv\VCForPython'
@property
def microsoft_sdk(self):
"""
Microsoft SDK registry key.
"""
return 'Microsoft SDKs'
@property
def windows_sdk(self):
"""
Microsoft Windows/Platform SDK registry key.
"""
return os.path.join(self.microsoft_sdk, 'Windows')
@property
def netfx_sdk(self):
"""
Microsoft .NET Framework SDK registry key.
"""
return os.path.join(self.microsoft_sdk, 'NETFXSDK')
@property
def windows_kits_roots(self):
"""
Microsoft Windows Kits Roots registry key.
"""
return r'Windows Kits\Installed Roots'
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
            Registry key path in which to look.
        x86: bool
Force x86 software registry.
Return
------
str: value
"""
node64 = '' if self.pi.current_is_x86() or x86 else r'\Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key)
def lookup(self, key, name):
"""
Look for values in registry in Microsoft software registry.
Parameters
----------
key: str
            Registry key path in which to look.
name: str
Value name to find.
Return
------
str: value
"""
KEY_READ = winreg.KEY_READ
openkey = winreg.OpenKey
ms = self.microsoft
for hkey in self.HKEYS:
try:
bkey = openkey(hkey, ms(key), 0, KEY_READ)
except (OSError, IOError):
if not self.pi.current_is_x86():
try:
bkey = openkey(hkey, ms(key, True), 0, KEY_READ)
except (OSError, IOError):
continue
else:
continue
try:
return winreg.QueryValueEx(bkey, name)[0]
except (OSError, IOError):
pass
class SystemInfo:
"""
    Microsoft Windows and Visual Studio related system information.
Parameters
----------
registry_info: RegistryInfo
"RegistryInfo" instance.
vc_ver: float
Required Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase
    # variable names from Microsoft source files for easier comparison.
WinDir = safe_env.get('WinDir', '')
ProgramFiles = safe_env.get('ProgramFiles', '')
ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles)
def __init__(self, registry_info, vc_ver=None):
self.ri = registry_info
self.pi = self.ri.pi
if vc_ver:
self.vc_ver = vc_ver
else:
try:
self.vc_ver = self.find_available_vc_vers()[-1]
except IndexError:
err = 'No Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
def find_available_vc_vers(self):
"""
Find all available Microsoft Visual C++ versions.
"""
vckeys = (self.ri.vc, self.ri.vc_for_python)
vc_vers = []
for hkey in self.ri.HKEYS:
for key in vckeys:
try:
bkey = winreg.OpenKey(hkey, key, 0, winreg.KEY_READ)
except (OSError, IOError):
continue
subkeys, values, _ = winreg.QueryInfoKey(bkey)
for i in range(values):
try:
ver = float(winreg.EnumValue(bkey, i)[0])
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
for i in range(subkeys):
try:
ver = float(winreg.EnumKey(bkey, i))
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
return sorted(vc_vers)
@property
def VSInstallDir(self):
"""
Microsoft Visual Studio directory.
"""
# Default path
name = 'Microsoft Visual Studio %0.1f' % self.vc_ver
default = os.path.join(self.ProgramFilesx86, name)
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default
@property
def VCInstallDir(self):
"""
Microsoft Visual C++ directory.
"""
# Default path
default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver
guess_vc = os.path.join(self.ProgramFilesx86, default)
# Try to get "VC++ for Python" path from registry as default path
reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
python_vc = self.ri.lookup(reg_path, 'installdir')
default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc
# Try to get path from registry, if fail use default path
path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc
if not os.path.isdir(path):
msg = 'Microsoft Visual C++ directory not found'
raise distutils.errors.DistutilsPlatformError(msg)
return path
@property
def WindowsSdkVersion(self):
"""
Microsoft Windows SDK versions.
"""
# Set Windows SDK versions for specified MSVC++ version
if self.vc_ver <= 9.0:
return ('7.0', '6.1', '6.0a')
elif self.vc_ver == 10.0:
return ('7.1', '7.0a')
elif self.vc_ver == 11.0:
return ('8.0', '8.0a')
elif self.vc_ver == 12.0:
return ('8.1', '8.1a')
elif self.vc_ver >= 14.0:
return ('10.0', '8.1')
@property
def WindowsSdkDir(self):
"""
Microsoft Windows SDK directory.
"""
sdkdir = ''
for ver in self.WindowsSdkVersion:
# Try to get it from registry
loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver)
sdkdir = self.ri.lookup(loc, 'installationfolder')
if sdkdir:
break
if not sdkdir or not os.path.isdir(sdkdir):
# Try to get "VC++ for Python" version from registry
path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
install_base = self.ri.lookup(path, 'installdir')
if install_base:
sdkdir = os.path.join(install_base, 'WinSDK')
if not sdkdir or not os.path.isdir(sdkdir):
# If fail, use default new path
for ver in self.WindowsSdkVersion:
intver = ver[:ver.rfind('.')]
path = r'Microsoft SDKs\Windows Kits\%s' % (intver)
d = os.path.join(self.ProgramFiles, path)
if os.path.isdir(d):
sdkdir = d
if not sdkdir or not os.path.isdir(sdkdir):
# If fail, use default old path
for ver in self.WindowsSdkVersion:
path = r'Microsoft SDKs\Windows\v%s' % ver
d = os.path.join(self.ProgramFiles, path)
if os.path.isdir(d):
sdkdir = d
if not sdkdir:
# If fail, use Platform SDK
sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK')
return sdkdir
@property
def WindowsSDKExecutablePath(self):
"""
Microsoft Windows SDK executable directory.
"""
# Find WinSDK NetFx Tools registry dir name
if self.vc_ver <= 11.0:
netfxver = 35
arch = ''
else:
netfxver = 40
            hidex86 = self.vc_ver <= 12.0
arch = self.pi.current_dir(x64=True, hidex86=hidex86)
fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
        # List all possible registry paths
regpaths = []
if self.vc_ver >= 14.0:
for ver in self.NetFxSdkVersion:
regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)]
for ver in self.WindowsSdkVersion:
regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
        # Return installation folder from the most recent path
for path in regpaths:
execpath = self.ri.lookup(path, 'installationfolder')
if execpath:
break
return execpath
@property
def FSharpInstallDir(self):
"""
Microsoft Visual F# directory.
"""
path = r'%0.1f\Setup\F#' % self.vc_ver
path = os.path.join(self.ri.visualstudio, path)
return self.ri.lookup(path, 'productdir') or ''
@property
def UniversalCRTSdkDir(self):
"""
Microsoft Universal CRT SDK directory.
"""
# Set Kit Roots versions for specified MSVC++ version
if self.vc_ver >= 14.0:
vers = ('10', '81')
else:
vers = ()
        # Find path of the most recent Kit
for ver in vers:
sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
'kitsroot%s' % ver)
if sdkdir:
break
return sdkdir or ''
@property
def NetFxSdkVersion(self):
"""
Microsoft .NET Framework SDK versions.
"""
# Set FxSdk versions for specified MSVC++ version
if self.vc_ver >= 14.0:
return ('4.6.1', '4.6')
else:
return ()
@property
def NetFxSdkDir(self):
"""
Microsoft .NET Framework SDK directory.
"""
for ver in self.NetFxSdkVersion:
loc = os.path.join(self.ri.netfx_sdk, ver)
sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
if sdkdir:
break
return sdkdir or ''
@property
def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
@property
def FrameworkDir64(self):
"""
Microsoft .NET Framework 64bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
@property
def FrameworkVersion32(self):
"""
Microsoft .NET Framework 32bit versions.
"""
return self._find_dot_net_versions(32)
@property
def FrameworkVersion64(self):
"""
Microsoft .NET Framework 64bit versions.
"""
return self._find_dot_net_versions(64)
def _find_dot_net_versions(self, bits=32):
"""
Find Microsoft .NET Framework versions.
Parameters
----------
bits: int
Platform number of bits: 32 or 64.
"""
# Find actual .NET version
ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) or ''
# Set .NET versions for specified MSVC++ version
if self.vc_ver >= 12.0:
frameworkver = (ver, 'v4.0')
elif self.vc_ver >= 10.0:
frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver,
'v3.5')
elif self.vc_ver == 9.0:
frameworkver = ('v3.5', 'v2.0.50727')
        elif self.vc_ver == 8.0:
frameworkver = ('v3.0', 'v2.0.50727')
return frameworkver
class EnvironmentInfo:
"""
Return environment variables for specified Microsoft Visual C++ version
and platform : Lib, Include, Path and libpath.
This function is compatible with Microsoft Visual C++ 9.0 to 14.0.
Script created by analysing Microsoft environment configuration files like
"vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
Parameters
----------
arch: str
Target architecture.
vc_ver: float
Required Microsoft Visual C++ version. If not set, autodetect the last
version.
vc_min_ver: float
Minimum Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase
    # variable names from Microsoft source files for easier comparison.
def __init__(self, arch, vc_ver=None, vc_min_ver=None):
self.pi = PlatformInfo(arch)
self.ri = RegistryInfo(self.pi)
self.si = SystemInfo(self.ri, vc_ver)
if vc_min_ver:
if self.vc_ver < vc_min_ver:
err = 'No suitable Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
@property
def vc_ver(self):
"""
Microsoft Visual C++ version.
"""
return self.si.vc_ver
@property
def VSTools(self):
"""
Microsoft Visual Studio Tools
"""
paths = [r'Common7\IDE', r'Common7\Tools']
if self.vc_ver >= 14.0:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
paths += [r'Team Tools\Performance Tools']
paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
return [os.path.join(self.si.VSInstallDir, path) for path in paths]
@property
def VCIncludes(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Includes
"""
return [os.path.join(self.si.VCInstallDir, 'Include'),
os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')]
@property
def VCLibraries(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Libraries
"""
arch_subdir = self.pi.target_dir(hidex86=True)
paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
if self.vc_ver >= 14.0:
paths += [r'Lib\store%s' % arch_subdir]
return [os.path.join(self.si.VCInstallDir, path) for path in paths]
@property
def VCStoreRefs(self):
"""
Microsoft Visual C++ store references Libraries
"""
if self.vc_ver < 14.0:
return []
return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')]
@property
def VCTools(self):
"""
Microsoft Visual C++ Tools
"""
si = self.si
tools = [os.path.join(si.VCInstallDir, 'VCPackages')]
        forcex86 = self.vc_ver <= 10.0
arch_subdir = self.pi.cross_dir(forcex86)
if arch_subdir:
tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
if self.vc_ver >= 14.0:
path = 'Bin%s' % self.pi.current_dir(hidex86=True)
tools += [os.path.join(si.VCInstallDir, path)]
else:
tools += [os.path.join(si.VCInstallDir, 'Bin')]
return tools
@property
def OSLibraries(self):
"""
Microsoft Windows SDK Libraries
"""
if self.vc_ver <= 10.0:
arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
else:
arch_subdir = self.pi.target_dir(x64=True)
lib = os.path.join(self.si.WindowsSdkDir, 'lib')
libver = self._get_content_dirname(lib)
return [os.path.join(lib, '%sum%s' % (libver, arch_subdir))]
@property
def OSIncludes(self):
"""
Microsoft Windows SDK Include
"""
include = os.path.join(self.si.WindowsSdkDir, 'include')
if self.vc_ver <= 10.0:
return [include, os.path.join(include, 'gl')]
else:
if self.vc_ver >= 14.0:
sdkver = self._get_content_dirname(include)
else:
sdkver = ''
return [os.path.join(include, '%sshared' % sdkver),
os.path.join(include, '%sum' % sdkver),
os.path.join(include, '%swinrt' % sdkver)]
@property
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths
"""
ref = os.path.join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vc_ver <= 9.0:
libpath += self.OSLibraries
if self.vc_ver >= 11.0:
libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
if self.vc_ver >= 14.0:
libpath += [
ref,
os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
os.path.join(
ref,
'Windows.Foundation.UniversalApiContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Foundation.FoundationContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0',
),
os.path.join(
self.si.WindowsSdkDir,
'ExtensionSDKs',
'Microsoft.VCLibs',
'%0.1f' % self.vc_ver,
'References',
'CommonConfiguration',
'neutral',
),
]
return libpath
@property
def SdkTools(self):
"""
Microsoft Windows SDK Tools
"""
bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
tools = [os.path.join(self.si.WindowsSdkDir, bin_dir)]
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
tools += [os.path.join(self.si.WindowsSdkDir, path)]
if self.vc_ver == 10.0 or self.vc_ver == 11.0:
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
tools += [os.path.join(self.si.WindowsSdkDir, path)]
if self.si.WindowsSDKExecutablePath:
tools += [self.si.WindowsSDKExecutablePath]
return tools
@property
def SdkSetup(self):
"""
Microsoft Windows SDK Setup
"""
if self.vc_ver > 9.0:
return []
return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
@property
def FxTools(self):
"""
Microsoft .NET Framework Tools
"""
pi = self.pi
si = self.si
if self.vc_ver <= 10.0:
include32 = True
include64 = not pi.target_is_x86() and not pi.current_is_x86()
else:
include32 = pi.target_is_x86() or pi.current_is_x86()
include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
tools = []
if include32:
tools += [os.path.join(si.FrameworkDir32, ver)
for ver in si.FrameworkVersion32]
if include64:
tools += [os.path.join(si.FrameworkDir64, ver)
for ver in si.FrameworkVersion64]
return tools
@property
def NetFxSDKLibraries(self):
"""
Microsoft .Net Framework SDK Libraries
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
arch_subdir = self.pi.target_dir(x64=True)
return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
@property
def NetFxSDKIncludes(self):
"""
Microsoft .Net Framework SDK Includes
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
@property
def VsTDb(self):
"""
Microsoft Visual Studio Team System Database
"""
return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
@property
def MSBuild(self):
"""
Microsoft Build Engine
"""
if self.vc_ver < 12.0:
return []
arch_subdir = self.pi.current_dir(hidex86=True)
path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir)
return [os.path.join(self.si.ProgramFilesx86, path)]
@property
def HTMLHelpWorkshop(self):
"""
Microsoft HTML Help Workshop
"""
if self.vc_ver < 11.0:
return []
return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
@property
def UCRTLibraries(self):
"""
Microsoft Universal CRT Libraries
"""
if self.vc_ver < 14.0:
return []
arch_subdir = self.pi.target_dir(x64=True)
lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
ucrtver = self._get_content_dirname(lib)
return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
@property
def UCRTIncludes(self):
"""
Microsoft Universal CRT Include
"""
if self.vc_ver < 14.0:
return []
include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
ucrtver = self._get_content_dirname(include)
return [os.path.join(include, '%sucrt' % ucrtver)]
@property
def FSharp(self):
"""
Microsoft Visual F#
"""
        # F# directories only apply to Visual Studio 11.0 and 12.0
        if self.vc_ver < 11.0 or self.vc_ver > 12.0:
            return []
        # _build_paths() expects an iterable of paths, so wrap the single
        # directory (a bare string would be chained character by character)
        return [self.si.FSharpInstallDir] if self.si.FSharpInstallDir else []
@property
def VCRuntimeRedist(self):
"""
        Microsoft Visual C++ runtime redistributable dll
"""
arch_subdir = self.pi.target_dir(x64=True)
vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
vcruntime = vcruntime % (arch_subdir, self.vc_ver, self.vc_ver)
return os.path.join(self.si.VCInstallDir, vcruntime)
def return_env(self, exists=True):
"""
Return environment dict.
Parameters
----------
exists: bool
            If True, only return existing paths.
"""
env = dict(
include=self._build_paths('include',
[self.VCIncludes,
self.OSIncludes,
self.UCRTIncludes,
self.NetFxSDKIncludes],
exists),
lib=self._build_paths('lib',
[self.VCLibraries,
self.OSLibraries,
self.FxTools,
self.UCRTLibraries,
self.NetFxSDKLibraries],
exists),
libpath=self._build_paths('libpath',
[self.VCLibraries,
self.FxTools,
self.VCStoreRefs,
self.OSLibpath],
exists),
path=self._build_paths('path',
[self.VCTools,
self.VSTools,
self.VsTDb,
self.SdkTools,
self.SdkSetup,
self.FxTools,
self.MSBuild,
self.HTMLHelpWorkshop,
self.FSharp],
exists),
)
if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist):
env['py_vcruntime_redist'] = self.VCRuntimeRedist
return env
def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = safe_env.get(name, '').split(os.pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return os.pathsep.join(unique_paths)
# from Python docs
def _unique_everseen(self, iterable, key=None):
"""
List unique elements, preserving order.
Remember all elements ever seen.
_unique_everseen('AAAABBBCCDAABBB') --> A B C D
_unique_everseen('ABBCcAD', str.lower) --> A B C D
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def _get_content_dirname(self, path):
"""
Return name of the first dir in path or '' if no dir found.
Parameters
----------
path: str
            Path in which to search for a dir.
Return
------
foldername: str
"name\" or ""
"""
try:
name = os.listdir(path)
if name:
return '%s\\' % name[0]
return ''
except (OSError, IOError):
return ''
| bsd-2-clause |
redPanther/hyperion.ng | effects/candle.py | 4 | 2098 |
# Candleflicker effect by penfold42
# Algorithm courtesy of
# https://cpldcpu.com/2013/12/08/hacking-a-candleflicker-led/
# candles can be:
# "list" to flicker the leds given in ledlist (a number or list of numbers)
# "all" to flicker all the leds randomly
# "all-together" to flicker all the leds in unison
import hyperion
import time
import colorsys
import random
# Get parameters
color = hyperion.args.get('color', (255,138,0))
colorShift = float(hyperion.args.get('colorShift', 1))/100.0
brightness = float(hyperion.args.get('brightness', 100))/100.0
sleepTime = float(hyperion.args.get('sleepTime', 0.14))
candles = hyperion.args.get('candles', "all")
ledlist = hyperion.args.get('ledlist', "1")
candlelist = ()
if (candles == "list") and (type(ledlist) is str):
for s in ledlist.split(','):
i = int(s)
if (i<hyperion.ledCount):
candlelist += (i,)
elif (candles == "list") and (type(ledlist) is list):
for s in (ledlist):
i = int(s)
if (i<hyperion.ledCount):
candlelist += (i,)
else:
candlelist = range(hyperion.ledCount)
# Convert rgb color to hsv
hsv = colorsys.rgb_to_hsv(color[0]/255.0, color[1]/255.0, color[2]/255.0)
def CandleRgb():
hue = random.uniform(hsv[0]-colorShift, hsv[0]+colorShift) % 1.0
	RAND = random.randint(0, 15)
	while (RAND & 0x0c) == 0:
		RAND = random.randint(0, 15)
	val = (min(RAND, 15)/15.0001) * brightness
	frgb = colorsys.hsv_to_rgb(hue, hsv[1], val)
return (int(255*frgb[0]), int(255*frgb[1]), int(255*frgb[2]))
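# Worked example of the flicker rule above (assumed draw): RAND = 13 is
# 0b1101, so RAND & 0x0c is nonzero and the draw is kept; the value factor
# becomes min(13, 15)/15.0001 ~ 0.867 before the user brightness scaling.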
ledData = bytearray(hyperion.ledCount * (0,0,0) )
while not hyperion.abort():
if (candles == "all-together"):
rgb = CandleRgb()
for lednum in candlelist:
ledData[3*lednum+0] = rgb[0]
ledData[3*lednum+1] = rgb[1]
ledData[3*lednum+2] = rgb[2]
elif (candles == "all"):
for lednum in candlelist:
rgb = CandleRgb()
ledData[3*lednum+0] = rgb[0]
ledData[3*lednum+1] = rgb[1]
ledData[3*lednum+2] = rgb[2]
else:
for lednum in candlelist:
rgb = CandleRgb()
ledData[3*lednum+0] = rgb[0]
ledData[3*lednum+1] = rgb[1]
ledData[3*lednum+2] = rgb[2]
hyperion.setColor (ledData)
time.sleep(sleepTime)
| mit |
agentfog/qiime | tests/test_parallel/test_map_reads_to_reference.py | 15 | 18488 | #!/usr/bin/env python
# File created on 07 Jul 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from glob import glob
from shutil import rmtree
from os import close
from os.path import exists, join
from tempfile import mkstemp, mkdtemp
from skbio.util import remove_files
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from biom import load_table
from qiime.test import initiate_timeout, disable_timeout
from qiime.util import get_qiime_temp_dir
from qiime.parse import parse_otu_map
from qiime.parallel.map_reads_to_reference import (ParallelDatabaseMapperBlat,
ParallelDatabaseMapperUsearch, ParallelDatabaseMapperBwaShort)
class ParallelDatabaseMapperTests(TestCase):
def setUp(self):
""" """
self.files_to_remove = []
self.dirs_to_remove = []
tmp_dir = get_qiime_temp_dir()
self.test_out = mkdtemp(dir=tmp_dir,
prefix='qiime_parallel_tests_',
suffix='')
self.dirs_to_remove.append(self.test_out)
fd, self.refseqs1_fp = mkstemp(dir=self.test_out,
prefix='qiime_refseqs',
suffix='.fasta')
close(fd)
refseqs1_f = open(self.refseqs1_fp, 'w')
refseqs1_f.write(refseqs1)
refseqs1_f.close()
self.files_to_remove.append(self.refseqs1_fp)
fd, self.refseqs2_fp = mkstemp(dir=self.test_out,
prefix='qiime_refseqs',
suffix='.fasta')
close(fd)
refseqs2_f = open(self.refseqs2_fp, 'w')
refseqs2_f.write(refseqs2)
refseqs2_f.close()
self.files_to_remove.append(self.refseqs2_fp)
fd, self.inseqs1_fp = mkstemp(dir=self.test_out,
prefix='qiime_inseqs',
suffix='.fasta')
close(fd)
inseqs1_f = open(self.inseqs1_fp, 'w')
inseqs1_f.write(inseqs1)
inseqs1_f.close()
self.files_to_remove.append(self.inseqs1_fp)
fd, self.inseqs2_fp = mkstemp(dir=self.test_out,
prefix='qiime_inseqs',
suffix='.fasta')
close(fd)
inseqs2_f = open(self.inseqs2_fp, 'w')
inseqs2_f.write(inseqs2)
inseqs2_f.close()
self.files_to_remove.append(self.inseqs2_fp)
initiate_timeout(60)
def tearDown(self):
""" """
disable_timeout()
remove_files(self.files_to_remove)
# remove directories last, so we don't get errors
# trying to remove files which may be in the directories
for d in self.dirs_to_remove:
if exists(d):
rmtree(d)
class ParallelDatabaseMapperUsearchTests(ParallelDatabaseMapperTests):
def test_parallel_database_mapper_usearch(self):
""" parallel_database_mapper_usearch functions as expected """
params = {'refseqs_fp': self.refseqs1_fp,
'min_percent_id': 0.97,
'evalue': 1e-10,
'max_accepts': 1,
'max_rejects': 32,
'queryalnfract': 0.35,
'targetalnfract': 0.0,
'observation_metadata_fp': None
}
app = ParallelDatabaseMapperUsearch()
r = app(self.inseqs1_fp,
self.test_out,
params,
job_prefix='PTEST',
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = glob(
join(self.test_out, 'observation_map.txt'))[0]
omap = parse_otu_map(open(observation_map_fp, 'U'))
self.assertEqual(len(omap[0]), 3)
self.assertItemsEqual(
omap[1],
['eco:b0015',
'eco:b0122',
'eco:b0015:duplicate'])
self.assertItemsEqual(omap[2], ['eco:b0015-pr', 'eco:b0122-pr'])
class ParallelDatabaseMapperBlatTests(ParallelDatabaseMapperTests):
def test_parallel_database_mapper_blat(self):
""" parallel_database_mapper_blat functions as expected """
params = {'refseqs_fp': self.refseqs1_fp,
'min_percent_id': 0.97,
'evalue': 1e-10,
'max_accepts': 1,
'max_rejects': 32,
'queryalnfract': 0.35,
'targetalnfract': 0.0,
'observation_metadata_fp': None
}
app = ParallelDatabaseMapperBlat()
r = app(self.inseqs1_fp,
self.test_out,
params,
job_prefix='PTEST',
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = glob(
join(self.test_out, 'observation_map.txt'))[0]
omap = parse_otu_map(open(observation_map_fp, 'U'))
self.assertEqual(len(omap[0]), 3)
self.assertItemsEqual(
omap[1],
['eco:b0015',
'eco:b0122',
'eco:b0015:duplicate'])
self.assertItemsEqual(omap[2], ['eco:b0015-pr', 'eco:b0122-pr'])
class ParallelDatabaseMapperBwaShortTests(ParallelDatabaseMapperTests):
def test_bwa_short_database_mapper(self):
"""bwa_short_database_mapper functions as expected """
params = {'refseqs_fp': self.refseqs2_fp,
'max_diff': None,
'observation_metadata_fp': None}
app = ParallelDatabaseMapperBwaShort()
r = app(self.inseqs2_fp,
self.test_out,
params,
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = join(self.test_out, 'observation_map.txt')
self.assertTrue(exists(observation_map_fp))
observation_table_fp = join(self.test_out, 'observation_table.biom')
table = load_table(observation_table_fp)
self.assertItemsEqual(table.ids(), ['s2', 's1'])
self.assertItemsEqual(
table.ids(axis='observation'),
['r1',
'r2',
'r3',
'r4',
'r5'])
self.assertEqual(table.sum(), 6)
def test_bwa_short_database_mapper_alt_params(self):
"""bwa_short_database_mapper functions as expected """
params = {'refseqs_fp': self.refseqs2_fp,
'max_diff': 1,
'observation_metadata_fp': None}
app = ParallelDatabaseMapperBwaShort()
r = app(self.inseqs2_fp,
self.test_out,
params,
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = join(self.test_out, 'observation_map.txt')
self.assertTrue(exists(observation_map_fp))
observation_table_fp = join(self.test_out, 'observation_table.biom')
table = load_table(observation_table_fp)
self.assertItemsEqual(table.ids(), ['s2', 's1'])
self.assertItemsEqual(table.ids(axis='observation'),
['r2', 'r3', 'r4', 'r5'])
self.assertEqual(table.sum(), 5)
refseqs1 = """>eco:b0001-pr
MKRISTTITTTITITTGNGAG
>eco:b0015-pr dnaJ
MAKQDYYEILGVSKTAEEREIRKAYKRLAMKYHPDRNQGDKEAEAKFKEIKEAYEVLTDS
QKRAAYDQYGHAAFEQGGMGGGGFGGGADFSDIFGDVFGDIFGGGRGRQRAARGADLRYN
MELTLEEAVRGVTKEIRIPTLEECDVCHGSGAKPGTQPQTCPTCHGSGQVQMRQGFFAVQ
QTCPHCQGRGTLIKDPCNKCHGHGRVERSKTLSVKIPAGVDTGDRIRLAGEGEAGEHGAP
AGDLYVQVQVKQHPIFEREGNNLYCEVPINFAMAALGGEIEVPTLDGRVKLKVPGETQTG
KLFRMRGKGVKSVRGGAQGDLLCRVVVETPVGLNERQKQLLQELQESFGGPTGEHNSPRS
KSFFDGVKKFFDDLTR
>eco:b0122-pr
MKTFFRTVLFGSLMAVCANSYALSESEAEDMADLTAVFVFLKNDCGYQNLPNGQIRRALV
FFAQQNQWDLSNYDTFDMKALGEDSYRDLSGIGIPVAKKCKALARDSLSLLAYVK
"""
refseqs2 = """>r1
atgaaacgcattagcaccaccattaccaccaccatcaccattaccacaggtaacggtgcg
ggctga
>r2 some comments...
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgagaa
>r3
atgaagacgtttttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaataa
>r4
atgaagaaaattttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaatcc
>r5 some comments...
aatgactaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
"""
inseqs1 = """>eco:b0001 thrL; thr operon leader peptide; K08278 thr operon leader peptide (N)
atgaaacgcattagcaccaccattaccaccaccatcaccattaccacaggtaacggtgcg
ggctga
>eco:b0015 dnaJ; chaperone Hsp40, co-chaperone with DnaK; K03686 molecular chaperone DnaJ (N)
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
>eco:b0122
atgaagacgtttttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaataa
>eco:b0015:duplicate
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
"""
inseqs2 = """>s1_1
atgttacgcattagcaccaccattaccaccaccatcaccattaccacaggtaacggtgcg
ggctga
>s2_2 some comments...
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgagaa
>s1_3
atgaagacgtttttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaataa
>s1_4
atgaagaaaattttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaatcc
>s1_5
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
>s1_6 some comments...
aatgactaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
"""
if __name__ == "__main__":
main()
| gpl-2.0 |
fxtentacle/phantomjs | src/qt/qtbase/util/local_database/dateconverter.py | 105 | 5318 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
import re
def _convert_pattern(pattern):
# patterns from http://www.unicode.org/reports/tr35/#Date_Format_Patterns
qt_regexps = {
r"yyy{3,}" : "yyyy", # more that three digits hence convert to four-digit year
r"L" : "M", # stand-alone month names. not supported.
r"g{1,}": "", # modified julian day. not supported.
r"S{1,}" : "", # fractional seconds. not supported.
r"A{1,}" : "" # milliseconds in day. not supported.
}
qt_patterns = {
"G" : "", "GG" : "", "GGG" : "", "GGGG" : "", "GGGGG" : "", # Era. not supported.
"y" : "yyyy", # four-digit year without leading zeroes
"Q" : "", "QQ" : "", "QQQ" : "", "QQQQ" : "", # quarter. not supported.
"q" : "", "qq" : "", "qqq" : "", "qqqq" : "", # quarter. not supported.
"MMMMM" : "MMM", # narrow month name.
"LLLLL" : "MMM", # stand-alone narrow month name.
"l" : "", # special symbol for chinese leap month. not supported.
"w" : "", "W" : "", # week of year/month. not supported.
"D" : "", "DD" : "", "DDD" : "", # day of year. not supported.
"F" : "", # day of week in month. not supported.
"E" : "ddd", "EE" : "ddd", "EEE" : "ddd", "EEEEE" : "ddd", "EEEE" : "dddd", # day of week
"e" : "ddd", "ee" : "ddd", "eee" : "ddd", "eeeee" : "ddd", "eeee" : "dddd", # local day of week
"c" : "ddd", "cc" : "ddd", "ccc" : "ddd", "ccccc" : "ddd", "cccc" : "dddd", # stand-alone local day of week
"a" : "AP", # AM/PM
"K" : "h", # Hour 0-11
"k" : "H", # Hour 1-24
"j" : "", # special reserved symbol.
"z" : "t", "zz" : "t", "zzz" : "t", "zzzz" : "t", # timezone
"Z" : "t", "ZZ" : "t", "ZZZ" : "t", "ZZZZ" : "t", # timezone
"v" : "t", "vv" : "t", "vvv" : "t", "vvvv" : "t", # timezone
"V" : "t", "VV" : "t", "VVV" : "t", "VVVV" : "t" # timezone
}
    if pattern in qt_patterns:
        return qt_patterns[pattern]
    for r, v in qt_regexps.items():
        pattern = re.sub(r, v, pattern)
return pattern
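# Illustrative conversions, traced by hand from the tables above (CLDR
# patterns on the left, Qt patterns on the right):
#
#     convert_date("EEEE, d MMMM y")  # -> "dddd, d MMMM yyyy"
#     convert_date("h:mm a")          # -> "h:mm AP"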
def convert_date(input):
result = ""
patterns = "GyYuQqMLlwWdDFgEecahHKkjmsSAzZvV"
last = ""
inquote = 0
chars_to_strip = " -"
for c in input:
if c == "'":
inquote = inquote + 1
if inquote % 2 == 0:
if c in patterns:
if not last:
last = c
else:
if c in last:
last += c
else:
# pattern changed
converted = _convert_pattern(last)
result += converted
if not converted:
result = result.rstrip(chars_to_strip)
last = c
continue
if last:
# pattern ended
converted = _convert_pattern(last)
result += converted
if not converted:
result = result.rstrip(chars_to_strip)
last = ""
result += c
if last:
converted = _convert_pattern(last)
result += converted
if not converted:
result = result.rstrip(chars_to_strip)
return result.lstrip(chars_to_strip)
| bsd-3-clause |
vicky2135/lucious | tests/integration/offer/test_availability.py | 2 | 4516 | import datetime
from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.offer import models
from oscar.core.compat import get_user_model
from oscar.test.factories import (
OrderDiscountFactory, UserFactory, create_order)
User = get_user_model()
class TestAPerUserConditionalOffer(TestCase):
def setUp(self):
self.offer = models.ConditionalOffer(max_user_applications=1)
self.user = UserFactory()
def test_is_available_with_no_applications(self):
self.assertTrue(self.offer.is_available())
def test_max_applications_is_correct_when_no_applications(self):
self.assertEqual(1, self.offer.get_max_applications(self.user))
def test_max_applications_is_correct_when_equal_applications(self):
order = create_order(user=self.user)
OrderDiscountFactory(
order=order, offer_id=self.offer.id, frequency=1)
self.assertEqual(0, self.offer.get_max_applications(self.user))
def test_max_applications_is_correct_when_more_applications(self):
order = create_order(user=self.user)
OrderDiscountFactory(
order=order, offer_id=self.offer.id, frequency=5)
self.assertEqual(0, self.offer.get_max_applications(self.user))
class TestADateBasedConditionalOffer(TestCase):
def setUp(self):
self.start = datetime.date(2011, 1, 1)
self.end = datetime.date(2011, 2, 1)
self.offer = models.ConditionalOffer(start_datetime=self.start,
end_datetime=self.end)
def test_is_available_during_date_range(self):
test = datetime.date(2011, 1, 10)
self.assertTrue(self.offer.is_available(test_date=test))
def test_is_inactive_before_date_range(self):
test = datetime.date(2010, 3, 10)
self.assertFalse(self.offer.is_available(test_date=test))
def test_is_inactive_after_date_range(self):
test = datetime.date(2011, 3, 10)
self.assertFalse(self.offer.is_available(test_date=test))
def test_is_active_on_end_datetime(self):
self.assertTrue(self.offer.is_available(test_date=self.end))
class TestAConsumptionFrequencyBasedConditionalOffer(TestCase):
def setUp(self):
self.offer = models.ConditionalOffer(max_global_applications=4)
def test_is_available_with_no_applications(self):
self.assertTrue(self.offer.is_available())
def test_is_available_with_fewer_applications_than_max(self):
self.offer.num_applications = 3
self.assertTrue(self.offer.is_available())
def test_is_inactive_with_equal_applications_to_max(self):
self.offer.num_applications = 4
self.assertFalse(self.offer.is_available())
def test_is_inactive_with_more_applications_than_max(self):
self.offer.num_applications = 4
self.assertFalse(self.offer.is_available())
def test_restricts_number_of_applications_correctly_with_no_applications(self):
self.assertEqual(4, self.offer.get_max_applications())
def test_restricts_number_of_applications_correctly_with_fewer_applications_than_max(self):
self.offer.num_applications = 3
self.assertEqual(1, self.offer.get_max_applications())
def test_restricts_number_of_applications_correctly_with_more_applications_than_max(self):
self.offer.num_applications = 5
self.assertEqual(0, self.offer.get_max_applications())
class TestCappedDiscountConditionalOffer(TestCase):
def setUp(self):
self.offer = models.ConditionalOffer(
max_discount=D('100.00'),
total_discount=D('0.00'))
def test_is_available_when_below_threshold(self):
self.assertTrue(self.offer.is_available())
def test_is_inactive_when_on_threshold(self):
self.offer.total_discount = self.offer.max_discount
self.assertFalse(self.offer.is_available())
def test_is_inactive_when_above_threshold(self):
self.offer.total_discount = self.offer.max_discount + D('10.00')
self.assertFalse(self.offer.is_available())
class TestASuspendedOffer(TestCase):
def setUp(self):
self.offer = models.ConditionalOffer(
status=models.ConditionalOffer.SUSPENDED)
def test_is_unavailable(self):
self.assertFalse(self.offer.is_available())
def test_lists_suspension_as_an_availability_restriction(self):
restrictions = self.offer.availability_restrictions()
self.assertEqual(1, len(restrictions))
| bsd-3-clause |
sumit4iit/django-guardian | guardian/admin.py | 9 | 14646 | from django import forms
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from guardian.forms import UserObjectPermissionsForm
from guardian.forms import GroupObjectPermissionsForm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_perms_for_model
from guardian.models import User, Group
class AdminUserObjectPermissionsForm(UserObjectPermissionsForm):
"""
    Extends :form:`UserObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns a
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class AdminGroupObjectPermissionsForm(GroupObjectPermissionsForm):
"""
    Extends :form:`GroupObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns a
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class GuardedModelAdmin(admin.ModelAdmin):
"""
Extends ``django.contrib.admin.ModelAdmin`` class. Provides some extra
views for object permissions management at admin panel. It also changes
default ``change_form_template`` option to
    ``'admin/guardian/model/change_form.html'``, which is required for the
    object permissions related url to be shown properly on the model pages.
**Extra options**
``GuardedModelAdmin.obj_perms_manage_template``
*Default*: ``admin/guardian/model/obj_perms_manage.html``
``GuardedModelAdmin.obj_perms_manage_user_template``
*Default*: ``admin/guardian/model/obj_perms_manage_user.html``
``GuardedModelAdmin.obj_perms_manage_group_template``
*Default*: ``admin/guardian/model/obj_perms_manage_group.html``
``GuardedModelAdmin.user_can_access_owned_objects_only``
*Default*: ``False``
        If this is set to ``True``, ``request.user`` is used to filter out
        objects he or she doesn't own (checking the ``user`` field of the
        model; the field name may be overridden by the
        ``user_owned_objects_field`` option).
.. note::
Please remember that this will **NOT** affect superusers!
Admins would still see all items.
``GuardedModelAdmin.user_owned_objects_field``
*Default*: ``user``
**Usage example**
Just use :admin:`GuardedModelAdmin` instead of
``django.contrib.admin.ModelAdmin``.
.. code-block:: python
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from myapp.models import Author
class AuthorAdmin(GuardedModelAdmin):
pass
admin.site.register(Author, AuthorAdmin)
"""
change_form_template = \
'admin/guardian/model/change_form.html'
obj_perms_manage_template = \
'admin/guardian/model/obj_perms_manage.html'
obj_perms_manage_user_template = \
'admin/guardian/model/obj_perms_manage_user.html'
obj_perms_manage_group_template = \
'admin/guardian/model/obj_perms_manage_group.html'
user_can_access_owned_objects_only = False
user_owned_objects_field = 'user'
def queryset(self, request):
qs = super(GuardedModelAdmin, self).queryset(request)
if self.user_can_access_owned_objects_only and \
not request.user.is_superuser:
filters = {self.user_owned_objects_field: request.user}
qs = qs.filter(**filters)
return qs
def get_urls(self):
"""
Extends standard admin model urls with the following:
- ``.../permissions/``
- ``.../permissions/user-manage/<user_id>/``
- ``.../permissions/group-manage/<group_id>/``
.. note::
            ``...`` above is the standard instance detail url (i.e.
            ``/admin/flatpages/1/``)
"""
urls = super(GuardedModelAdmin, self).get_urls()
info = self.model._meta.app_label, self.model._meta.module_name
myurls = patterns('',
url(r'^(?P<object_pk>.+)/permissions/$',
view=self.admin_site.admin_view(self.obj_perms_manage_view),
name='%s_%s_permissions' % info),
url(r'^(?P<object_pk>.+)/permissions/user-manage/(?P<user_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_user_view),
name='%s_%s_permissions_manage_user' % info),
url(r'^(?P<object_pk>.+)/permissions/group-manage/(?P<group_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_group_view),
name='%s_%s_permissions_manage_group' % info),
)
return myurls + urls
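    # Usage sketch (assumed names): with the hypothetical ``flatpages`` app
    # from the note above and the default admin site name, the permissions
    # page of an instance can be reversed as
    # reverse('admin:flatpages_flatpage_permissions', args=[obj.pk]).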
def get_obj_perms_base_context(self, request, obj):
"""
Returns context dictionary with common admin and object permissions
related content.
"""
context = {
'adminform': {'model_admin': self},
'object': obj,
'app_label': self.model._meta.app_label,
'opts': self.model._meta,
'original': hasattr(obj, '__unicode__') and obj.__unicode__() or\
str(obj),
'has_change_permission': self.has_change_permission(request, obj),
'model_perms': get_perms_for_model(obj),
'title': _("Object permissions"),
}
return context
def obj_perms_manage_view(self, request, object_pk):
"""
Main object permissions view. Presents all users and groups with any
object permissions for the current model *instance*. Users or groups
        without object permissions for the related *instance* would **not** be
        shown. To add or manage a user or group, use the links or forms
        presented within the page.
"""
obj = get_object_or_404(self.queryset(request), pk=object_pk)
users_perms = SortedDict(
get_users_with_perms(obj, attach_perms=True,
with_group_users=False))
users_perms.keyOrder.sort(key=lambda user: user.username)
groups_perms = SortedDict(
get_groups_with_perms(obj, attach_perms=True))
groups_perms.keyOrder.sort(key=lambda group: group.name)
if request.method == 'POST' and 'submit_manage_user' in request.POST:
user_form = UserManage(request.POST)
group_form = GroupManage()
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
if user_form.is_valid():
user_id = user_form.cleaned_data['user'].id
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user_id]
)
return redirect(url)
elif request.method == 'POST' and 'submit_manage_group' in request.POST:
user_form = UserManage()
group_form = GroupManage(request.POST)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
if group_form.is_valid():
group_id = group_form.cleaned_data['group'].id
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group_id]
)
return redirect(url)
else:
user_form = UserManage()
group_form = GroupManage()
context = self.get_obj_perms_base_context(request, obj)
context['users_perms'] = users_perms
context['groups_perms'] = groups_perms
context['user_form'] = user_form
context['group_form'] = group_form
return render_to_response(self.get_obj_perms_manage_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_template(self):
"""
        Returns the main object permissions admin template. May be overridden
        if there is a need to change it dynamically.
        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function returns
           ``"admin/guardian/contrib/grappelli/obj_perms_manage.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage.html'
return self.obj_perms_manage_template
def obj_perms_manage_user_view(self, request, object_pk, user_id):
"""
        Manages the selected user's permissions for the current object.
"""
user = get_object_or_404(User, id=user_id)
obj = get_object_or_404(self.queryset(request), pk=object_pk)
form_class = self.get_obj_perms_manage_user_form()
form = form_class(user, obj, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
msg = ugettext("Permissions saved.")
messages.success(request, msg)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user.id]
)
return redirect(url)
context = self.get_obj_perms_base_context(request, obj)
context['user_obj'] = user
context['user_perms'] = get_perms(user, obj)
context['form'] = form
return render_to_response(self.get_obj_perms_manage_user_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_user_template(self):
"""
        Returns the object permissions for user admin template. May be
        overridden if there is a need to change it dynamically.
        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function returns
           ``"admin/guardian/contrib/grappelli/obj_perms_manage_user.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage_user.html'
return self.obj_perms_manage_user_template
def get_obj_perms_manage_user_form(self):
"""
Returns form class for user object permissions management. By default
:form:`AdminUserObjectPermissionsForm` is returned.
"""
return AdminUserObjectPermissionsForm
def obj_perms_manage_group_view(self, request, object_pk, group_id):
"""
        Manages the selected group's permissions for the current object.
"""
group = get_object_or_404(Group, id=group_id)
obj = get_object_or_404(self.queryset(request), pk=object_pk)
form_class = self.get_obj_perms_manage_group_form()
form = form_class(group, obj, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
msg = ugettext("Permissions saved.")
messages.success(request, msg)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group.id]
)
return redirect(url)
context = self.get_obj_perms_base_context(request, obj)
context['group_obj'] = group
context['group_perms'] = get_perms(group, obj)
context['form'] = form
return render_to_response(self.get_obj_perms_manage_group_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_group_template(self):
"""
        Returns the object permissions for group admin template. May be
        overridden if there is a need to change it dynamically.
        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function returns
           ``"admin/guardian/contrib/grappelli/obj_perms_manage_group.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage_group.html'
return self.obj_perms_manage_group_template
def get_obj_perms_manage_group_form(self):
"""
Returns form class for group object permissions management. By default
:form:`AdminGroupObjectPermissionsForm` is returned.
"""
return AdminGroupObjectPermissionsForm
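# A minimal sketch (hypothetical ``Document`` model and ``DocumentAdmin``)
# of the ownership options documented on GuardedModelAdmin above:
# non-superusers would only see objects whose ``owner`` field points at them.
#
#     class DocumentAdmin(GuardedModelAdmin):
#         user_can_access_owned_objects_only = True
#         user_owned_objects_field = 'owner'
#
#     admin.site.register(Document, DocumentAdmin)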
class UserManage(forms.Form):
user = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
error_messages = {
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters."),
'does_not_exist': _("This user does not exist")})
def clean_user(self):
"""
Returns ``User`` instance based on the given username.
"""
username = self.cleaned_data['user']
try:
user = User.objects.get(username=username)
return user
except User.DoesNotExist:
raise forms.ValidationError(
self.fields['user'].error_messages['does_not_exist'])
class GroupManage(forms.Form):
group = forms.CharField(max_length=80, error_messages={'does_not_exist':
_("This group does not exist")})
def clean_group(self):
"""
Returns ``Group`` instance based on the given group name.
"""
name = self.cleaned_data['group']
try:
group = Group.objects.get(name=name)
return group
except Group.DoesNotExist:
raise forms.ValidationError(
self.fields['group'].error_messages['does_not_exist'])
| bsd-2-clause |
gautam1858/tensorflow | tensorflow/contrib/learn/python/learn/estimators/svm.py | 42 | 9389 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support Vector Machine (SVM) Estimator (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
class SVM(estimator.Estimator):
"""Support Vector Machine (SVM) model for binary classification.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Currently, only linear SVMs are supported. For the underlying optimization
problem, the `SDCAOptimizer` is used. For performance and convergence tuning,
the num_loss_partitions parameter passed to `SDCAOptimizer` (see `__init__()`
method), should be set to (#concurrent train ops per worker) x (#workers). If
num_loss_partitions is larger or equal to this value, convergence is
guaranteed but becomes slower as num_loss_partitions increases. If it is set
to a smaller value, the optimizer is more aggressive in reducing the global
loss but convergence is not guaranteed. The recommended value in an
`Estimator` (where there is one process per worker) is the number of workers
running the train steps. It defaults to 1 (single machine).
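  For instance (a purely illustrative setup), 4 workers each running 2
  concurrent train ops would use num_loss_partitions = 2 x 4 = 8.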
Example:
```python
real_feature_column = real_valued_column(...)
sparse_feature_column = sparse_column_with_hash_bucket(...)
estimator = SVM(
example_id_column='example_id',
feature_columns=[real_feature_column, sparse_feature_column],
l2_regularization=10.0)
# Input builders
  def input_fn_train(): # returns x, y
...
  def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
  Input of `fit` and `evaluate` should have the following features, otherwise
  there will be a `KeyError`:
a feature with `key=example_id_column` whose value is a `Tensor` of dtype
string.
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
example_id_column,
feature_columns,
weight_column_name=None,
model_dir=None,
l1_regularization=0.0,
l2_regularization=0.0,
num_loss_partitions=1,
kernels=None,
config=None,
feature_engineering_fn=None):
"""Constructs an `SVM` estimator object.
Args:
example_id_column: A string defining the feature column name representing
example ids. Used to initialize the underlying optimizer.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
weight_column_name: A string defining feature column name representing
      weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example.
model_dir: Directory to save model parameters, graph and etc. This can
      also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
l1_regularization: L1-regularization parameter. Refers to global L1
regularization (across all examples).
l2_regularization: L2-regularization parameter. Refers to global L2
regularization (across all examples).
num_loss_partitions: number of partitions of the (global) loss function
optimized by the underlying optimizer (SDCAOptimizer).
kernels: A list of kernels for the SVM. Currently, no kernels are
supported. Reserved for future use for non-linear SVMs.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Raises:
ValueError: if kernels passed is not None.
"""
if kernels is not None:
raise ValueError("Kernel SVMs are not currently supported.")
optimizer = sdca_optimizer.SDCAOptimizer(
example_id_column=example_id_column,
num_loss_partitions=num_loss_partitions,
symmetric_l1_regularization=l1_regularization,
symmetric_l2_regularization=l2_regularization)
self._feature_columns = feature_columns
chief_hook = linear._SdcaUpdateWeightsHook() # pylint: disable=protected-access
super(SVM, self).__init__(
model_fn=linear.sdca_model_fn,
model_dir=model_dir,
config=config,
params={
"head": head_lib.binary_svm_head(
weight_column_name=weight_column_name,
enable_centered_bias=False),
"feature_columns": feature_columns,
"optimizer": optimizer,
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
},
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Runs inference to determine the predicted class."""
key = prediction_key.PredictionKey.CLASSES
preds = super(SVM, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Runs inference to determine the class probability predictions."""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(SVM, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
# pylint: enable=protected-access
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self, export_dir, signature_fn=None,
input_fn=None, default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
return self.export_with_defaults(
export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export_with_defaults(
self,
export_dir,
signature_fn=None,
input_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""Same as BaseEstimator.export, but uses some defaults."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(SVM, self).export(export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn or default_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
| apache-2.0 |
fmacias64/spyre | setup.py | 3 | 1217 | from setuptools import setup, find_packages
setup(
name='DataSpyre',
version='0.2.0',
description='Spyre makes it easy to build interactive web applications, and requires no knowledge of HTML, CSS, or Javascript.',
url='https://github.com/adamhajari/spyre',
author='Adam Hajari',
author_email='adam@nextbigsound.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: CherryPy',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Environment :: Web Environment',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
keywords='web application template data visualization',
include_package_data = True, # include everything in source control
packages = ['spyre'], # include all packages under src
package_data = {
'': ['*.js','*.css','*.html'],
'public': ['js/*.js','css/*.css'],
},
install_requires=[
"numpy",
"pandas",
"cherrypy",
"jinja2",
"matplotlib",
]
)
| mit |
mikemow/youtube-dl | youtube_dl/extractor/ellentv.py | 107 | 2708 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class EllenTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/videos/0-ipq1gsai/',
'md5': '8e3c576bf2e9bfff4d76565f56f94c9c',
'info_dict': {
'id': '0_ipq1gsai',
'ext': 'mp4',
'title': 'Fast Fingers of Fate',
'description': 'md5:587e79fbbd0d73b148bc596d99ce48e6',
'timestamp': 1428035648,
'upload_date': '20150403',
'uploader_id': 'batchUser',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://widgets.ellentube.com/videos/%s' % video_id,
video_id)
partner_id = self._search_regex(
r"var\s+partnerId\s*=\s*'([^']+)", webpage, 'partner id')
kaltura_id = self._search_regex(
[r'id="kaltura_player_([^"]+)"',
r"_wb_entry_id\s*:\s*'([^']+)",
r'data-kaltura-entry-id="([^"]+)'],
webpage, 'kaltura id')
return self.url_result('kaltura:%s:%s' % (partner_id, kaltura_id), 'Kaltura')
class EllenTVClipsIE(InfoExtractor):
IE_NAME = 'EllenTV:clips'
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
'info_dict': {
'id': 'meryl-streep-vanessa-hudgens',
'title': 'Meryl Streep, Vanessa Hudgens',
},
'playlist_mincount': 7,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist = self._extract_playlist(webpage)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist)
}
def _extract_playlist(self, webpage):
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
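        # _search_regex strips the surrounding "[{" and "}]" delimiters, so
        # re-add them below to rebuild a JSON array of clip objects.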
try:
return json.loads("[{" + json_string + "}]")
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
def _extract_entries(self, playlist):
return [
self.url_result(
'kaltura:%s:%s' % (item['kaltura_partner_id'], item['kaltura_entry_id']),
'Kaltura')
for item in playlist]
| unlicense |
xorstream/unicorn | qemu/header_gen.py | 5 | 105836 | #!/usr/bin/python
# Unicorn Emulator Engine
# By Dang Hoang Vu & Nguyen Anh Quynh
# syntax: ./header_gen.py <arm|aarch64|x86|name>
import sys
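# The symbols below are QEMU globals shared across targets; the generated
# headers are expected to remap each one to an arch-suffixed name so several
# emulated targets can be linked into a single Unicorn binary (an assumption
# based on the script's name and the usage line above; the emitting code is
# not shown here).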
symbols = (
'arm_release',
'aarch64_tb_set_jmp_target',
'ppc_tb_set_jmp_target',
'use_idiv_instructions_rt',
'tcg_target_deposit_valid',
'helper_power_down',
'check_exit_request',
'address_space_unregister',
'tb_invalidate_phys_page_fast',
'phys_mem_clean',
'tb_cleanup',
'memory_map',
'memory_map_ptr',
'memory_unmap',
'memory_free',
'free_code_gen_buffer',
'helper_raise_exception',
'tcg_enabled',
'tcg_exec_init',
'memory_register_types',
'cpu_exec_init_all',
'vm_start',
'resume_all_vcpus',
'a15_l2ctlr_read',
'a64_translate_init',
'aa32_generate_debug_exceptions',
'aa64_cacheop_access',
'aa64_daif_access',
'aa64_daif_write',
'aa64_dczid_read',
'aa64_fpcr_read',
'aa64_fpcr_write',
'aa64_fpsr_read',
'aa64_fpsr_write',
'aa64_generate_debug_exceptions',
'aa64_zva_access',
'aarch64_banked_spsr_index',
'aarch64_restore_sp',
'aarch64_save_sp',
'accel_find',
'accel_init_machine',
'accel_type',
'access_with_adjusted_size',
'add128',
'add16_sat',
'add16_usat',
'add192',
'add8_sat',
'add8_usat',
'add_cpreg_to_hashtable',
'add_cpreg_to_list',
'addFloat128Sigs',
'addFloat32Sigs',
'addFloat64Sigs',
'addFloatx80Sigs',
'add_qemu_ldst_label',
'address_space_access_valid',
'address_space_destroy',
'address_space_destroy_dispatch',
'address_space_get_flatview',
'address_space_init',
'address_space_init_dispatch',
'address_space_lookup_region',
'address_space_map',
'address_space_read',
'address_space_rw',
'address_space_translate',
'address_space_translate_for_iotlb',
'address_space_translate_internal',
'address_space_unmap',
'address_space_update_topology',
'address_space_update_topology_pass',
'address_space_write',
'addrrange_contains',
'addrrange_end',
'addrrange_equal',
'addrrange_intersection',
'addrrange_intersects',
'addrrange_make',
'adjust_endianness',
'all_helpers',
'alloc_code_gen_buffer',
'alloc_entry',
'always_true',
'arm1026_initfn',
'arm1136_initfn',
'arm1136_r2_initfn',
'arm1176_initfn',
'arm11mpcore_initfn',
'arm926_initfn',
'arm946_initfn',
'arm_ccnt_enabled',
'arm_cp_read_zero',
'arm_cp_reset_ignore',
'arm_cpu_do_interrupt',
'arm_cpu_exec_interrupt',
'arm_cpu_finalizefn',
'arm_cpu_get_phys_page_debug',
'arm_cpu_handle_mmu_fault',
'arm_cpu_initfn',
'arm_cpu_list',
'cpu_loop_exit',
'arm_cpu_post_init',
'arm_cpu_realizefn',
'arm_cpu_register_gdb_regs_for_features',
'arm_cpu_register_types',
'cpu_resume_from_signal',
'arm_cpus',
'arm_cpu_set_pc',
'arm_cp_write_ignore',
'arm_current_el',
'arm_dc_feature',
'arm_debug_excp_handler',
'arm_debug_target_el',
'arm_el_is_aa64',
'arm_env_get_cpu',
'arm_excp_target_el',
'arm_excp_unmasked',
'arm_feature',
'arm_generate_debug_exceptions',
'gen_intermediate_code',
'gen_intermediate_code_pc',
'arm_gen_test_cc',
'arm_gt_ptimer_cb',
'arm_gt_vtimer_cb',
'arm_handle_psci_call',
'arm_is_psci_call',
'arm_is_secure',
'arm_is_secure_below_el3',
'arm_ldl_code',
'arm_lduw_code',
'arm_log_exception',
'arm_reg_read',
'arm_reg_reset',
'arm_reg_write',
'restore_state_to_opc',
'arm_rmode_to_sf',
'arm_singlestep_active',
'tlb_fill',
'tlb_flush',
'tlb_flush_page',
'tlb_set_page',
'arm_translate_init',
'arm_v7m_class_init',
'arm_v7m_cpu_do_interrupt',
'ats_access',
'ats_write',
'bad_mode_switch',
'bank_number',
'bitmap_zero_extend',
'bp_wp_matches',
'breakpoint_invalidate',
'build_page_bitmap',
'bus_add_child',
'bus_class_init',
'bus_info',
'bus_unparent',
'cache_block_ops_cp_reginfo',
'cache_dirty_status_cp_reginfo',
'cache_test_clean_cp_reginfo',
'call_recip_estimate',
'can_merge',
'capacity_increase',
'ccsidr_read',
'check_ap',
'check_breakpoints',
'check_watchpoints',
'cho',
'clear_bit',
'clz32',
'clz64',
'cmp_flatrange_addr',
'code_gen_alloc',
'commonNaNToFloat128',
'commonNaNToFloat16',
'commonNaNToFloat32',
'commonNaNToFloat64',
'commonNaNToFloatx80',
'compute_abs_deadline',
'cond_name',
'configure_accelerator',
'container_get',
'container_info',
'container_register_types',
'contextidr_write',
'core_log_global_start',
'core_log_global_stop',
'core_memory_listener',
'cortexa15_cp_reginfo',
'cortex_a15_initfn',
'cortexa8_cp_reginfo',
'cortex_a8_initfn',
'cortexa9_cp_reginfo',
'cortex_a9_initfn',
'cortex_m3_initfn',
'count_cpreg',
'countLeadingZeros32',
'countLeadingZeros64',
'cp_access_ok',
'cpacr_write',
'cpreg_field_is_64bit',
'cp_reginfo',
'cpreg_key_compare',
'cpreg_make_keylist',
'cp_reg_reset',
'cpreg_to_kvm_id',
'cpsr_read',
'cpsr_write',
'cptype_valid',
'cpu_abort',
'cpu_arm_exec',
'cpu_arm_gen_code',
'cpu_arm_init',
'cpu_breakpoint_insert',
'cpu_breakpoint_remove',
'cpu_breakpoint_remove_all',
'cpu_breakpoint_remove_by_ref',
'cpu_can_do_io',
'cpu_can_run',
'cpu_class_init',
'cpu_common_class_by_name',
'cpu_common_exec_interrupt',
'cpu_common_get_arch_id',
'cpu_common_get_memory_mapping',
'cpu_common_get_paging_enabled',
'cpu_common_has_work',
'cpu_common_initfn',
'cpu_common_noop',
'cpu_common_parse_features',
'cpu_common_realizefn',
'cpu_common_reset',
'cpu_dump_statistics',
'cpu_exec_init',
'cpu_flush_icache_range',
'cpu_gen_init',
'cpu_get_clock',
'cpu_get_real_ticks',
'cpu_get_tb_cpu_state',
'cpu_handle_debug_exception',
'cpu_handle_guest_debug',
'cpu_inb',
'cpu_inl',
'cpu_interrupt',
'cpu_interrupt_handler',
'cpu_inw',
'cpu_io_recompile',
'cpu_is_stopped',
'cpu_ldl_code',
'cpu_ldub_code',
'cpu_lduw_code',
'cpu_memory_rw_debug',
'cpu_mmu_index',
'cpu_outb',
'cpu_outl',
'cpu_outw',
'cpu_physical_memory_clear_dirty_range',
'cpu_physical_memory_get_clean',
'cpu_physical_memory_get_dirty',
'cpu_physical_memory_get_dirty_flag',
'cpu_physical_memory_is_clean',
'cpu_physical_memory_is_io',
'cpu_physical_memory_map',
'cpu_physical_memory_range_includes_clean',
'cpu_physical_memory_reset_dirty',
'cpu_physical_memory_rw',
'cpu_physical_memory_set_dirty_flag',
'cpu_physical_memory_set_dirty_range',
'cpu_physical_memory_unmap',
'cpu_physical_memory_write_rom',
'cpu_physical_memory_write_rom_internal',
'cpu_register',
'cpu_register_types',
'cpu_restore_state',
'cpu_restore_state_from_tb',
'cpu_single_step',
'cpu_tb_exec',
'cpu_tlb_reset_dirty_all',
'cpu_to_be64',
'cpu_to_le32',
'cpu_to_le64',
'cpu_type_info',
'cpu_unassigned_access',
'cpu_watchpoint_address_matches',
'cpu_watchpoint_insert',
'cpu_watchpoint_remove',
'cpu_watchpoint_remove_all',
'cpu_watchpoint_remove_by_ref',
'crc32c_table',
'create_new_memory_mapping',
'csselr_write',
'cto32',
'ctr_el0_access',
'ctz32',
'ctz64',
'dacr_write',
'dbgbcr_write',
'dbgbvr_write',
'dbgwcr_write',
'dbgwvr_write',
'debug_cp_reginfo',
'debug_frame',
'debug_lpae_cp_reginfo',
'define_arm_cp_regs',
'define_arm_cp_regs_with_opaque',
'define_debug_regs',
'define_one_arm_cp_reg',
'define_one_arm_cp_reg_with_opaque',
'deposit32',
'deposit64',
'deregister_tm_clones',
'device_class_base_init',
'device_class_init',
'device_finalize',
'device_get_realized',
'device_initfn',
'device_post_init',
'device_reset',
'device_set_realized',
'device_type_info',
'disas_arm_insn',
'disas_coproc_insn',
'disas_dsp_insn',
'disas_iwmmxt_insn',
'disas_neon_data_insn',
'disas_neon_ls_insn',
'disas_thumb2_insn',
'disas_thumb_insn',
'disas_vfp_insn',
'disas_vfp_v8_insn',
'do_arm_semihosting',
'do_clz16',
'do_clz8',
'do_constant_folding',
'do_constant_folding_2',
'do_constant_folding_cond',
'do_constant_folding_cond2',
'do_constant_folding_cond_32',
'do_constant_folding_cond_64',
'do_constant_folding_cond_eq',
'do_fcvt_f16_to_f32',
'do_fcvt_f32_to_f16',
'do_ssat',
'do_usad',
'do_usat',
'do_v7m_exception_exit',
'dummy_c15_cp_reginfo',
'dummy_func',
'dummy_section',
'_DYNAMIC',
'_edata',
'_end',
'end_list',
'eq128',
'ErrorClass_lookup',
'error_copy',
'error_exit',
'error_get_class',
'error_get_pretty',
'error_setg_file_open',
'estimateDiv128To64',
'estimateSqrt32',
'excnames',
'excp_is_internal',
'extended_addresses_enabled',
'extended_mpu_ap_bits',
'extract32',
'extract64',
'extractFloat128Exp',
'extractFloat128Frac0',
'extractFloat128Frac1',
'extractFloat128Sign',
'extractFloat16Exp',
'extractFloat16Frac',
'extractFloat16Sign',
'extractFloat32Exp',
'extractFloat32Frac',
'extractFloat32Sign',
'extractFloat64Exp',
'extractFloat64Frac',
'extractFloat64Sign',
'extractFloatx80Exp',
'extractFloatx80Frac',
'extractFloatx80Sign',
'fcse_write',
'find_better_copy',
'find_default_machine',
'find_desc_by_name',
'find_first_bit',
'find_paging_enabled_cpu',
'find_ram_block',
'find_ram_offset',
'find_string',
'find_type',
'_fini',
'flatrange_equal',
'flatview_destroy',
'flatview_init',
'flatview_insert',
'flatview_lookup',
'flatview_ref',
'flatview_simplify',
'flatview_unref',
'float128_add',
'float128_compare',
'float128_compare_internal',
'float128_compare_quiet',
'float128_default_nan',
'float128_div',
'float128_eq',
'float128_eq_quiet',
'float128_is_quiet_nan',
'float128_is_signaling_nan',
'float128_le',
'float128_le_quiet',
'float128_lt',
'float128_lt_quiet',
'float128_maybe_silence_nan',
'float128_mul',
'float128_rem',
'float128_round_to_int',
'float128_scalbn',
'float128_sqrt',
'float128_sub',
'float128ToCommonNaN',
'float128_to_float32',
'float128_to_float64',
'float128_to_floatx80',
'float128_to_int32',
'float128_to_int32_round_to_zero',
'float128_to_int64',
'float128_to_int64_round_to_zero',
'float128_unordered',
'float128_unordered_quiet',
'float16_default_nan',
'float16_is_quiet_nan',
'float16_is_signaling_nan',
'float16_maybe_silence_nan',
'float16ToCommonNaN',
'float16_to_float32',
'float16_to_float64',
'float32_abs',
'float32_add',
'float32_chs',
'float32_compare',
'float32_compare_internal',
'float32_compare_quiet',
'float32_default_nan',
'float32_div',
'float32_eq',
'float32_eq_quiet',
'float32_exp2',
'float32_exp2_coefficients',
'float32_is_any_nan',
'float32_is_infinity',
'float32_is_neg',
'float32_is_quiet_nan',
'float32_is_signaling_nan',
'float32_is_zero',
'float32_is_zero_or_denormal',
'float32_le',
'float32_le_quiet',
'float32_log2',
'float32_lt',
'float32_lt_quiet',
'float32_max',
'float32_maxnum',
'float32_maxnummag',
'float32_maybe_silence_nan',
'float32_min',
'float32_minmax',
'float32_minnum',
'float32_minnummag',
'float32_mul',
'float32_muladd',
'float32_rem',
'float32_round_to_int',
'float32_scalbn',
'float32_set_sign',
'float32_sqrt',
'float32_squash_input_denormal',
'float32_sub',
'float32ToCommonNaN',
'float32_to_float128',
'float32_to_float16',
'float32_to_float64',
'float32_to_floatx80',
'float32_to_int16',
'float32_to_int16_round_to_zero',
'float32_to_int32',
'float32_to_int32_round_to_zero',
'float32_to_int64',
'float32_to_int64_round_to_zero',
'float32_to_uint16',
'float32_to_uint16_round_to_zero',
'float32_to_uint32',
'float32_to_uint32_round_to_zero',
'float32_to_uint64',
'float32_to_uint64_round_to_zero',
'float32_unordered',
'float32_unordered_quiet',
'float64_abs',
'float64_add',
'float64_chs',
'float64_compare',
'float64_compare_internal',
'float64_compare_quiet',
'float64_default_nan',
'float64_div',
'float64_eq',
'float64_eq_quiet',
'float64_is_any_nan',
'float64_is_infinity',
'float64_is_neg',
'float64_is_quiet_nan',
'float64_is_signaling_nan',
'float64_is_zero',
'float64_le',
'float64_le_quiet',
'float64_log2',
'float64_lt',
'float64_lt_quiet',
'float64_max',
'float64_maxnum',
'float64_maxnummag',
'float64_maybe_silence_nan',
'float64_min',
'float64_minmax',
'float64_minnum',
'float64_minnummag',
'float64_mul',
'float64_muladd',
'float64_rem',
'float64_round_to_int',
'float64_scalbn',
'float64_set_sign',
'float64_sqrt',
'float64_squash_input_denormal',
'float64_sub',
'float64ToCommonNaN',
'float64_to_float128',
'float64_to_float16',
'float64_to_float32',
'float64_to_floatx80',
'float64_to_int16',
'float64_to_int16_round_to_zero',
'float64_to_int32',
'float64_to_int32_round_to_zero',
'float64_to_int64',
'float64_to_int64_round_to_zero',
'float64_to_uint16',
'float64_to_uint16_round_to_zero',
'float64_to_uint32',
'float64_to_uint32_round_to_zero',
'float64_to_uint64',
'float64_to_uint64_round_to_zero',
'float64_trunc_to_int',
'float64_unordered',
'float64_unordered_quiet',
'float_raise',
'floatx80_add',
'floatx80_compare',
'floatx80_compare_internal',
'floatx80_compare_quiet',
'floatx80_default_nan',
'floatx80_div',
'floatx80_eq',
'floatx80_eq_quiet',
'floatx80_is_quiet_nan',
'floatx80_is_signaling_nan',
'floatx80_le',
'floatx80_le_quiet',
'floatx80_lt',
'floatx80_lt_quiet',
'floatx80_maybe_silence_nan',
'floatx80_mul',
'floatx80_rem',
'floatx80_round_to_int',
'floatx80_scalbn',
'floatx80_sqrt',
'floatx80_sub',
'floatx80ToCommonNaN',
'floatx80_to_float128',
'floatx80_to_float32',
'floatx80_to_float64',
'floatx80_to_int32',
'floatx80_to_int32_round_to_zero',
'floatx80_to_int64',
'floatx80_to_int64_round_to_zero',
'floatx80_unordered',
'floatx80_unordered_quiet',
'flush_icache_range',
'format_string',
'fp_decode_rm',
'frame_dummy',
'free_range',
'fstat64',
'futex_wait',
'futex_wake',
'gen_aa32_ld16s',
'gen_aa32_ld16u',
'gen_aa32_ld32u',
'gen_aa32_ld64',
'gen_aa32_ld8s',
'gen_aa32_ld8u',
'gen_aa32_st16',
'gen_aa32_st32',
'gen_aa32_st64',
'gen_aa32_st8',
'gen_adc',
'gen_adc_CC',
'gen_add16',
'gen_add_carry',
'gen_add_CC',
'gen_add_datah_offset',
'gen_add_data_offset',
'gen_addq',
'gen_addq_lo',
'gen_addq_msw',
'gen_arm_parallel_addsub',
'gen_arm_shift_im',
'gen_arm_shift_reg',
'gen_bx',
'gen_bx_im',
'gen_clrex',
'generate_memory_topology',
'generic_timer_cp_reginfo',
'gen_exception',
'gen_exception_insn',
'gen_exception_internal',
'gen_exception_internal_insn',
'gen_exception_return',
'gen_goto_tb',
'gen_helper_access_check_cp_reg',
'gen_helper_add_saturate',
'gen_helper_add_setq',
'gen_helper_clear_pstate_ss',
'gen_helper_clz32',
'gen_helper_clz64',
'gen_helper_clz_arm',
'gen_helper_cpsr_read',
'gen_helper_cpsr_write',
'gen_helper_crc32_arm',
'gen_helper_crc32c',
'gen_helper_crypto_aese',
'gen_helper_crypto_aesmc',
'gen_helper_crypto_sha1_3reg',
'gen_helper_crypto_sha1h',
'gen_helper_crypto_sha1su1',
'gen_helper_crypto_sha256h',
'gen_helper_crypto_sha256h2',
'gen_helper_crypto_sha256su0',
'gen_helper_crypto_sha256su1',
'gen_helper_double_saturate',
'gen_helper_exception_internal',
'gen_helper_exception_with_syndrome',
'gen_helper_get_cp_reg',
'gen_helper_get_cp_reg64',
'gen_helper_get_r13_banked',
'gen_helper_get_user_reg',
'gen_helper_iwmmxt_addcb',
'gen_helper_iwmmxt_addcl',
'gen_helper_iwmmxt_addcw',
'gen_helper_iwmmxt_addnb',
'gen_helper_iwmmxt_addnl',
'gen_helper_iwmmxt_addnw',
'gen_helper_iwmmxt_addsb',
'gen_helper_iwmmxt_addsl',
'gen_helper_iwmmxt_addsw',
'gen_helper_iwmmxt_addub',
'gen_helper_iwmmxt_addul',
'gen_helper_iwmmxt_adduw',
'gen_helper_iwmmxt_align',
'gen_helper_iwmmxt_avgb0',
'gen_helper_iwmmxt_avgb1',
'gen_helper_iwmmxt_avgw0',
'gen_helper_iwmmxt_avgw1',
'gen_helper_iwmmxt_bcstb',
'gen_helper_iwmmxt_bcstl',
'gen_helper_iwmmxt_bcstw',
'gen_helper_iwmmxt_cmpeqb',
'gen_helper_iwmmxt_cmpeql',
'gen_helper_iwmmxt_cmpeqw',
'gen_helper_iwmmxt_cmpgtsb',
'gen_helper_iwmmxt_cmpgtsl',
'gen_helper_iwmmxt_cmpgtsw',
'gen_helper_iwmmxt_cmpgtub',
'gen_helper_iwmmxt_cmpgtul',
'gen_helper_iwmmxt_cmpgtuw',
'gen_helper_iwmmxt_insr',
'gen_helper_iwmmxt_macsw',
'gen_helper_iwmmxt_macuw',
'gen_helper_iwmmxt_maddsq',
'gen_helper_iwmmxt_madduq',
'gen_helper_iwmmxt_maxsb',
'gen_helper_iwmmxt_maxsl',
'gen_helper_iwmmxt_maxsw',
'gen_helper_iwmmxt_maxub',
'gen_helper_iwmmxt_maxul',
'gen_helper_iwmmxt_maxuw',
'gen_helper_iwmmxt_minsb',
'gen_helper_iwmmxt_minsl',
'gen_helper_iwmmxt_minsw',
'gen_helper_iwmmxt_minub',
'gen_helper_iwmmxt_minul',
'gen_helper_iwmmxt_minuw',
'gen_helper_iwmmxt_msbb',
'gen_helper_iwmmxt_msbl',
'gen_helper_iwmmxt_msbw',
'gen_helper_iwmmxt_muladdsl',
'gen_helper_iwmmxt_muladdsw',
'gen_helper_iwmmxt_muladdswl',
'gen_helper_iwmmxt_mulshw',
'gen_helper_iwmmxt_mulslw',
'gen_helper_iwmmxt_muluhw',
'gen_helper_iwmmxt_mululw',
'gen_helper_iwmmxt_packsl',
'gen_helper_iwmmxt_packsq',
'gen_helper_iwmmxt_packsw',
'gen_helper_iwmmxt_packul',
'gen_helper_iwmmxt_packuq',
'gen_helper_iwmmxt_packuw',
'gen_helper_iwmmxt_rorl',
'gen_helper_iwmmxt_rorq',
'gen_helper_iwmmxt_rorw',
'gen_helper_iwmmxt_sadb',
'gen_helper_iwmmxt_sadw',
'gen_helper_iwmmxt_setpsr_nz',
'gen_helper_iwmmxt_shufh',
'gen_helper_iwmmxt_slll',
'gen_helper_iwmmxt_sllq',
'gen_helper_iwmmxt_sllw',
'gen_helper_iwmmxt_sral',
'gen_helper_iwmmxt_sraq',
'gen_helper_iwmmxt_sraw',
'gen_helper_iwmmxt_srll',
'gen_helper_iwmmxt_srlq',
'gen_helper_iwmmxt_srlw',
'gen_helper_iwmmxt_subnb',
'gen_helper_iwmmxt_subnl',
'gen_helper_iwmmxt_subnw',
'gen_helper_iwmmxt_subsb',
'gen_helper_iwmmxt_subsl',
'gen_helper_iwmmxt_subsw',
'gen_helper_iwmmxt_subub',
'gen_helper_iwmmxt_subul',
'gen_helper_iwmmxt_subuw',
'gen_helper_iwmmxt_unpackhb',
'gen_helper_iwmmxt_unpackhl',
'gen_helper_iwmmxt_unpackhsb',
'gen_helper_iwmmxt_unpackhsl',
'gen_helper_iwmmxt_unpackhsw',
'gen_helper_iwmmxt_unpackhub',
'gen_helper_iwmmxt_unpackhul',
'gen_helper_iwmmxt_unpackhuw',
'gen_helper_iwmmxt_unpackhw',
'gen_helper_iwmmxt_unpacklb',
'gen_helper_iwmmxt_unpackll',
'gen_helper_iwmmxt_unpacklsb',
'gen_helper_iwmmxt_unpacklsl',
'gen_helper_iwmmxt_unpacklsw',
'gen_helper_iwmmxt_unpacklub',
'gen_helper_iwmmxt_unpacklul',
'gen_helper_iwmmxt_unpackluw',
'gen_helper_iwmmxt_unpacklw',
'gen_helper_neon_abd_f32',
'gen_helper_neon_abdl_s16',
'gen_helper_neon_abdl_s32',
'gen_helper_neon_abdl_s64',
'gen_helper_neon_abdl_u16',
'gen_helper_neon_abdl_u32',
'gen_helper_neon_abdl_u64',
'gen_helper_neon_abd_s16',
'gen_helper_neon_abd_s32',
'gen_helper_neon_abd_s8',
'gen_helper_neon_abd_u16',
'gen_helper_neon_abd_u32',
'gen_helper_neon_abd_u8',
'gen_helper_neon_abs_s16',
'gen_helper_neon_abs_s8',
'gen_helper_neon_acge_f32',
'gen_helper_neon_acgt_f32',
'gen_helper_neon_addl_saturate_s32',
'gen_helper_neon_addl_saturate_s64',
'gen_helper_neon_addl_u16',
'gen_helper_neon_addl_u32',
'gen_helper_neon_add_u16',
'gen_helper_neon_add_u8',
'gen_helper_neon_ceq_f32',
'gen_helper_neon_ceq_u16',
'gen_helper_neon_ceq_u32',
'gen_helper_neon_ceq_u8',
'gen_helper_neon_cge_f32',
'gen_helper_neon_cge_s16',
'gen_helper_neon_cge_s32',
'gen_helper_neon_cge_s8',
'gen_helper_neon_cge_u16',
'gen_helper_neon_cge_u32',
'gen_helper_neon_cge_u8',
'gen_helper_neon_cgt_f32',
'gen_helper_neon_cgt_s16',
'gen_helper_neon_cgt_s32',
'gen_helper_neon_cgt_s8',
'gen_helper_neon_cgt_u16',
'gen_helper_neon_cgt_u32',
'gen_helper_neon_cgt_u8',
'gen_helper_neon_cls_s16',
'gen_helper_neon_cls_s32',
'gen_helper_neon_cls_s8',
'gen_helper_neon_clz_u16',
'gen_helper_neon_clz_u8',
'gen_helper_neon_cnt_u8',
'gen_helper_neon_fcvt_f16_to_f32',
'gen_helper_neon_fcvt_f32_to_f16',
'gen_helper_neon_hadd_s16',
'gen_helper_neon_hadd_s32',
'gen_helper_neon_hadd_s8',
'gen_helper_neon_hadd_u16',
'gen_helper_neon_hadd_u32',
'gen_helper_neon_hadd_u8',
'gen_helper_neon_hsub_s16',
'gen_helper_neon_hsub_s32',
'gen_helper_neon_hsub_s8',
'gen_helper_neon_hsub_u16',
'gen_helper_neon_hsub_u32',
'gen_helper_neon_hsub_u8',
'gen_helper_neon_max_s16',
'gen_helper_neon_max_s32',
'gen_helper_neon_max_s8',
'gen_helper_neon_max_u16',
'gen_helper_neon_max_u32',
'gen_helper_neon_max_u8',
'gen_helper_neon_min_s16',
'gen_helper_neon_min_s32',
'gen_helper_neon_min_s8',
'gen_helper_neon_min_u16',
'gen_helper_neon_min_u32',
'gen_helper_neon_min_u8',
'gen_helper_neon_mull_p8',
'gen_helper_neon_mull_s16',
'gen_helper_neon_mull_s8',
'gen_helper_neon_mull_u16',
'gen_helper_neon_mull_u8',
'gen_helper_neon_mul_p8',
'gen_helper_neon_mul_u16',
'gen_helper_neon_mul_u8',
'gen_helper_neon_narrow_high_u16',
'gen_helper_neon_narrow_high_u8',
'gen_helper_neon_narrow_round_high_u16',
'gen_helper_neon_narrow_round_high_u8',
'gen_helper_neon_narrow_sat_s16',
'gen_helper_neon_narrow_sat_s32',
'gen_helper_neon_narrow_sat_s8',
'gen_helper_neon_narrow_sat_u16',
'gen_helper_neon_narrow_sat_u32',
'gen_helper_neon_narrow_sat_u8',
'gen_helper_neon_narrow_u16',
'gen_helper_neon_narrow_u8',
'gen_helper_neon_negl_u16',
'gen_helper_neon_negl_u32',
'gen_helper_neon_paddl_u16',
'gen_helper_neon_paddl_u32',
'gen_helper_neon_padd_u16',
'gen_helper_neon_padd_u8',
'gen_helper_neon_pmax_s16',
'gen_helper_neon_pmax_s8',
'gen_helper_neon_pmax_u16',
'gen_helper_neon_pmax_u8',
'gen_helper_neon_pmin_s16',
'gen_helper_neon_pmin_s8',
'gen_helper_neon_pmin_u16',
'gen_helper_neon_pmin_u8',
'gen_helper_neon_pmull_64_hi',
'gen_helper_neon_pmull_64_lo',
'gen_helper_neon_qabs_s16',
'gen_helper_neon_qabs_s32',
'gen_helper_neon_qabs_s8',
'gen_helper_neon_qadd_s16',
'gen_helper_neon_qadd_s32',
'gen_helper_neon_qadd_s64',
'gen_helper_neon_qadd_s8',
'gen_helper_neon_qadd_u16',
'gen_helper_neon_qadd_u32',
'gen_helper_neon_qadd_u64',
'gen_helper_neon_qadd_u8',
'gen_helper_neon_qdmulh_s16',
'gen_helper_neon_qdmulh_s32',
'gen_helper_neon_qneg_s16',
'gen_helper_neon_qneg_s32',
'gen_helper_neon_qneg_s8',
'gen_helper_neon_qrdmulh_s16',
'gen_helper_neon_qrdmulh_s32',
'gen_helper_neon_qrshl_s16',
'gen_helper_neon_qrshl_s32',
'gen_helper_neon_qrshl_s64',
'gen_helper_neon_qrshl_s8',
'gen_helper_neon_qrshl_u16',
'gen_helper_neon_qrshl_u32',
'gen_helper_neon_qrshl_u64',
'gen_helper_neon_qrshl_u8',
'gen_helper_neon_qshl_s16',
'gen_helper_neon_qshl_s32',
'gen_helper_neon_qshl_s64',
'gen_helper_neon_qshl_s8',
'gen_helper_neon_qshl_u16',
'gen_helper_neon_qshl_u32',
'gen_helper_neon_qshl_u64',
'gen_helper_neon_qshl_u8',
'gen_helper_neon_qshlu_s16',
'gen_helper_neon_qshlu_s32',
'gen_helper_neon_qshlu_s64',
'gen_helper_neon_qshlu_s8',
'gen_helper_neon_qsub_s16',
'gen_helper_neon_qsub_s32',
'gen_helper_neon_qsub_s64',
'gen_helper_neon_qsub_s8',
'gen_helper_neon_qsub_u16',
'gen_helper_neon_qsub_u32',
'gen_helper_neon_qsub_u64',
'gen_helper_neon_qsub_u8',
'gen_helper_neon_qunzip16',
'gen_helper_neon_qunzip32',
'gen_helper_neon_qunzip8',
'gen_helper_neon_qzip16',
'gen_helper_neon_qzip32',
'gen_helper_neon_qzip8',
'gen_helper_neon_rhadd_s16',
'gen_helper_neon_rhadd_s32',
'gen_helper_neon_rhadd_s8',
'gen_helper_neon_rhadd_u16',
'gen_helper_neon_rhadd_u32',
'gen_helper_neon_rhadd_u8',
'gen_helper_neon_rshl_s16',
'gen_helper_neon_rshl_s32',
'gen_helper_neon_rshl_s64',
'gen_helper_neon_rshl_s8',
'gen_helper_neon_rshl_u16',
'gen_helper_neon_rshl_u32',
'gen_helper_neon_rshl_u64',
'gen_helper_neon_rshl_u8',
'gen_helper_neon_shl_s16',
'gen_helper_neon_shl_s32',
'gen_helper_neon_shl_s64',
'gen_helper_neon_shl_s8',
'gen_helper_neon_shl_u16',
'gen_helper_neon_shl_u32',
'gen_helper_neon_shl_u64',
'gen_helper_neon_shl_u8',
'gen_helper_neon_subl_u16',
'gen_helper_neon_subl_u32',
'gen_helper_neon_sub_u16',
'gen_helper_neon_sub_u8',
'gen_helper_neon_tbl',
'gen_helper_neon_tst_u16',
'gen_helper_neon_tst_u32',
'gen_helper_neon_tst_u8',
'gen_helper_neon_unarrow_sat16',
'gen_helper_neon_unarrow_sat32',
'gen_helper_neon_unarrow_sat8',
'gen_helper_neon_unzip16',
'gen_helper_neon_unzip8',
'gen_helper_neon_widen_s16',
'gen_helper_neon_widen_s8',
'gen_helper_neon_widen_u16',
'gen_helper_neon_widen_u8',
'gen_helper_neon_zip16',
'gen_helper_neon_zip8',
'gen_helper_pre_hvc',
'gen_helper_pre_smc',
'gen_helper_qadd16',
'gen_helper_qadd8',
'gen_helper_qaddsubx',
'gen_helper_qsub16',
'gen_helper_qsub8',
'gen_helper_qsubaddx',
'gen_helper_rbit',
'gen_helper_recpe_f32',
'gen_helper_recpe_u32',
'gen_helper_recps_f32',
'gen_helper_rintd',
'gen_helper_rintd_exact',
'gen_helper_rints',
'gen_helper_rints_exact',
'gen_helper_ror_cc',
'gen_helper_rsqrte_f32',
'gen_helper_rsqrte_u32',
'gen_helper_rsqrts_f32',
'gen_helper_sadd16',
'gen_helper_sadd8',
'gen_helper_saddsubx',
'gen_helper_sar_cc',
'gen_helper_sdiv',
'gen_helper_sel_flags',
'gen_helper_set_cp_reg',
'gen_helper_set_cp_reg64',
'gen_helper_set_neon_rmode',
'gen_helper_set_r13_banked',
'gen_helper_set_rmode',
'gen_helper_set_user_reg',
'gen_helper_shadd16',
'gen_helper_shadd8',
'gen_helper_shaddsubx',
'gen_helper_shl_cc',
'gen_helper_shr_cc',
'gen_helper_shsub16',
'gen_helper_shsub8',
'gen_helper_shsubaddx',
'gen_helper_ssat',
'gen_helper_ssat16',
'gen_helper_ssub16',
'gen_helper_ssub8',
'gen_helper_ssubaddx',
'gen_helper_sub_saturate',
'gen_helper_sxtb16',
'gen_helper_uadd16',
'gen_helper_uadd8',
'gen_helper_uaddsubx',
'gen_helper_udiv',
'gen_helper_uhadd16',
'gen_helper_uhadd8',
'gen_helper_uhaddsubx',
'gen_helper_uhsub16',
'gen_helper_uhsub8',
'gen_helper_uhsubaddx',
'gen_helper_uqadd16',
'gen_helper_uqadd8',
'gen_helper_uqaddsubx',
'gen_helper_uqsub16',
'gen_helper_uqsub8',
'gen_helper_uqsubaddx',
'gen_helper_usad8',
'gen_helper_usat',
'gen_helper_usat16',
'gen_helper_usub16',
'gen_helper_usub8',
'gen_helper_usubaddx',
'gen_helper_uxtb16',
'gen_helper_v7m_mrs',
'gen_helper_v7m_msr',
'gen_helper_vfp_absd',
'gen_helper_vfp_abss',
'gen_helper_vfp_addd',
'gen_helper_vfp_adds',
'gen_helper_vfp_cmpd',
'gen_helper_vfp_cmped',
'gen_helper_vfp_cmpes',
'gen_helper_vfp_cmps',
'gen_helper_vfp_divd',
'gen_helper_vfp_divs',
'gen_helper_vfp_fcvtds',
'gen_helper_vfp_fcvt_f16_to_f32',
'gen_helper_vfp_fcvt_f16_to_f64',
'gen_helper_vfp_fcvt_f32_to_f16',
'gen_helper_vfp_fcvt_f64_to_f16',
'gen_helper_vfp_fcvtsd',
'gen_helper_vfp_get_fpscr',
'gen_helper_vfp_maxnumd',
'gen_helper_vfp_maxnums',
'gen_helper_vfp_maxs',
'gen_helper_vfp_minnumd',
'gen_helper_vfp_minnums',
'gen_helper_vfp_mins',
'gen_helper_vfp_muladdd',
'gen_helper_vfp_muladds',
'gen_helper_vfp_muld',
'gen_helper_vfp_muls',
'gen_helper_vfp_negd',
'gen_helper_vfp_negs',
'gen_helper_vfp_set_fpscr',
'gen_helper_vfp_shtod',
'gen_helper_vfp_shtos',
'gen_helper_vfp_sitod',
'gen_helper_vfp_sitos',
'gen_helper_vfp_sltod',
'gen_helper_vfp_sltos',
'gen_helper_vfp_sqrtd',
'gen_helper_vfp_sqrts',
'gen_helper_vfp_subd',
'gen_helper_vfp_subs',
'gen_helper_vfp_toshd_round_to_zero',
'gen_helper_vfp_toshs_round_to_zero',
'gen_helper_vfp_tosid',
'gen_helper_vfp_tosis',
'gen_helper_vfp_tosizd',
'gen_helper_vfp_tosizs',
'gen_helper_vfp_tosld',
'gen_helper_vfp_tosld_round_to_zero',
'gen_helper_vfp_tosls',
'gen_helper_vfp_tosls_round_to_zero',
'gen_helper_vfp_touhd_round_to_zero',
'gen_helper_vfp_touhs_round_to_zero',
'gen_helper_vfp_touid',
'gen_helper_vfp_touis',
'gen_helper_vfp_touizd',
'gen_helper_vfp_touizs',
'gen_helper_vfp_tould',
'gen_helper_vfp_tould_round_to_zero',
'gen_helper_vfp_touls',
'gen_helper_vfp_touls_round_to_zero',
'gen_helper_vfp_uhtod',
'gen_helper_vfp_uhtos',
'gen_helper_vfp_uitod',
'gen_helper_vfp_uitos',
'gen_helper_vfp_ultod',
'gen_helper_vfp_ultos',
'gen_helper_wfe',
'gen_helper_wfi',
'gen_hvc',
'gen_intermediate_code_internal',
'gen_intermediate_code_internal_a64',
'gen_iwmmxt_address',
'gen_iwmmxt_shift',
'gen_jmp',
'gen_load_and_replicate',
'gen_load_exclusive',
'gen_logic_CC',
'gen_logicq_cc',
'gen_lookup_tb',
'gen_mov_F0_vreg',
'gen_mov_F1_vreg',
'gen_mov_vreg_F0',
'gen_muls_i64_i32',
'gen_mulu_i64_i32',
'gen_mulxy',
'gen_neon_add',
'gen_neon_addl',
'gen_neon_addl_saturate',
'gen_neon_bsl',
'gen_neon_dup_high16',
'gen_neon_dup_low16',
'gen_neon_dup_u8',
'gen_neon_mull',
'gen_neon_narrow',
'gen_neon_narrow_op',
'gen_neon_narrow_sats',
'gen_neon_narrow_satu',
'gen_neon_negl',
'gen_neon_rsb',
'gen_neon_shift_narrow',
'gen_neon_subl',
'gen_neon_trn_u16',
'gen_neon_trn_u8',
'gen_neon_unarrow_sats',
'gen_neon_unzip',
'gen_neon_widen',
'gen_neon_zip',
'gen_new_label',
'gen_nop_hint',
'gen_op_iwmmxt_addl_M0_wRn',
'gen_op_iwmmxt_addnb_M0_wRn',
'gen_op_iwmmxt_addnl_M0_wRn',
'gen_op_iwmmxt_addnw_M0_wRn',
'gen_op_iwmmxt_addsb_M0_wRn',
'gen_op_iwmmxt_addsl_M0_wRn',
'gen_op_iwmmxt_addsw_M0_wRn',
'gen_op_iwmmxt_addub_M0_wRn',
'gen_op_iwmmxt_addul_M0_wRn',
'gen_op_iwmmxt_adduw_M0_wRn',
'gen_op_iwmmxt_andq_M0_wRn',
'gen_op_iwmmxt_avgb0_M0_wRn',
'gen_op_iwmmxt_avgb1_M0_wRn',
'gen_op_iwmmxt_avgw0_M0_wRn',
'gen_op_iwmmxt_avgw1_M0_wRn',
'gen_op_iwmmxt_cmpeqb_M0_wRn',
'gen_op_iwmmxt_cmpeql_M0_wRn',
'gen_op_iwmmxt_cmpeqw_M0_wRn',
'gen_op_iwmmxt_cmpgtsb_M0_wRn',
'gen_op_iwmmxt_cmpgtsl_M0_wRn',
'gen_op_iwmmxt_cmpgtsw_M0_wRn',
'gen_op_iwmmxt_cmpgtub_M0_wRn',
'gen_op_iwmmxt_cmpgtul_M0_wRn',
'gen_op_iwmmxt_cmpgtuw_M0_wRn',
'gen_op_iwmmxt_macsw_M0_wRn',
'gen_op_iwmmxt_macuw_M0_wRn',
'gen_op_iwmmxt_maddsq_M0_wRn',
'gen_op_iwmmxt_madduq_M0_wRn',
'gen_op_iwmmxt_maxsb_M0_wRn',
'gen_op_iwmmxt_maxsl_M0_wRn',
'gen_op_iwmmxt_maxsw_M0_wRn',
'gen_op_iwmmxt_maxub_M0_wRn',
'gen_op_iwmmxt_maxul_M0_wRn',
'gen_op_iwmmxt_maxuw_M0_wRn',
'gen_op_iwmmxt_minsb_M0_wRn',
'gen_op_iwmmxt_minsl_M0_wRn',
'gen_op_iwmmxt_minsw_M0_wRn',
'gen_op_iwmmxt_minub_M0_wRn',
'gen_op_iwmmxt_minul_M0_wRn',
'gen_op_iwmmxt_minuw_M0_wRn',
'gen_op_iwmmxt_movq_M0_wRn',
'gen_op_iwmmxt_movq_wRn_M0',
'gen_op_iwmmxt_mulshw_M0_wRn',
'gen_op_iwmmxt_mulslw_M0_wRn',
'gen_op_iwmmxt_muluhw_M0_wRn',
'gen_op_iwmmxt_mululw_M0_wRn',
'gen_op_iwmmxt_orq_M0_wRn',
'gen_op_iwmmxt_packsl_M0_wRn',
'gen_op_iwmmxt_packsq_M0_wRn',
'gen_op_iwmmxt_packsw_M0_wRn',
'gen_op_iwmmxt_packul_M0_wRn',
'gen_op_iwmmxt_packuq_M0_wRn',
'gen_op_iwmmxt_packuw_M0_wRn',
'gen_op_iwmmxt_sadb_M0_wRn',
'gen_op_iwmmxt_sadw_M0_wRn',
'gen_op_iwmmxt_set_cup',
'gen_op_iwmmxt_set_mup',
'gen_op_iwmmxt_setpsr_nz',
'gen_op_iwmmxt_subnb_M0_wRn',
'gen_op_iwmmxt_subnl_M0_wRn',
'gen_op_iwmmxt_subnw_M0_wRn',
'gen_op_iwmmxt_subsb_M0_wRn',
'gen_op_iwmmxt_subsl_M0_wRn',
'gen_op_iwmmxt_subsw_M0_wRn',
'gen_op_iwmmxt_subub_M0_wRn',
'gen_op_iwmmxt_subul_M0_wRn',
'gen_op_iwmmxt_subuw_M0_wRn',
'gen_op_iwmmxt_unpackhb_M0_wRn',
'gen_op_iwmmxt_unpackhl_M0_wRn',
'gen_op_iwmmxt_unpackhsb_M0',
'gen_op_iwmmxt_unpackhsl_M0',
'gen_op_iwmmxt_unpackhsw_M0',
'gen_op_iwmmxt_unpackhub_M0',
'gen_op_iwmmxt_unpackhul_M0',
'gen_op_iwmmxt_unpackhuw_M0',
'gen_op_iwmmxt_unpackhw_M0_wRn',
'gen_op_iwmmxt_unpacklb_M0_wRn',
'gen_op_iwmmxt_unpackll_M0_wRn',
'gen_op_iwmmxt_unpacklsb_M0',
'gen_op_iwmmxt_unpacklsl_M0',
'gen_op_iwmmxt_unpacklsw_M0',
'gen_op_iwmmxt_unpacklub_M0',
'gen_op_iwmmxt_unpacklul_M0',
'gen_op_iwmmxt_unpackluw_M0',
'gen_op_iwmmxt_unpacklw_M0_wRn',
'gen_op_iwmmxt_xorq_M0_wRn',
'gen_rev16',
'gen_revsh',
'gen_rfe',
'gen_sar',
'gen_sbc_CC',
'gen_sbfx',
'gen_set_CF_bit31',
'gen_set_condexec',
'gen_set_cpsr',
'gen_set_label',
'gen_set_pc_im',
'gen_set_psr',
'gen_set_psr_im',
'gen_shl',
'gen_shr',
'gen_smc',
'gen_smul_dual',
'gen_srs',
'gen_ss_advance',
'gen_step_complete_exception',
'gen_store_exclusive',
'gen_storeq_reg',
'gen_sub_carry',
'gen_sub_CC',
'gen_subq_msw',
'gen_swap_half',
'gen_thumb2_data_op',
'gen_thumb2_parallel_addsub',
'gen_ubfx',
'gen_vfp_abs',
'gen_vfp_add',
'gen_vfp_cmp',
'gen_vfp_cmpe',
'gen_vfp_div',
'gen_vfp_F1_ld0',
'gen_vfp_F1_mul',
'gen_vfp_F1_neg',
'gen_vfp_ld',
'gen_vfp_mrs',
'gen_vfp_msr',
'gen_vfp_mul',
'gen_vfp_neg',
'gen_vfp_shto',
'gen_vfp_sito',
'gen_vfp_slto',
'gen_vfp_sqrt',
'gen_vfp_st',
'gen_vfp_sub',
'gen_vfp_tosh',
'gen_vfp_tosi',
'gen_vfp_tosiz',
'gen_vfp_tosl',
'gen_vfp_touh',
'gen_vfp_toui',
'gen_vfp_touiz',
'gen_vfp_toul',
'gen_vfp_uhto',
'gen_vfp_uito',
'gen_vfp_ulto',
'get_arm_cp_reginfo',
'get_clock',
'get_clock_realtime',
'get_constraint_priority',
'get_float_exception_flags',
'get_float_rounding_mode',
'get_fpstatus_ptr',
'get_level1_table_address',
'get_mem_index',
'get_next_param_value',
'get_opt_name',
'get_opt_value',
'get_page_addr_code',
'get_param_value',
'get_phys_addr',
'get_phys_addr_lpae',
'get_phys_addr_mpu',
'get_phys_addr_v5',
'get_phys_addr_v6',
'get_system_memory',
'get_ticks_per_sec',
'g_list_insert_sorted_merged',
'_GLOBAL_OFFSET_TABLE_',
'gt_cntfrq_access',
'gt_cnt_read',
'gt_cnt_reset',
'gt_counter_access',
'gt_ctl_write',
'gt_cval_write',
'gt_get_countervalue',
'gt_pct_access',
'gt_ptimer_access',
'gt_recalc_timer',
'gt_timer_access',
'gt_tval_read',
'gt_tval_write',
'gt_vct_access',
'gt_vtimer_access',
'guest_phys_blocks_free',
'guest_phys_blocks_init',
'handle_vcvt',
'handle_vminmaxnm',
'handle_vrint',
'handle_vsel',
'has_help_option',
'have_bmi1',
'have_bmi2',
'hcr_write',
'helper_access_check_cp_reg',
'helper_add_saturate',
'helper_add_setq',
'helper_add_usaturate',
'helper_be_ldl_cmmu',
'helper_be_ldq_cmmu',
'helper_be_ldq_mmu',
'helper_be_ldsl_mmu',
'helper_be_ldsw_mmu',
'helper_be_ldul_mmu',
'helper_be_lduw_mmu',
'helper_be_ldw_cmmu',
'helper_be_stl_mmu',
'helper_be_stq_mmu',
'helper_be_stw_mmu',
'helper_clear_pstate_ss',
'helper_clz_arm',
'helper_cpsr_read',
'helper_cpsr_write',
'helper_crc32_arm',
'helper_crc32c',
'helper_crypto_aese',
'helper_crypto_aesmc',
'helper_crypto_sha1_3reg',
'helper_crypto_sha1h',
'helper_crypto_sha1su1',
'helper_crypto_sha256h',
'helper_crypto_sha256h2',
'helper_crypto_sha256su0',
'helper_crypto_sha256su1',
'helper_dc_zva',
'helper_double_saturate',
'helper_exception_internal',
'helper_exception_return',
'helper_exception_with_syndrome',
'helper_get_cp_reg',
'helper_get_cp_reg64',
'helper_get_r13_banked',
'helper_get_user_reg',
'helper_iwmmxt_addcb',
'helper_iwmmxt_addcl',
'helper_iwmmxt_addcw',
'helper_iwmmxt_addnb',
'helper_iwmmxt_addnl',
'helper_iwmmxt_addnw',
'helper_iwmmxt_addsb',
'helper_iwmmxt_addsl',
'helper_iwmmxt_addsw',
'helper_iwmmxt_addub',
'helper_iwmmxt_addul',
'helper_iwmmxt_adduw',
'helper_iwmmxt_align',
'helper_iwmmxt_avgb0',
'helper_iwmmxt_avgb1',
'helper_iwmmxt_avgw0',
'helper_iwmmxt_avgw1',
'helper_iwmmxt_bcstb',
'helper_iwmmxt_bcstl',
'helper_iwmmxt_bcstw',
'helper_iwmmxt_cmpeqb',
'helper_iwmmxt_cmpeql',
'helper_iwmmxt_cmpeqw',
'helper_iwmmxt_cmpgtsb',
'helper_iwmmxt_cmpgtsl',
'helper_iwmmxt_cmpgtsw',
'helper_iwmmxt_cmpgtub',
'helper_iwmmxt_cmpgtul',
'helper_iwmmxt_cmpgtuw',
'helper_iwmmxt_insr',
'helper_iwmmxt_macsw',
'helper_iwmmxt_macuw',
'helper_iwmmxt_maddsq',
'helper_iwmmxt_madduq',
'helper_iwmmxt_maxsb',
'helper_iwmmxt_maxsl',
'helper_iwmmxt_maxsw',
'helper_iwmmxt_maxub',
'helper_iwmmxt_maxul',
'helper_iwmmxt_maxuw',
'helper_iwmmxt_minsb',
'helper_iwmmxt_minsl',
'helper_iwmmxt_minsw',
'helper_iwmmxt_minub',
'helper_iwmmxt_minul',
'helper_iwmmxt_minuw',
'helper_iwmmxt_msbb',
'helper_iwmmxt_msbl',
'helper_iwmmxt_msbw',
'helper_iwmmxt_muladdsl',
'helper_iwmmxt_muladdsw',
'helper_iwmmxt_muladdswl',
'helper_iwmmxt_mulshw',
'helper_iwmmxt_mulslw',
'helper_iwmmxt_muluhw',
'helper_iwmmxt_mululw',
'helper_iwmmxt_packsl',
'helper_iwmmxt_packsq',
'helper_iwmmxt_packsw',
'helper_iwmmxt_packul',
'helper_iwmmxt_packuq',
'helper_iwmmxt_packuw',
'helper_iwmmxt_rorl',
'helper_iwmmxt_rorq',
'helper_iwmmxt_rorw',
'helper_iwmmxt_sadb',
'helper_iwmmxt_sadw',
'helper_iwmmxt_setpsr_nz',
'helper_iwmmxt_shufh',
'helper_iwmmxt_slll',
'helper_iwmmxt_sllq',
'helper_iwmmxt_sllw',
'helper_iwmmxt_sral',
'helper_iwmmxt_sraq',
'helper_iwmmxt_sraw',
'helper_iwmmxt_srll',
'helper_iwmmxt_srlq',
'helper_iwmmxt_srlw',
'helper_iwmmxt_subnb',
'helper_iwmmxt_subnl',
'helper_iwmmxt_subnw',
'helper_iwmmxt_subsb',
'helper_iwmmxt_subsl',
'helper_iwmmxt_subsw',
'helper_iwmmxt_subub',
'helper_iwmmxt_subul',
'helper_iwmmxt_subuw',
'helper_iwmmxt_unpackhb',
'helper_iwmmxt_unpackhl',
'helper_iwmmxt_unpackhsb',
'helper_iwmmxt_unpackhsl',
'helper_iwmmxt_unpackhsw',
'helper_iwmmxt_unpackhub',
'helper_iwmmxt_unpackhul',
'helper_iwmmxt_unpackhuw',
'helper_iwmmxt_unpackhw',
'helper_iwmmxt_unpacklb',
'helper_iwmmxt_unpackll',
'helper_iwmmxt_unpacklsb',
'helper_iwmmxt_unpacklsl',
'helper_iwmmxt_unpacklsw',
'helper_iwmmxt_unpacklub',
'helper_iwmmxt_unpacklul',
'helper_iwmmxt_unpackluw',
'helper_iwmmxt_unpacklw',
'helper_ldb_cmmu',
'helper_ldb_mmu',
'helper_ldl_cmmu',
'helper_ldl_mmu',
'helper_ldq_cmmu',
'helper_ldq_mmu',
'helper_ldw_cmmu',
'helper_ldw_mmu',
'helper_le_ldl_cmmu',
'helper_le_ldq_cmmu',
'helper_le_ldq_mmu',
'helper_le_ldsl_mmu',
'helper_le_ldsw_mmu',
'helper_le_ldul_mmu',
'helper_le_lduw_mmu',
'helper_le_ldw_cmmu',
'helper_le_stl_mmu',
'helper_le_stq_mmu',
'helper_le_stw_mmu',
'helper_msr_i_pstate',
'helper_neon_abd_f32',
'helper_neon_abdl_s16',
'helper_neon_abdl_s32',
'helper_neon_abdl_s64',
'helper_neon_abdl_u16',
'helper_neon_abdl_u32',
'helper_neon_abdl_u64',
'helper_neon_abd_s16',
'helper_neon_abd_s32',
'helper_neon_abd_s8',
'helper_neon_abd_u16',
'helper_neon_abd_u32',
'helper_neon_abd_u8',
'helper_neon_abs_s16',
'helper_neon_abs_s8',
'helper_neon_acge_f32',
'helper_neon_acge_f64',
'helper_neon_acgt_f32',
'helper_neon_acgt_f64',
'helper_neon_addl_saturate_s32',
'helper_neon_addl_saturate_s64',
'helper_neon_addl_u16',
'helper_neon_addl_u32',
'helper_neon_add_u16',
'helper_neon_add_u8',
'helper_neon_ceq_f32',
'helper_neon_ceq_u16',
'helper_neon_ceq_u32',
'helper_neon_ceq_u8',
'helper_neon_cge_f32',
'helper_neon_cge_s16',
'helper_neon_cge_s32',
'helper_neon_cge_s8',
'helper_neon_cge_u16',
'helper_neon_cge_u32',
'helper_neon_cge_u8',
'helper_neon_cgt_f32',
'helper_neon_cgt_s16',
'helper_neon_cgt_s32',
'helper_neon_cgt_s8',
'helper_neon_cgt_u16',
'helper_neon_cgt_u32',
'helper_neon_cgt_u8',
'helper_neon_cls_s16',
'helper_neon_cls_s32',
'helper_neon_cls_s8',
'helper_neon_clz_u16',
'helper_neon_clz_u8',
'helper_neon_cnt_u8',
'helper_neon_fcvt_f16_to_f32',
'helper_neon_fcvt_f32_to_f16',
'helper_neon_hadd_s16',
'helper_neon_hadd_s32',
'helper_neon_hadd_s8',
'helper_neon_hadd_u16',
'helper_neon_hadd_u32',
'helper_neon_hadd_u8',
'helper_neon_hsub_s16',
'helper_neon_hsub_s32',
'helper_neon_hsub_s8',
'helper_neon_hsub_u16',
'helper_neon_hsub_u32',
'helper_neon_hsub_u8',
'helper_neon_max_s16',
'helper_neon_max_s32',
'helper_neon_max_s8',
'helper_neon_max_u16',
'helper_neon_max_u32',
'helper_neon_max_u8',
'helper_neon_min_s16',
'helper_neon_min_s32',
'helper_neon_min_s8',
'helper_neon_min_u16',
'helper_neon_min_u32',
'helper_neon_min_u8',
'helper_neon_mull_p8',
'helper_neon_mull_s16',
'helper_neon_mull_s8',
'helper_neon_mull_u16',
'helper_neon_mull_u8',
'helper_neon_mul_p8',
'helper_neon_mul_u16',
'helper_neon_mul_u8',
'helper_neon_narrow_high_u16',
'helper_neon_narrow_high_u8',
'helper_neon_narrow_round_high_u16',
'helper_neon_narrow_round_high_u8',
'helper_neon_narrow_sat_s16',
'helper_neon_narrow_sat_s32',
'helper_neon_narrow_sat_s8',
'helper_neon_narrow_sat_u16',
'helper_neon_narrow_sat_u32',
'helper_neon_narrow_sat_u8',
'helper_neon_narrow_u16',
'helper_neon_narrow_u8',
'helper_neon_negl_u16',
'helper_neon_negl_u32',
'helper_neon_paddl_u16',
'helper_neon_paddl_u32',
'helper_neon_padd_u16',
'helper_neon_padd_u8',
'helper_neon_pmax_s16',
'helper_neon_pmax_s8',
'helper_neon_pmax_u16',
'helper_neon_pmax_u8',
'helper_neon_pmin_s16',
'helper_neon_pmin_s8',
'helper_neon_pmin_u16',
'helper_neon_pmin_u8',
'helper_neon_pmull_64_hi',
'helper_neon_pmull_64_lo',
'helper_neon_qabs_s16',
'helper_neon_qabs_s32',
'helper_neon_qabs_s64',
'helper_neon_qabs_s8',
'helper_neon_qadd_s16',
'helper_neon_qadd_s32',
'helper_neon_qadd_s64',
'helper_neon_qadd_s8',
'helper_neon_qadd_u16',
'helper_neon_qadd_u32',
'helper_neon_qadd_u64',
'helper_neon_qadd_u8',
'helper_neon_qdmulh_s16',
'helper_neon_qdmulh_s32',
'helper_neon_qneg_s16',
'helper_neon_qneg_s32',
'helper_neon_qneg_s64',
'helper_neon_qneg_s8',
'helper_neon_qrdmulh_s16',
'helper_neon_qrdmulh_s32',
'helper_neon_qrshl_s16',
'helper_neon_qrshl_s32',
'helper_neon_qrshl_s64',
'helper_neon_qrshl_s8',
'helper_neon_qrshl_u16',
'helper_neon_qrshl_u32',
'helper_neon_qrshl_u64',
'helper_neon_qrshl_u8',
'helper_neon_qshl_s16',
'helper_neon_qshl_s32',
'helper_neon_qshl_s64',
'helper_neon_qshl_s8',
'helper_neon_qshl_u16',
'helper_neon_qshl_u32',
'helper_neon_qshl_u64',
'helper_neon_qshl_u8',
'helper_neon_qshlu_s16',
'helper_neon_qshlu_s32',
'helper_neon_qshlu_s64',
'helper_neon_qshlu_s8',
'helper_neon_qsub_s16',
'helper_neon_qsub_s32',
'helper_neon_qsub_s64',
'helper_neon_qsub_s8',
'helper_neon_qsub_u16',
'helper_neon_qsub_u32',
'helper_neon_qsub_u64',
'helper_neon_qsub_u8',
'helper_neon_qunzip16',
'helper_neon_qunzip32',
'helper_neon_qunzip8',
'helper_neon_qzip16',
'helper_neon_qzip32',
'helper_neon_qzip8',
'helper_neon_rbit_u8',
'helper_neon_rhadd_s16',
'helper_neon_rhadd_s32',
'helper_neon_rhadd_s8',
'helper_neon_rhadd_u16',
'helper_neon_rhadd_u32',
'helper_neon_rhadd_u8',
'helper_neon_rshl_s16',
'helper_neon_rshl_s32',
'helper_neon_rshl_s64',
'helper_neon_rshl_s8',
'helper_neon_rshl_u16',
'helper_neon_rshl_u32',
'helper_neon_rshl_u64',
'helper_neon_rshl_u8',
'helper_neon_shl_s16',
'helper_neon_shl_s32',
'helper_neon_shl_s64',
'helper_neon_shl_s8',
'helper_neon_shl_u16',
'helper_neon_shl_u32',
'helper_neon_shl_u64',
'helper_neon_shl_u8',
'helper_neon_sqadd_u16',
'helper_neon_sqadd_u32',
'helper_neon_sqadd_u64',
'helper_neon_sqadd_u8',
'helper_neon_subl_u16',
'helper_neon_subl_u32',
'helper_neon_sub_u16',
'helper_neon_sub_u8',
'helper_neon_tbl',
'helper_neon_tst_u16',
'helper_neon_tst_u32',
'helper_neon_tst_u8',
'helper_neon_unarrow_sat16',
'helper_neon_unarrow_sat32',
'helper_neon_unarrow_sat8',
'helper_neon_unzip16',
'helper_neon_unzip8',
'helper_neon_uqadd_s16',
'helper_neon_uqadd_s32',
'helper_neon_uqadd_s64',
'helper_neon_uqadd_s8',
'helper_neon_widen_s16',
'helper_neon_widen_s8',
'helper_neon_widen_u16',
'helper_neon_widen_u8',
'helper_neon_zip16',
'helper_neon_zip8',
'helper_pre_hvc',
'helper_pre_smc',
'helper_qadd16',
'helper_qadd8',
'helper_qaddsubx',
'helper_qsub16',
'helper_qsub8',
'helper_qsubaddx',
'helper_rbit',
'helper_recpe_f32',
'helper_recpe_f64',
'helper_recpe_u32',
'helper_recps_f32',
'helper_ret_ldb_cmmu',
'helper_ret_ldsb_mmu',
'helper_ret_ldub_mmu',
'helper_ret_stb_mmu',
'helper_rintd',
'helper_rintd_exact',
'helper_rints',
'helper_rints_exact',
'helper_ror_cc',
'helper_rsqrte_f32',
'helper_rsqrte_f64',
'helper_rsqrte_u32',
'helper_rsqrts_f32',
'helper_sadd16',
'helper_sadd8',
'helper_saddsubx',
'helper_sar_cc',
'helper_sdiv',
'helper_sel_flags',
'helper_set_cp_reg',
'helper_set_cp_reg64',
'helper_set_neon_rmode',
'helper_set_r13_banked',
'helper_set_rmode',
'helper_set_user_reg',
'helper_shadd16',
'helper_shadd8',
'helper_shaddsubx',
'helper_shl_cc',
'helper_shr_cc',
'helper_shsub16',
'helper_shsub8',
'helper_shsubaddx',
'helper_ssat',
'helper_ssat16',
'helper_ssub16',
'helper_ssub8',
'helper_ssubaddx',
'helper_stb_mmu',
'helper_stl_mmu',
'helper_stq_mmu',
'helper_stw_mmu',
'helper_sub_saturate',
'helper_sub_usaturate',
'helper_sxtb16',
'helper_uadd16',
'helper_uadd8',
'helper_uaddsubx',
'helper_udiv',
'helper_uhadd16',
'helper_uhadd8',
'helper_uhaddsubx',
'helper_uhsub16',
'helper_uhsub8',
'helper_uhsubaddx',
'helper_uqadd16',
'helper_uqadd8',
'helper_uqaddsubx',
'helper_uqsub16',
'helper_uqsub8',
'helper_uqsubaddx',
'helper_usad8',
'helper_usat',
'helper_usat16',
'helper_usub16',
'helper_usub8',
'helper_usubaddx',
'helper_uxtb16',
'helper_v7m_mrs',
'helper_v7m_msr',
'helper_vfp_absd',
'helper_vfp_abss',
'helper_vfp_addd',
'helper_vfp_adds',
'helper_vfp_cmpd',
'helper_vfp_cmped',
'helper_vfp_cmpes',
'helper_vfp_cmps',
'helper_vfp_divd',
'helper_vfp_divs',
'helper_vfp_fcvtds',
'helper_vfp_fcvt_f16_to_f32',
'helper_vfp_fcvt_f16_to_f64',
'helper_vfp_fcvt_f32_to_f16',
'helper_vfp_fcvt_f64_to_f16',
'helper_vfp_fcvtsd',
'helper_vfp_get_fpscr',
'helper_vfp_maxd',
'helper_vfp_maxnumd',
'helper_vfp_maxnums',
'helper_vfp_maxs',
'helper_vfp_mind',
'helper_vfp_minnumd',
'helper_vfp_minnums',
'helper_vfp_mins',
'helper_vfp_muladdd',
'helper_vfp_muladds',
'helper_vfp_muld',
'helper_vfp_muls',
'helper_vfp_negd',
'helper_vfp_negs',
'helper_vfp_set_fpscr',
'helper_vfp_shtod',
'helper_vfp_shtos',
'helper_vfp_sitod',
'helper_vfp_sitos',
'helper_vfp_sltod',
'helper_vfp_sltos',
'helper_vfp_sqrtd',
'helper_vfp_sqrts',
'helper_vfp_sqtod',
'helper_vfp_sqtos',
'helper_vfp_subd',
'helper_vfp_subs',
'helper_vfp_toshd',
'helper_vfp_toshd_round_to_zero',
'helper_vfp_toshs',
'helper_vfp_toshs_round_to_zero',
'helper_vfp_tosid',
'helper_vfp_tosis',
'helper_vfp_tosizd',
'helper_vfp_tosizs',
'helper_vfp_tosld',
'helper_vfp_tosld_round_to_zero',
'helper_vfp_tosls',
'helper_vfp_tosls_round_to_zero',
'helper_vfp_tosqd',
'helper_vfp_tosqs',
'helper_vfp_touhd',
'helper_vfp_touhd_round_to_zero',
'helper_vfp_touhs',
'helper_vfp_touhs_round_to_zero',
'helper_vfp_touid',
'helper_vfp_touis',
'helper_vfp_touizd',
'helper_vfp_touizs',
'helper_vfp_tould',
'helper_vfp_tould_round_to_zero',
'helper_vfp_touls',
'helper_vfp_touls_round_to_zero',
'helper_vfp_touqd',
'helper_vfp_touqs',
'helper_vfp_uhtod',
'helper_vfp_uhtos',
'helper_vfp_uitod',
'helper_vfp_uitos',
'helper_vfp_ultod',
'helper_vfp_ultos',
'helper_vfp_uqtod',
'helper_vfp_uqtos',
'helper_wfe',
'helper_wfi',
'hex2decimal',
'hw_breakpoint_update',
'hw_breakpoint_update_all',
'hw_watchpoint_update',
'hw_watchpoint_update_all',
'_init',
'init_cpreg_list',
'init_lists',
'input_type_enum',
'int128_2_64',
'int128_add',
'int128_addto',
'int128_and',
'int128_eq',
'int128_ge',
'int128_get64',
'int128_gt',
'int128_le',
'int128_lt',
'int128_make64',
'int128_max',
'int128_min',
'int128_ne',
'int128_neg',
'int128_nz',
'int128_rshift',
'int128_sub',
'int128_subfrom',
'int128_zero',
'int16_to_float32',
'int16_to_float64',
'int32_to_float128',
'int32_to_float32',
'int32_to_float64',
'int32_to_floatx80',
'int64_to_float128',
'int64_to_float32',
'int64_to_float64',
'int64_to_floatx80',
'invalidate_and_set_dirty',
'invalidate_page_bitmap',
'io_mem_read',
'io_mem_write',
'io_readb',
'io_readl',
'io_readq',
'io_readw',
'iotlb_to_region',
'io_writeb',
'io_writel',
'io_writeq',
'io_writew',
'is_a64',
'is_help_option',
'isr_read',
'is_valid_option_list',
'iwmmxt_load_creg',
'iwmmxt_load_reg',
'iwmmxt_store_creg',
'iwmmxt_store_reg',
'__jit_debug_descriptor',
'__jit_debug_register_code',
'kvm_to_cpreg_id',
'last_ram_offset',
'ldl_be_p',
'ldl_be_phys',
'ldl_he_p',
'ldl_le_p',
'ldl_le_phys',
'ldl_phys',
'ldl_phys_internal',
'ldq_be_p',
'ldq_be_phys',
'ldq_he_p',
'ldq_le_p',
'ldq_le_phys',
'ldq_phys',
'ldq_phys_internal',
'ldst_name',
'ldub_p',
'ldub_phys',
'lduw_be_p',
'lduw_be_phys',
'lduw_he_p',
'lduw_le_p',
'lduw_le_phys',
'lduw_phys',
'lduw_phys_internal',
'le128',
'linked_bp_matches',
'listener_add_address_space',
'load_cpu_offset',
'load_reg',
'load_reg_var',
'log_cpu_state',
'lpae_cp_reginfo',
'lt128',
'machine_class_init',
'machine_finalize',
'machine_info',
'machine_initfn',
'machine_register_types',
'machvirt_init',
'machvirt_machine_init',
'maj',
'mapping_conflict',
'mapping_contiguous',
'mapping_have_same_region',
'mapping_merge',
'mem_add',
'mem_begin',
'mem_commit',
'memory_access_is_direct',
'memory_access_size',
'memory_init',
'memory_listener_match',
'memory_listener_register',
'memory_listener_unregister',
'memory_map_init',
'memory_mapping_filter',
'memory_mapping_list_add_mapping_sorted',
'memory_mapping_list_add_merge_sorted',
'memory_mapping_list_free',
'memory_mapping_list_init',
'memory_region_access_valid',
'memory_region_add_subregion',
'memory_region_add_subregion_common',
'memory_region_add_subregion_overlap',
'memory_region_big_endian',
'memory_region_clear_pending',
'memory_region_del_subregion',
'memory_region_destructor_alias',
'memory_region_destructor_none',
'memory_region_destructor_ram',
'memory_region_destructor_ram_from_ptr',
'memory_region_dispatch_read',
'memory_region_dispatch_read1',
'memory_region_dispatch_write',
'memory_region_escape_name',
'memory_region_finalize',
'memory_region_find',
'memory_region_get_addr',
'memory_region_get_alignment',
'memory_region_get_container',
'memory_region_get_fd',
'memory_region_get_may_overlap',
'memory_region_get_priority',
'memory_region_get_ram_addr',
'memory_region_get_ram_ptr',
'memory_region_get_size',
'memory_region_info',
'memory_region_init',
'memory_region_init_alias',
'memory_region_initfn',
'memory_region_init_io',
'memory_region_init_ram',
'memory_region_init_ram_ptr',
'memory_region_init_reservation',
'memory_region_is_iommu',
'memory_region_is_logging',
'memory_region_is_mapped',
'memory_region_is_ram',
'memory_region_is_rom',
'memory_region_is_romd',
'memory_region_is_skip_dump',
'memory_region_is_unassigned',
'memory_region_name',
'memory_region_need_escape',
'memory_region_oldmmio_read_accessor',
'memory_region_oldmmio_write_accessor',
'memory_region_present',
'memory_region_read_accessor',
'memory_region_readd_subregion',
'memory_region_ref',
'memory_region_resolve_container',
'memory_region_rom_device_set_romd',
'memory_region_section_get_iotlb',
'memory_region_set_address',
'memory_region_set_alias_offset',
'memory_region_set_enabled',
'memory_region_set_readonly',
'memory_region_set_skip_dump',
'memory_region_size',
'memory_region_to_address_space',
'memory_region_transaction_begin',
'memory_region_transaction_commit',
'memory_region_unref',
'memory_region_update_container_subregions',
'memory_region_write_accessor',
'memory_region_wrong_endianness',
'memory_try_enable_merging',
'module_call_init',
'module_load',
'mpidr_cp_reginfo',
'mpidr_read',
'msr_mask',
'mul128By64To192',
'mul128To256',
'mul64To128',
'muldiv64',
'neon_2rm_is_float_op',
'neon_2rm_sizes',
'neon_3r_sizes',
'neon_get_scalar',
'neon_load_reg',
'neon_load_reg64',
'neon_load_scratch',
'neon_ls_element_type',
'neon_reg_offset',
'neon_store_reg',
'neon_store_reg64',
'neon_store_scratch',
'new_ldst_label',
'next_list',
'normalizeFloat128Subnormal',
'normalizeFloat16Subnormal',
'normalizeFloat32Subnormal',
'normalizeFloat64Subnormal',
'normalizeFloatx80Subnormal',
'normalizeRoundAndPackFloat128',
'normalizeRoundAndPackFloat32',
'normalizeRoundAndPackFloat64',
'normalizeRoundAndPackFloatx80',
'not_v6_cp_reginfo',
'not_v7_cp_reginfo',
'not_v8_cp_reginfo',
'object_child_foreach',
'object_class_foreach',
'object_class_foreach_tramp',
'object_class_get_list',
'object_class_get_list_tramp',
'object_class_get_parent',
'object_deinit',
'object_dynamic_cast',
'object_finalize',
'object_finalize_child_property',
'object_get_child_property',
'object_get_link_property',
'object_get_root',
'object_initialize_with_type',
'object_init_with_type',
'object_instance_init',
'object_new_with_type',
'object_post_init_with_type',
'object_property_add_alias',
'object_property_add_link',
'object_property_add_uint16_ptr',
'object_property_add_uint32_ptr',
'object_property_add_uint64_ptr',
'object_property_add_uint8_ptr',
'object_property_allow_set_link',
'object_property_del',
'object_property_del_all',
'object_property_find',
'object_property_get',
'object_property_get_bool',
'object_property_get_int',
'object_property_get_link',
'object_property_get_qobject',
'object_property_get_str',
'object_property_get_type',
'object_property_is_child',
'object_property_set',
'object_property_set_description',
'object_property_set_link',
'object_property_set_qobject',
'object_release_link_property',
'object_resolve_abs_path',
'object_resolve_child_property',
'object_resolve_link',
'object_resolve_link_property',
'object_resolve_partial_path',
'object_resolve_path',
'object_resolve_path_component',
'object_resolve_path_type',
'object_set_link_property',
'object_unparent',
'omap_cachemaint_write',
'omap_cp_reginfo',
'omap_threadid_write',
'omap_ticonfig_write',
'omap_wfi_write',
'op_bits',
'open_modeflags',
'op_to_mov',
'op_to_movi',
'output_type_enum',
'packFloat128',
'packFloat16',
'packFloat32',
'packFloat64',
'packFloatx80',
'page_find',
'page_find_alloc',
'page_flush_tb',
'page_flush_tb_1',
'page_init',
'page_size_init',
'par',
'parse_array',
'parse_error',
'parse_escape',
'parse_keyword',
'parse_literal',
'parse_object',
'parse_optional',
'parse_option_bool',
'parse_option_number',
'parse_option_size',
'parse_pair',
'parser_context_free',
'parser_context_new',
'parser_context_peek_token',
'parser_context_pop_token',
'parser_context_restore',
'parser_context_save',
'parse_str',
'parse_type_bool',
'parse_type_int',
'parse_type_number',
'parse_type_size',
'parse_type_str',
'parse_value',
'par_write',
'patch_reloc',
'phys_map_node_alloc',
'phys_map_node_reserve',
'phys_mem_alloc',
'phys_mem_set_alloc',
'phys_page_compact',
'phys_page_compact_all',
'phys_page_find',
'phys_page_set',
'phys_page_set_level',
'phys_section_add',
'phys_section_destroy',
'phys_sections_free',
'pickNaN',
'pickNaNMulAdd',
'pmccfiltr_write',
'pmccntr_read',
'pmccntr_sync',
'pmccntr_write',
'pmccntr_write32',
'pmcntenclr_write',
'pmcntenset_write',
'pmcr_write',
'pmintenclr_write',
'pmintenset_write',
'pmovsr_write',
'pmreg_access',
'pmsav5_cp_reginfo',
'pmsav5_data_ap_read',
'pmsav5_data_ap_write',
'pmsav5_insn_ap_read',
'pmsav5_insn_ap_write',
'pmuserenr_write',
'pmxevtyper_write',
'print_type_bool',
'print_type_int',
'print_type_number',
'print_type_size',
'print_type_str',
'propagateFloat128NaN',
'propagateFloat32MulAddNaN',
'propagateFloat32NaN',
'propagateFloat64MulAddNaN',
'propagateFloat64NaN',
'propagateFloatx80NaN',
'property_get_alias',
'property_get_bool',
'property_get_str',
'property_get_uint16_ptr',
'property_get_uint32_ptr',
'property_get_uint64_ptr',
'property_get_uint8_ptr',
'property_release_alias',
'property_release_bool',
'property_release_str',
'property_resolve_alias',
'property_set_alias',
'property_set_bool',
'property_set_str',
'pstate_read',
'pstate_write',
'pxa250_initfn',
'pxa255_initfn',
'pxa260_initfn',
'pxa261_initfn',
'pxa262_initfn',
'pxa270a0_initfn',
'pxa270a1_initfn',
'pxa270b0_initfn',
'pxa270b1_initfn',
'pxa270c0_initfn',
'pxa270c5_initfn',
'qapi_dealloc_end_implicit_struct',
'qapi_dealloc_end_list',
'qapi_dealloc_end_struct',
'qapi_dealloc_get_visitor',
'qapi_dealloc_next_list',
'qapi_dealloc_pop',
'qapi_dealloc_push',
'qapi_dealloc_start_implicit_struct',
'qapi_dealloc_start_list',
'qapi_dealloc_start_struct',
'qapi_dealloc_start_union',
'qapi_dealloc_type_bool',
'qapi_dealloc_type_enum',
'qapi_dealloc_type_int',
'qapi_dealloc_type_number',
'qapi_dealloc_type_size',
'qapi_dealloc_type_str',
'qapi_dealloc_visitor_cleanup',
'qapi_dealloc_visitor_new',
'qapi_free_boolList',
'qapi_free_ErrorClassList',
'qapi_free_int16List',
'qapi_free_int32List',
'qapi_free_int64List',
'qapi_free_int8List',
'qapi_free_intList',
'qapi_free_numberList',
'qapi_free_strList',
'qapi_free_uint16List',
'qapi_free_uint32List',
'qapi_free_uint64List',
'qapi_free_uint8List',
'qapi_free_X86CPUFeatureWordInfo',
'qapi_free_X86CPUFeatureWordInfoList',
'qapi_free_X86CPURegister32List',
'qbool_destroy_obj',
'qbool_from_int',
'qbool_get_int',
'qbool_type',
'qbus_create',
'qbus_create_inplace',
'qbus_finalize',
'qbus_initfn',
'qbus_realize',
'qdev_create',
'qdev_get_type',
'qdev_register_types',
'qdev_set_parent_bus',
'qdev_try_create',
'qdict_add_key',
'qdict_array_split',
'qdict_clone_shallow',
'qdict_del',
'qdict_destroy_obj',
'qdict_entry_key',
'qdict_entry_value',
'qdict_extract_subqdict',
'qdict_find',
'qdict_first',
'qdict_flatten',
'qdict_flatten_qdict',
'qdict_flatten_qlist',
'qdict_get',
'qdict_get_bool',
'qdict_get_double',
'qdict_get_int',
'qdict_get_obj',
'qdict_get_qdict',
'qdict_get_qlist',
'qdict_get_str',
'qdict_get_try_bool',
'qdict_get_try_int',
'qdict_get_try_str',
'qdict_haskey',
'qdict_has_prefixed_entries',
'qdict_iter',
'qdict_join',
'qdict_new',
'qdict_next',
'qdict_next_entry',
'qdict_put_obj',
'qdict_size',
'qdict_type',
'qemu_clock_get_us',
'qemu_clock_ptr',
'qemu_clocks',
'qemu_get_cpu',
'qemu_get_guest_memory_mapping',
'qemu_get_guest_simple_memory_mapping',
'qemu_get_ram_block',
'qemu_get_ram_block_host_ptr',
'qemu_get_ram_fd',
'qemu_get_ram_ptr',
'qemu_host_page_mask',
'qemu_host_page_size',
'qemu_init_vcpu',
'qemu_ld_helpers',
'qemu_log_close',
'qemu_log_enabled',
'qemu_log_flush',
'qemu_loglevel_mask',
'qemu_log_vprintf',
'qemu_oom_check',
'qemu_parse_fd',
'qemu_ram_addr_from_host',
'qemu_ram_addr_from_host_nofail',
'qemu_ram_alloc',
'qemu_ram_alloc_from_ptr',
'qemu_ram_foreach_block',
'qemu_ram_free',
'qemu_ram_free_from_ptr',
'qemu_ram_ptr_length',
'qemu_ram_remap',
'qemu_ram_setup_dump',
'qemu_ram_unset_idstr',
'qemu_real_host_page_size',
'qemu_st_helpers',
'qemu_tcg_init_vcpu',
'qemu_try_memalign',
'qentry_destroy',
'qerror_human',
'qerror_report',
'qerror_report_err',
'qfloat_destroy_obj',
'qfloat_from_double',
'qfloat_get_double',
'qfloat_type',
'qint_destroy_obj',
'qint_from_int',
'qint_get_int',
'qint_type',
'qlist_append_obj',
'qlist_copy',
'qlist_copy_elem',
'qlist_destroy_obj',
'qlist_empty',
'qlist_entry_obj',
'qlist_first',
'qlist_iter',
'qlist_new',
'qlist_next',
'qlist_peek',
'qlist_pop',
'qlist_size',
'qlist_size_iter',
'qlist_type',
'qmp_input_end_implicit_struct',
'qmp_input_end_list',
'qmp_input_end_struct',
'qmp_input_get_next_type',
'qmp_input_get_object',
'qmp_input_get_visitor',
'qmp_input_next_list',
'qmp_input_optional',
'qmp_input_pop',
'qmp_input_push',
'qmp_input_start_implicit_struct',
'qmp_input_start_list',
'qmp_input_start_struct',
'qmp_input_type_bool',
'qmp_input_type_int',
'qmp_input_type_number',
'qmp_input_type_str',
'qmp_input_visitor_cleanup',
'qmp_input_visitor_new',
'qmp_input_visitor_new_strict',
'qmp_output_add_obj',
'qmp_output_end_list',
'qmp_output_end_struct',
'qmp_output_first',
'qmp_output_get_qobject',
'qmp_output_get_visitor',
'qmp_output_last',
'qmp_output_next_list',
'qmp_output_pop',
'qmp_output_push_obj',
'qmp_output_start_list',
'qmp_output_start_struct',
'qmp_output_type_bool',
'qmp_output_type_int',
'qmp_output_type_number',
'qmp_output_type_str',
'qmp_output_visitor_cleanup',
'qmp_output_visitor_new',
'qobject_decref',
'qobject_to_qbool',
'qobject_to_qdict',
'qobject_to_qfloat',
'qobject_to_qint',
'qobject_to_qlist',
'qobject_to_qstring',
'qobject_type',
'qstring_append',
'qstring_append_chr',
'qstring_append_int',
'qstring_destroy_obj',
'qstring_from_escaped_str',
'qstring_from_str',
'qstring_from_substr',
'qstring_get_length',
'qstring_get_str',
'qstring_new',
'qstring_type',
'ram_block_add',
'ram_size',
'range_compare',
'range_covers_byte',
'range_get_last',
'range_merge',
'ranges_can_merge',
'raw_read',
'raw_write',
'rcon',
'read_raw_cp_reg',
'recip_estimate',
'recip_sqrt_estimate',
'register_cp_regs_for_features',
'register_multipage',
'register_subpage',
'register_tm_clones',
'register_types_object',
'regnames',
'render_memory_region',
'reset_all_temps',
'reset_temp',
'rol32',
'rol64',
'ror32',
'ror64',
'roundAndPackFloat128',
'roundAndPackFloat16',
'roundAndPackFloat32',
'roundAndPackFloat64',
'roundAndPackFloatx80',
'roundAndPackInt32',
'roundAndPackInt64',
'roundAndPackUint64',
'round_to_inf',
'run_on_cpu',
's0',
'S0',
's1',
'S1',
'sa1100_initfn',
'sa1110_initfn',
'save_globals',
'scr_write',
'sctlr_write',
'set_bit',
'set_bits',
'set_default_nan_mode',
'set_feature',
'set_float_detect_tininess',
'set_float_exception_flags',
'set_float_rounding_mode',
'set_flush_inputs_to_zero',
'set_flush_to_zero',
'set_swi_errno',
'sextract32',
'sextract64',
'shift128ExtraRightJamming',
'shift128Right',
'shift128RightJamming',
'shift32RightJamming',
'shift64ExtraRightJamming',
'shift64RightJamming',
'shifter_out_im',
'shortShift128Left',
'shortShift192Left',
'simple_mpu_ap_bits',
'size_code_gen_buffer',
'softmmu_lock_user',
'softmmu_lock_user_string',
'softmmu_tget32',
'softmmu_tget8',
'softmmu_tput32',
'softmmu_unlock_user',
'sort_constraints',
'sp_el0_access',
'spsel_read',
'spsel_write',
'start_list',
'stb_p',
'stb_phys',
'stl_be_p',
'stl_be_phys',
'stl_he_p',
'stl_le_p',
'stl_le_phys',
'stl_phys',
'stl_phys_internal',
'stl_phys_notdirty',
'store_cpu_offset',
'store_reg',
'store_reg_bx',
'store_reg_from_load',
'stq_be_p',
'stq_be_phys',
'stq_he_p',
'stq_le_p',
'stq_le_phys',
'stq_phys',
'string_input_get_visitor',
'string_input_visitor_cleanup',
'string_input_visitor_new',
'strongarm_cp_reginfo',
'strstart',
'strtosz',
'strtosz_suffix',
'stw_be_p',
'stw_be_phys',
'stw_he_p',
'stw_le_p',
'stw_le_phys',
'stw_phys',
'stw_phys_internal',
'sub128',
'sub16_sat',
'sub16_usat',
'sub192',
'sub8_sat',
'sub8_usat',
'subFloat128Sigs',
'subFloat32Sigs',
'subFloat64Sigs',
'subFloatx80Sigs',
'subpage_accepts',
'subpage_init',
'subpage_ops',
'subpage_read',
'subpage_register',
'subpage_write',
'suffix_mul',
'swap_commutative',
'swap_commutative2',
'switch_mode',
'switch_v7m_sp',
'syn_aa32_bkpt',
'syn_aa32_hvc',
'syn_aa32_smc',
'syn_aa32_svc',
'syn_breakpoint',
'sync_globals',
'syn_cp14_rrt_trap',
'syn_cp14_rt_trap',
'syn_cp15_rrt_trap',
'syn_cp15_rt_trap',
'syn_data_abort',
'syn_fp_access_trap',
'syn_insn_abort',
'syn_swstep',
'syn_uncategorized',
'syn_watchpoint',
'syscall_err',
'system_bus_class_init',
'system_bus_info',
't2ee_cp_reginfo',
'table_logic_cc',
'target_parse_constraint',
'target_words_bigendian',
'tb_add_jump',
'tb_alloc',
'tb_alloc_page',
'tb_check_watchpoint',
'tb_find_fast',
'tb_find_pc',
'tb_find_slow',
'tb_flush',
'tb_flush_jmp_cache',
'tb_free',
'tb_gen_code',
'tb_hash_remove',
'tb_invalidate_phys_addr',
'tb_invalidate_phys_page_range',
'tb_invalidate_phys_range',
'tb_jmp_cache_hash_func',
'tb_jmp_cache_hash_page',
'tb_jmp_remove',
'tb_link_page',
'tb_page_remove',
'tb_phys_hash_func',
'tb_phys_invalidate',
'tb_reset_jump',
'tb_set_jmp_target',
'tcg_accel_class_init',
'tcg_accel_type',
'tcg_add_param_i32',
'tcg_add_param_i64',
'tcg_add_target_add_op_defs',
'tcg_allowed',
'tcg_canonicalize_memop',
'tcg_commit',
'tcg_cond_to_jcc',
'tcg_constant_folding',
'tcg_const_i32',
'tcg_const_i64',
'tcg_const_local_i32',
'tcg_const_local_i64',
'tcg_context_init',
'tcg_cpu_address_space_init',
'tcg_cpu_exec',
'tcg_current_code_size',
'tcg_dump_info',
'tcg_dump_ops',
'tcg_exec_all',
'tcg_find_helper',
'tcg_func_start',
'tcg_gen_abs_i32',
'tcg_gen_add2_i32',
'tcg_gen_add_i32',
'tcg_gen_add_i64',
'tcg_gen_addi_i32',
'tcg_gen_addi_i64',
'tcg_gen_andc_i32',
'tcg_gen_and_i32',
'tcg_gen_and_i64',
'tcg_gen_andi_i32',
'tcg_gen_andi_i64',
'tcg_gen_br',
'tcg_gen_brcond_i32',
'tcg_gen_brcond_i64',
'tcg_gen_brcondi_i32',
'tcg_gen_bswap16_i32',
'tcg_gen_bswap32_i32',
'tcg_gen_callN',
'tcg_gen_code',
'tcg_gen_code_common',
'tcg_gen_code_search_pc',
'tcg_gen_concat_i32_i64',
'tcg_gen_debug_insn_start',
'tcg_gen_deposit_i32',
'tcg_gen_exit_tb',
'tcg_gen_ext16s_i32',
'tcg_gen_ext16u_i32',
'tcg_gen_ext32s_i64',
'tcg_gen_ext32u_i64',
'tcg_gen_ext8s_i32',
'tcg_gen_ext8u_i32',
'tcg_gen_ext_i32_i64',
'tcg_gen_extu_i32_i64',
'tcg_gen_goto_tb',
'tcg_gen_ld_i32',
'tcg_gen_ld_i64',
'tcg_gen_ldst_op_i32',
'tcg_gen_ldst_op_i64',
'tcg_gen_movcond_i32',
'tcg_gen_movcond_i64',
'tcg_gen_mov_i32',
'tcg_gen_mov_i64',
'tcg_gen_movi_i32',
'tcg_gen_movi_i64',
'tcg_gen_mul_i32',
'tcg_gen_muls2_i32',
'tcg_gen_mulu2_i32',
'tcg_gen_neg_i32',
'tcg_gen_neg_i64',
'tcg_gen_not_i32',
'tcg_gen_op0',
'tcg_gen_op1i',
'tcg_gen_op2_i32',
'tcg_gen_op2_i64',
'tcg_gen_op2i_i32',
'tcg_gen_op2i_i64',
'tcg_gen_op3_i32',
'tcg_gen_op3_i64',
'tcg_gen_op4_i32',
'tcg_gen_op4i_i32',
'tcg_gen_op4ii_i32',
'tcg_gen_op4ii_i64',
'tcg_gen_op5ii_i32',
'tcg_gen_op6_i32',
'tcg_gen_op6i_i32',
'tcg_gen_op6i_i64',
'tcg_gen_orc_i32',
'tcg_gen_or_i32',
'tcg_gen_or_i64',
'tcg_gen_ori_i32',
'tcg_gen_qemu_ld_i32',
'tcg_gen_qemu_ld_i64',
'tcg_gen_qemu_st_i32',
'tcg_gen_qemu_st_i64',
'tcg_gen_rotl_i32',
'tcg_gen_rotli_i32',
'tcg_gen_rotr_i32',
'tcg_gen_rotri_i32',
'tcg_gen_sar_i32',
'tcg_gen_sari_i32',
'tcg_gen_setcond_i32',
'tcg_gen_shl_i32',
'tcg_gen_shl_i64',
'tcg_gen_shli_i32',
'tcg_gen_shli_i64',
'tcg_gen_shr_i32',
'tcg_gen_shifti_i64',
'tcg_gen_shr_i64',
'tcg_gen_shri_i32',
'tcg_gen_shri_i64',
'tcg_gen_st_i32',
'tcg_gen_st_i64',
'tcg_gen_sub_i32',
'tcg_gen_sub_i64',
'tcg_gen_subi_i32',
'tcg_gen_trunc_i64_i32',
'tcg_gen_trunc_shr_i64_i32',
'tcg_gen_xor_i32',
'tcg_gen_xor_i64',
'tcg_gen_xori_i32',
'tcg_get_arg_str_i32',
'tcg_get_arg_str_i64',
'tcg_get_arg_str_idx',
'tcg_global_mem_new_i32',
'tcg_global_mem_new_i64',
'tcg_global_mem_new_internal',
'tcg_global_reg_new_i32',
'tcg_global_reg_new_i64',
'tcg_global_reg_new_internal',
'tcg_handle_interrupt',
'tcg_init',
'tcg_invert_cond',
'tcg_la_bb_end',
'tcg_la_br_end',
'tcg_la_func_end',
'tcg_liveness_analysis',
'tcg_malloc',
'tcg_malloc_internal',
'tcg_op_defs_org',
'tcg_opt_gen_mov',
'tcg_opt_gen_movi',
'tcg_optimize',
'tcg_out16',
'tcg_out32',
'tcg_out64',
'tcg_out8',
'tcg_out_addi',
'tcg_out_branch',
'tcg_out_brcond32',
'tcg_out_brcond64',
'tcg_out_bswap32',
'tcg_out_bswap64',
'tcg_out_call',
'tcg_out_cmp',
'tcg_out_ext16s',
'tcg_out_ext16u',
'tcg_out_ext32s',
'tcg_out_ext32u',
'tcg_out_ext8s',
'tcg_out_ext8u',
'tcg_out_jmp',
'tcg_out_jxx',
'tcg_out_label',
'tcg_out_ld',
'tcg_out_modrm',
'tcg_out_modrm_offset',
'tcg_out_modrm_sib_offset',
'tcg_out_mov',
'tcg_out_movcond32',
'tcg_out_movcond64',
'tcg_out_movi',
'tcg_out_op',
'tcg_out_pop',
'tcg_out_push',
'tcg_out_qemu_ld',
'tcg_out_qemu_ld_direct',
'tcg_out_qemu_ld_slow_path',
'tcg_out_qemu_st',
'tcg_out_qemu_st_direct',
'tcg_out_qemu_st_slow_path',
'tcg_out_reloc',
'tcg_out_rolw_8',
'tcg_out_setcond32',
'tcg_out_setcond64',
'tcg_out_shifti',
'tcg_out_st',
'tcg_out_tb_finalize',
'tcg_out_tb_init',
'tcg_out_tlb_load',
'tcg_out_vex_modrm',
'tcg_patch32',
'tcg_patch8',
'tcg_pcrel_diff',
'tcg_pool_reset',
'tcg_prologue_init',
'tcg_ptr_byte_diff',
'tcg_reg_alloc',
'tcg_reg_alloc_bb_end',
'tcg_reg_alloc_call',
'tcg_reg_alloc_mov',
'tcg_reg_alloc_movi',
'tcg_reg_alloc_op',
'tcg_reg_alloc_start',
'tcg_reg_free',
'tcg_reg_sync',
'tcg_set_frame',
'tcg_set_nop',
'tcg_swap_cond',
'tcg_target_callee_save_regs',
'tcg_target_call_iarg_regs',
'tcg_target_call_oarg_regs',
'tcg_target_const_match',
'tcg_target_init',
'tcg_target_qemu_prologue',
'tcg_target_reg_alloc_order',
'tcg_temp_alloc',
'tcg_temp_free_i32',
'tcg_temp_free_i64',
'tcg_temp_free_internal',
'tcg_temp_local_new_i32',
'tcg_temp_local_new_i64',
'tcg_temp_new_i32',
'tcg_temp_new_i64',
'tcg_temp_new_internal',
'tcg_temp_new_internal_i32',
'tcg_temp_new_internal_i64',
'tdb_hash',
'teecr_write',
'teehbr_access',
'temp_allocate_frame',
'temp_dead',
'temps_are_copies',
'temp_save',
'temp_sync',
'tgen_arithi',
'tgen_arithr',
'thumb2_logic_op',
'ti925t_initfn',
'tlb_add_large_page',
'tlb_flush_entry',
'tlbi_aa64_asid_is_write',
'tlbi_aa64_asid_write',
'tlbi_aa64_vaa_is_write',
'tlbi_aa64_vaa_write',
'tlbi_aa64_va_is_write',
'tlbi_aa64_va_write',
'tlbiall_is_write',
'tlbiall_write',
'tlbiasid_is_write',
'tlbiasid_write',
'tlbimvaa_is_write',
'tlbimvaa_write',
'tlbimva_is_write',
'tlbimva_write',
'tlb_is_dirty_ram',
'tlb_protect_code',
'tlb_reset_dirty_range',
'tlb_reset_dirty_range_all',
'tlb_set_dirty',
'tlb_set_dirty1',
'tlb_unprotect_code_phys',
'tlb_vaddr_to_host',
'token_get_type',
'token_get_value',
'token_is_escape',
'token_is_keyword',
'token_is_operator',
'tokens_append_from_iter',
'to_qiv',
'to_qov',
'tosa_init',
'tosa_machine_init',
'tswap32',
'tswap64',
'type_class_get_size',
'type_get_by_name',
'type_get_parent',
'type_has_parent',
'type_initialize',
'type_initialize_interface',
'type_is_ancestor',
'type_new',
'type_object_get_size',
'type_register_internal',
'type_table_add',
'type_table_get',
'type_table_lookup',
'uint16_to_float32',
'uint16_to_float64',
'uint32_to_float32',
'uint32_to_float64',
'uint64_to_float128',
'uint64_to_float32',
'uint64_to_float64',
'unassigned_io_ops',
'unassigned_io_read',
'unassigned_io_write',
'unassigned_mem_accepts',
'unassigned_mem_ops',
'unassigned_mem_read',
'unassigned_mem_write',
'update_spsel',
'v6_cp_reginfo',
'v6k_cp_reginfo',
'v7_cp_reginfo',
'v7mp_cp_reginfo',
'v7m_pop',
'v7m_push',
'v8_cp_reginfo',
'v8_el2_cp_reginfo',
'v8_el3_cp_reginfo',
'v8_el3_no_el2_cp_reginfo',
'vapa_cp_reginfo',
'vbar_write',
'vfp_exceptbits_from_host',
'vfp_exceptbits_to_host',
'vfp_get_fpcr',
'vfp_get_fpscr',
'vfp_get_fpsr',
'vfp_reg_offset',
'vfp_set_fpcr',
'vfp_set_fpscr',
'vfp_set_fpsr',
'visit_end_implicit_struct',
'visit_end_list',
'visit_end_struct',
'visit_end_union',
'visit_get_next_type',
'visit_next_list',
'visit_optional',
'visit_start_implicit_struct',
'visit_start_list',
'visit_start_struct',
'visit_start_union',
'vmsa_cp_reginfo',
'vmsa_tcr_el1_write',
'vmsa_ttbcr_raw_write',
'vmsa_ttbcr_reset',
'vmsa_ttbcr_write',
'vmsa_ttbr_write',
'write_cpustate_to_list',
'write_list_to_cpustate',
'write_raw_cp_reg',
'X86CPURegister32_lookup',
'x86_op_defs',
'xpsr_read',
'xpsr_write',
'xscale_cpar_write',
'xscale_cp_reginfo'
)
arm_symbols = (
'ARM_REGS_STORAGE_SIZE',
)
aarch64_symbols = (
'ARM64_REGS_STORAGE_SIZE',
'arm64_release',
'arm64_reg_reset',
'arm64_reg_read',
'arm64_reg_write',
'gen_a64_set_pc_im',
'aarch64_cpu_register_types',
'helper_udiv64',
'helper_sdiv64',
'helper_cls64',
'helper_cls32',
'helper_rbit64',
'helper_vfp_cmps_a64',
'helper_vfp_cmpes_a64',
'helper_vfp_cmpd_a64',
'helper_vfp_cmped_a64',
'helper_vfp_mulxs',
'helper_vfp_mulxd',
'helper_simd_tbl',
'helper_neon_ceq_f64',
'helper_neon_cge_f64',
'helper_neon_cgt_f64',
'helper_recpsf_f32',
'helper_recpsf_f64',
'helper_rsqrtsf_f32',
'helper_rsqrtsf_f64',
'helper_neon_addlp_s8',
'helper_neon_addlp_u8',
'helper_neon_addlp_s16',
'helper_neon_addlp_u16',
'helper_frecpx_f32',
'helper_frecpx_f64',
'helper_fcvtx_f64_to_f32',
'helper_crc32_64',
'helper_crc32c_64',
'aarch64_cpu_do_interrupt',
)
mips_symbols = (
'cpu_mips_exec',
'cpu_mips_get_random',
'cpu_mips_get_count',
'cpu_mips_store_count',
'cpu_mips_store_compare',
'cpu_mips_start_count',
'cpu_mips_stop_count',
'mips_machine_init',
'cpu_mips_kseg0_to_phys',
'cpu_mips_phys_to_kseg0',
'cpu_mips_kvm_um_phys_to_kseg0',
'mips_cpu_register_types',
'cpu_mips_init',
'cpu_state_reset',
'helper_msa_andi_b',
'helper_msa_ori_b',
'helper_msa_nori_b',
'helper_msa_xori_b',
'helper_msa_bmnzi_b',
'helper_msa_bmzi_b',
'helper_msa_bseli_b',
'helper_msa_shf_df',
'helper_msa_and_v',
'helper_msa_or_v',
'helper_msa_nor_v',
'helper_msa_xor_v',
'helper_msa_bmnz_v',
'helper_msa_bmz_v',
'helper_msa_bsel_v',
'helper_msa_addvi_df',
'helper_msa_subvi_df',
'helper_msa_ceqi_df',
'helper_msa_clei_s_df',
'helper_msa_clei_u_df',
'helper_msa_clti_s_df',
'helper_msa_clti_u_df',
'helper_msa_maxi_s_df',
'helper_msa_maxi_u_df',
'helper_msa_mini_s_df',
'helper_msa_mini_u_df',
'helper_msa_ldi_df',
'helper_msa_slli_df',
'helper_msa_srai_df',
'helper_msa_srli_df',
'helper_msa_bclri_df',
'helper_msa_bseti_df',
'helper_msa_bnegi_df',
'helper_msa_sat_s_df',
'helper_msa_sat_u_df',
'helper_msa_srari_df',
'helper_msa_srlri_df',
'helper_msa_binsli_df',
'helper_msa_binsri_df',
'helper_msa_sll_df',
'helper_msa_sra_df',
'helper_msa_srl_df',
'helper_msa_bclr_df',
'helper_msa_bset_df',
'helper_msa_bneg_df',
'helper_msa_addv_df',
'helper_msa_subv_df',
'helper_msa_max_s_df',
'helper_msa_max_u_df',
'helper_msa_min_s_df',
'helper_msa_min_u_df',
'helper_msa_max_a_df',
'helper_msa_min_a_df',
'helper_msa_ceq_df',
'helper_msa_clt_s_df',
'helper_msa_clt_u_df',
'helper_msa_cle_s_df',
'helper_msa_cle_u_df',
'helper_msa_add_a_df',
'helper_msa_adds_a_df',
'helper_msa_adds_s_df',
'helper_msa_adds_u_df',
'helper_msa_ave_s_df',
'helper_msa_ave_u_df',
'helper_msa_aver_s_df',
'helper_msa_aver_u_df',
'helper_msa_subs_s_df',
'helper_msa_subs_u_df',
'helper_msa_subsus_u_df',
'helper_msa_subsuu_s_df',
'helper_msa_asub_s_df',
'helper_msa_asub_u_df',
'helper_msa_mulv_df',
'helper_msa_div_s_df',
'helper_msa_div_u_df',
'helper_msa_mod_s_df',
'helper_msa_mod_u_df',
'helper_msa_dotp_s_df',
'helper_msa_dotp_u_df',
'helper_msa_srar_df',
'helper_msa_srlr_df',
'helper_msa_hadd_s_df',
'helper_msa_hadd_u_df',
'helper_msa_hsub_s_df',
'helper_msa_hsub_u_df',
'helper_msa_mul_q_df',
'helper_msa_mulr_q_df',
'helper_msa_sld_df',
'helper_msa_maddv_df',
'helper_msa_msubv_df',
'helper_msa_dpadd_s_df',
'helper_msa_dpadd_u_df',
'helper_msa_dpsub_s_df',
'helper_msa_dpsub_u_df',
'helper_msa_binsl_df',
'helper_msa_binsr_df',
'helper_msa_madd_q_df',
'helper_msa_msub_q_df',
'helper_msa_maddr_q_df',
'helper_msa_msubr_q_df',
'helper_msa_splat_df',
'helper_msa_pckev_df',
'helper_msa_pckod_df',
'helper_msa_ilvl_df',
'helper_msa_ilvr_df',
'helper_msa_ilvev_df',
'helper_msa_ilvod_df',
'helper_msa_vshf_df',
'helper_msa_sldi_df',
'helper_msa_splati_df',
'helper_msa_copy_s_df',
'helper_msa_copy_u_df',
'helper_msa_insert_df',
'helper_msa_insve_df',
'helper_msa_ctcmsa',
'helper_msa_cfcmsa',
'helper_msa_move_v',
'helper_msa_fill_df',
'helper_msa_nlzc_df',
'helper_msa_nloc_df',
'helper_msa_pcnt_df',
'helper_msa_fcaf_df',
'helper_msa_fcun_df',
'helper_msa_fceq_df',
'helper_msa_fcueq_df',
'helper_msa_fclt_df',
'helper_msa_fcult_df',
'helper_msa_fcle_df',
'helper_msa_fcule_df',
'helper_msa_fsaf_df',
'helper_msa_fsun_df',
'helper_msa_fseq_df',
'helper_msa_fsueq_df',
'helper_msa_fslt_df',
'helper_msa_fsult_df',
'helper_msa_fsle_df',
'helper_msa_fsule_df',
'helper_msa_fcor_df',
'helper_msa_fcune_df',
'helper_msa_fcne_df',
'helper_msa_fsor_df',
'helper_msa_fsune_df',
'helper_msa_fsne_df',
'helper_msa_fadd_df',
'helper_msa_fsub_df',
'helper_msa_fmul_df',
'helper_msa_fdiv_df',
'helper_msa_fmadd_df',
'helper_msa_fmsub_df',
'helper_msa_fexp2_df',
'helper_msa_fexdo_df',
'helper_msa_ftq_df',
'helper_msa_fmin_df',
'helper_msa_fmin_a_df',
'helper_msa_fmax_df',
'helper_msa_fmax_a_df',
'helper_msa_fclass_df',
'helper_msa_ftrunc_s_df',
'helper_msa_ftrunc_u_df',
'helper_msa_fsqrt_df',
'helper_msa_frsqrt_df',
'helper_msa_frcp_df',
'helper_msa_frint_df',
'helper_msa_flog2_df',
'helper_msa_fexupl_df',
'helper_msa_fexupr_df',
'helper_msa_ffql_df',
'helper_msa_ffqr_df',
'helper_msa_ftint_s_df',
'helper_msa_ftint_u_df',
'helper_msa_ffint_s_df',
'helper_msa_ffint_u_df',
'helper_paddsb',
'helper_paddusb',
'helper_paddsh',
'helper_paddush',
'helper_paddb',
'helper_paddh',
'helper_paddw',
'helper_psubsb',
'helper_psubusb',
'helper_psubsh',
'helper_psubush',
'helper_psubb',
'helper_psubh',
'helper_psubw',
'helper_pshufh',
'helper_packsswh',
'helper_packsshb',
'helper_packushb',
'helper_punpcklwd',
'helper_punpckhwd',
'helper_punpcklhw',
'helper_punpckhhw',
'helper_punpcklbh',
'helper_punpckhbh',
'helper_pavgh',
'helper_pavgb',
'helper_pmaxsh',
'helper_pminsh',
'helper_pmaxub',
'helper_pminub',
'helper_pcmpeqw',
'helper_pcmpgtw',
'helper_pcmpeqh',
'helper_pcmpgth',
'helper_pcmpeqb',
'helper_pcmpgtb',
'helper_psllw',
'helper_psrlw',
'helper_psraw',
'helper_psllh',
'helper_psrlh',
'helper_psrah',
'helper_pmullh',
'helper_pmulhh',
'helper_pmulhuh',
'helper_pmaddhw',
'helper_pasubub',
'helper_biadd',
'helper_pmovmskb',
'helper_absq_s_ph',
'helper_absq_s_qb',
'helper_absq_s_w',
'helper_addqh_ph',
'helper_addqh_r_ph',
'helper_addqh_r_w',
'helper_addqh_w',
'helper_adduh_qb',
'helper_adduh_r_qb',
'helper_subqh_ph',
'helper_subqh_r_ph',
'helper_subqh_r_w',
'helper_subqh_w',
'helper_addq_ph',
'helper_addq_s_ph',
'helper_addq_s_w',
'helper_addu_ph',
'helper_addu_qb',
'helper_addu_s_ph',
'helper_addu_s_qb',
'helper_subq_ph',
'helper_subq_s_ph',
'helper_subq_s_w',
'helper_subu_ph',
'helper_subu_qb',
'helper_subu_s_ph',
'helper_subu_s_qb',
'helper_subuh_qb',
'helper_subuh_r_qb',
'helper_addsc',
'helper_addwc',
'helper_modsub',
'helper_raddu_w_qb',
'helper_precr_qb_ph',
'helper_precrq_qb_ph',
'helper_precr_sra_ph_w',
'helper_precr_sra_r_ph_w',
'helper_precrq_ph_w',
'helper_precrq_rs_ph_w',
'helper_precrqu_s_qb_ph',
'helper_precequ_ph_qbl',
'helper_precequ_ph_qbr',
'helper_precequ_ph_qbla',
'helper_precequ_ph_qbra',
'helper_preceu_ph_qbl',
'helper_preceu_ph_qbr',
'helper_preceu_ph_qbla',
'helper_preceu_ph_qbra',
'helper_shll_qb',
'helper_shrl_qb',
'helper_shra_qb',
'helper_shra_r_qb',
'helper_shll_ph',
'helper_shll_s_ph',
'helper_shll_s_w',
'helper_shra_r_w',
'helper_shrl_ph',
'helper_shra_ph',
'helper_shra_r_ph',
'helper_muleu_s_ph_qbl',
'helper_muleu_s_ph_qbr',
'helper_mulq_rs_ph',
'helper_mul_ph',
'helper_mul_s_ph',
'helper_mulq_s_ph',
'helper_muleq_s_w_phl',
'helper_muleq_s_w_phr',
'helper_mulsaq_s_w_ph',
'helper_mulsa_w_ph',
'helper_dpau_h_qbl',
'helper_dpau_h_qbr',
'helper_dpsu_h_qbl',
'helper_dpsu_h_qbr',
'helper_dpa_w_ph',
'helper_dpax_w_ph',
'helper_dps_w_ph',
'helper_dpsx_w_ph',
'helper_dpaq_s_w_ph',
'helper_dpaqx_s_w_ph',
'helper_dpsq_s_w_ph',
'helper_dpsqx_s_w_ph',
'helper_dpaqx_sa_w_ph',
'helper_dpsqx_sa_w_ph',
'helper_dpaq_sa_l_w',
'helper_dpsq_sa_l_w',
'helper_maq_s_w_phl',
'helper_maq_s_w_phr',
'helper_maq_sa_w_phl',
'helper_maq_sa_w_phr',
'helper_mulq_s_w',
'helper_mulq_rs_w',
'helper_bitrev',
'helper_insv',
'helper_cmpgu_eq_qb',
'helper_cmpgu_lt_qb',
'helper_cmpgu_le_qb',
'helper_cmpu_eq_qb',
'helper_cmpu_lt_qb',
'helper_cmpu_le_qb',
'helper_cmp_eq_ph',
'helper_cmp_lt_ph',
'helper_cmp_le_ph',
'helper_pick_qb',
'helper_pick_ph',
'helper_packrl_ph',
'helper_extr_w',
'helper_extr_r_w',
'helper_extr_rs_w',
'helper_extr_s_h',
'helper_extp',
'helper_extpdp',
'helper_shilo',
'helper_mthlip',
'cpu_wrdsp',
'helper_wrdsp',
'cpu_rddsp',
'helper_rddsp',
'helper_raise_exception_err',
'helper_clo',
'helper_clz',
'helper_muls',
'helper_mulsu',
'helper_macc',
'helper_macchi',
'helper_maccu',
'helper_macchiu',
'helper_msac',
'helper_msachi',
'helper_msacu',
'helper_msachiu',
'helper_mulhi',
'helper_mulhiu',
'helper_mulshi',
'helper_mulshiu',
'helper_bitswap',
'helper_ll',
'helper_sc',
'helper_swl',
'helper_swr',
'helper_lwm',
'helper_swm',
'helper_mfc0_mvpcontrol',
'helper_mfc0_mvpconf0',
'helper_mfc0_mvpconf1',
'helper_mfc0_random',
'helper_mfc0_tcstatus',
'helper_mftc0_tcstatus',
'helper_mfc0_tcbind',
'helper_mftc0_tcbind',
'helper_mfc0_tcrestart',
'helper_mftc0_tcrestart',
'helper_mfc0_tchalt',
'helper_mftc0_tchalt',
'helper_mfc0_tccontext',
'helper_mftc0_tccontext',
'helper_mfc0_tcschedule',
'helper_mftc0_tcschedule',
'helper_mfc0_tcschefback',
'helper_mftc0_tcschefback',
'helper_mfc0_count',
'helper_mftc0_entryhi',
'helper_mftc0_cause',
'helper_mftc0_status',
'helper_mfc0_lladdr',
'helper_mfc0_watchlo',
'helper_mfc0_watchhi',
'helper_mfc0_debug',
'helper_mftc0_debug',
'helper_mtc0_index',
'helper_mtc0_mvpcontrol',
'helper_mtc0_vpecontrol',
'helper_mttc0_vpecontrol',
'helper_mftc0_vpecontrol',
'helper_mftc0_vpeconf0',
'helper_mtc0_vpeconf0',
'helper_mttc0_vpeconf0',
'helper_mtc0_vpeconf1',
'helper_mtc0_yqmask',
'helper_mtc0_vpeopt',
'helper_mtc0_entrylo0',
'helper_mtc0_tcstatus',
'helper_mttc0_tcstatus',
'helper_mtc0_tcbind',
'helper_mttc0_tcbind',
'helper_mtc0_tcrestart',
'helper_mttc0_tcrestart',
'helper_mtc0_tchalt',
'helper_mttc0_tchalt',
'helper_mtc0_tccontext',
'helper_mttc0_tccontext',
'helper_mtc0_tcschedule',
'helper_mttc0_tcschedule',
'helper_mtc0_tcschefback',
'helper_mttc0_tcschefback',
'helper_mtc0_entrylo1',
'helper_mtc0_context',
'helper_mtc0_pagemask',
'helper_mtc0_pagegrain',
'helper_mtc0_wired',
'helper_mtc0_srsconf0',
'helper_mtc0_srsconf1',
'helper_mtc0_srsconf2',
'helper_mtc0_srsconf3',
'helper_mtc0_srsconf4',
'helper_mtc0_hwrena',
'helper_mtc0_count',
'helper_mtc0_entryhi',
'helper_mttc0_entryhi',
'helper_mtc0_compare',
'helper_mtc0_status',
'helper_mttc0_status',
'helper_mtc0_intctl',
'helper_mtc0_srsctl',
'helper_mtc0_cause',
'helper_mttc0_cause',
'helper_mftc0_epc',
'helper_mftc0_ebase',
'helper_mtc0_ebase',
'helper_mttc0_ebase',
'helper_mftc0_configx',
'helper_mtc0_config0',
'helper_mtc0_config2',
'helper_mtc0_config4',
'helper_mtc0_config5',
'helper_mtc0_lladdr',
'helper_mtc0_watchlo',
'helper_mtc0_watchhi',
'helper_mtc0_xcontext',
'helper_mtc0_framemask',
'helper_mtc0_debug',
'helper_mttc0_debug',
'helper_mtc0_performance0',
'helper_mtc0_taglo',
'helper_mtc0_datalo',
'helper_mtc0_taghi',
'helper_mtc0_datahi',
'helper_mftgpr',
'helper_mftlo',
'helper_mfthi',
'helper_mftacx',
'helper_mftdsp',
'helper_mttgpr',
'helper_mttlo',
'helper_mtthi',
'helper_mttacx',
'helper_mttdsp',
'helper_dmt',
'helper_emt',
'helper_dvpe',
'helper_evpe',
'helper_fork',
'helper_yield',
'r4k_helper_tlbinv',
'r4k_helper_tlbinvf',
'r4k_helper_tlbwi',
'r4k_helper_tlbwr',
'r4k_helper_tlbp',
'r4k_helper_tlbr',
'helper_tlbwi',
'helper_tlbwr',
'helper_tlbp',
'helper_tlbr',
'helper_tlbinv',
'helper_tlbinvf',
'helper_di',
'helper_ei',
'helper_eret',
'helper_deret',
'helper_rdhwr_cpunum',
'helper_rdhwr_synci_step',
'helper_rdhwr_cc',
'helper_rdhwr_ccres',
'helper_pmon',
'helper_wait',
'mips_cpu_do_unaligned_access',
'mips_cpu_unassigned_access',
'ieee_rm',
'helper_cfc1',
'helper_ctc1',
'ieee_ex_to_mips',
'helper_float_sqrt_d',
'helper_float_sqrt_s',
'helper_float_cvtd_s',
'helper_float_cvtd_w',
'helper_float_cvtd_l',
'helper_float_cvtl_d',
'helper_float_cvtl_s',
'helper_float_cvtps_pw',
'helper_float_cvtpw_ps',
'helper_float_cvts_d',
'helper_float_cvts_w',
'helper_float_cvts_l',
'helper_float_cvts_pl',
'helper_float_cvts_pu',
'helper_float_cvtw_s',
'helper_float_cvtw_d',
'helper_float_roundl_d',
'helper_float_roundl_s',
'helper_float_roundw_d',
'helper_float_roundw_s',
'helper_float_truncl_d',
'helper_float_truncl_s',
'helper_float_truncw_d',
'helper_float_truncw_s',
'helper_float_ceill_d',
'helper_float_ceill_s',
'helper_float_ceilw_d',
'helper_float_ceilw_s',
'helper_float_floorl_d',
'helper_float_floorl_s',
'helper_float_floorw_d',
'helper_float_floorw_s',
'helper_float_abs_d',
'helper_float_abs_s',
'helper_float_abs_ps',
'helper_float_chs_d',
'helper_float_chs_s',
'helper_float_chs_ps',
'helper_float_maddf_s',
'helper_float_maddf_d',
'helper_float_msubf_s',
'helper_float_msubf_d',
'helper_float_max_s',
'helper_float_max_d',
'helper_float_maxa_s',
'helper_float_maxa_d',
'helper_float_min_s',
'helper_float_min_d',
'helper_float_mina_s',
'helper_float_mina_d',
'helper_float_rint_s',
'helper_float_rint_d',
'helper_float_class_s',
'helper_float_class_d',
'helper_float_recip_d',
'helper_float_recip_s',
'helper_float_rsqrt_d',
'helper_float_rsqrt_s',
'helper_float_recip1_d',
'helper_float_recip1_s',
'helper_float_recip1_ps',
'helper_float_rsqrt1_d',
'helper_float_rsqrt1_s',
'helper_float_rsqrt1_ps',
'helper_float_add_d',
'helper_float_add_s',
'helper_float_add_ps',
'helper_float_sub_d',
'helper_float_sub_s',
'helper_float_sub_ps',
'helper_float_mul_d',
'helper_float_mul_s',
'helper_float_mul_ps',
'helper_float_div_d',
'helper_float_div_s',
'helper_float_div_ps',
'helper_float_madd_d',
'helper_float_madd_s',
'helper_float_madd_ps',
'helper_float_msub_d',
'helper_float_msub_s',
'helper_float_msub_ps',
'helper_float_nmadd_d',
'helper_float_nmadd_s',
'helper_float_nmadd_ps',
'helper_float_nmsub_d',
'helper_float_nmsub_s',
'helper_float_nmsub_ps',
'helper_float_recip2_d',
'helper_float_recip2_s',
'helper_float_recip2_ps',
'helper_float_rsqrt2_d',
'helper_float_rsqrt2_s',
'helper_float_rsqrt2_ps',
'helper_float_addr_ps',
'helper_float_mulr_ps',
'helper_cmp_d_f',
'helper_cmpabs_d_f',
'helper_cmp_d_un',
'helper_cmpabs_d_un',
'helper_cmp_d_eq',
'helper_cmpabs_d_eq',
'helper_cmp_d_ueq',
'helper_cmpabs_d_ueq',
'helper_cmp_d_olt',
'helper_cmpabs_d_olt',
'helper_cmp_d_ult',
'helper_cmpabs_d_ult',
'helper_cmp_d_ole',
'helper_cmpabs_d_ole',
'helper_cmp_d_ule',
'helper_cmpabs_d_ule',
'helper_cmp_d_sf',
'helper_cmpabs_d_sf',
'helper_cmp_d_ngle',
'helper_cmpabs_d_ngle',
'helper_cmp_d_seq',
'helper_cmpabs_d_seq',
'helper_cmp_d_ngl',
'helper_cmpabs_d_ngl',
'helper_cmp_d_lt',
'helper_cmpabs_d_lt',
'helper_cmp_d_nge',
'helper_cmpabs_d_nge',
'helper_cmp_d_le',
'helper_cmpabs_d_le',
'helper_cmp_d_ngt',
'helper_cmpabs_d_ngt',
'helper_cmp_s_f',
'helper_cmpabs_s_f',
'helper_cmp_s_un',
'helper_cmpabs_s_un',
'helper_cmp_s_eq',
'helper_cmpabs_s_eq',
'helper_cmp_s_ueq',
'helper_cmpabs_s_ueq',
'helper_cmp_s_olt',
'helper_cmpabs_s_olt',
'helper_cmp_s_ult',
'helper_cmpabs_s_ult',
'helper_cmp_s_ole',
'helper_cmpabs_s_ole',
'helper_cmp_s_ule',
'helper_cmpabs_s_ule',
'helper_cmp_s_sf',
'helper_cmpabs_s_sf',
'helper_cmp_s_ngle',
'helper_cmpabs_s_ngle',
'helper_cmp_s_seq',
'helper_cmpabs_s_seq',
'helper_cmp_s_ngl',
'helper_cmpabs_s_ngl',
'helper_cmp_s_lt',
'helper_cmpabs_s_lt',
'helper_cmp_s_nge',
'helper_cmpabs_s_nge',
'helper_cmp_s_le',
'helper_cmpabs_s_le',
'helper_cmp_s_ngt',
'helper_cmpabs_s_ngt',
'helper_cmp_ps_f',
'helper_cmpabs_ps_f',
'helper_cmp_ps_un',
'helper_cmpabs_ps_un',
'helper_cmp_ps_eq',
'helper_cmpabs_ps_eq',
'helper_cmp_ps_ueq',
'helper_cmpabs_ps_ueq',
'helper_cmp_ps_olt',
'helper_cmpabs_ps_olt',
'helper_cmp_ps_ult',
'helper_cmpabs_ps_ult',
'helper_cmp_ps_ole',
'helper_cmpabs_ps_ole',
'helper_cmp_ps_ule',
'helper_cmpabs_ps_ule',
'helper_cmp_ps_sf',
'helper_cmpabs_ps_sf',
'helper_cmp_ps_ngle',
'helper_cmpabs_ps_ngle',
'helper_cmp_ps_seq',
'helper_cmpabs_ps_seq',
'helper_cmp_ps_ngl',
'helper_cmpabs_ps_ngl',
'helper_cmp_ps_lt',
'helper_cmpabs_ps_lt',
'helper_cmp_ps_nge',
'helper_cmpabs_ps_nge',
'helper_cmp_ps_le',
'helper_cmpabs_ps_le',
'helper_cmp_ps_ngt',
'helper_cmpabs_ps_ngt',
'helper_r6_cmp_d_af',
'helper_r6_cmp_d_un',
'helper_r6_cmp_d_eq',
'helper_r6_cmp_d_ueq',
'helper_r6_cmp_d_lt',
'helper_r6_cmp_d_ult',
'helper_r6_cmp_d_le',
'helper_r6_cmp_d_ule',
'helper_r6_cmp_d_saf',
'helper_r6_cmp_d_sun',
'helper_r6_cmp_d_seq',
'helper_r6_cmp_d_sueq',
'helper_r6_cmp_d_slt',
'helper_r6_cmp_d_sult',
'helper_r6_cmp_d_sle',
'helper_r6_cmp_d_sule',
'helper_r6_cmp_d_or',
'helper_r6_cmp_d_une',
'helper_r6_cmp_d_ne',
'helper_r6_cmp_d_sor',
'helper_r6_cmp_d_sune',
'helper_r6_cmp_d_sne',
'helper_r6_cmp_s_af',
'helper_r6_cmp_s_un',
'helper_r6_cmp_s_eq',
'helper_r6_cmp_s_ueq',
'helper_r6_cmp_s_lt',
'helper_r6_cmp_s_ult',
'helper_r6_cmp_s_le',
'helper_r6_cmp_s_ule',
'helper_r6_cmp_s_saf',
'helper_r6_cmp_s_sun',
'helper_r6_cmp_s_seq',
'helper_r6_cmp_s_sueq',
'helper_r6_cmp_s_slt',
'helper_r6_cmp_s_sult',
'helper_r6_cmp_s_sle',
'helper_r6_cmp_s_sule',
'helper_r6_cmp_s_or',
'helper_r6_cmp_s_une',
'helper_r6_cmp_s_ne',
'helper_r6_cmp_s_sor',
'helper_r6_cmp_s_sune',
'helper_r6_cmp_s_sne',
'helper_msa_ld_df',
'helper_msa_st_df',
'no_mmu_map_address',
'fixed_mmu_map_address',
'r4k_map_address',
'mips_cpu_get_phys_page_debug',
'mips_cpu_handle_mmu_fault',
'cpu_mips_translate_address',
'exception_resume_pc',
'mips_cpu_do_interrupt',
'mips_cpu_exec_interrupt',
'r4k_invalidate_tlb',
'helper_absq_s_ob',
'helper_absq_s_qh',
'helper_absq_s_pw',
'helper_adduh_ob',
'helper_adduh_r_ob',
'helper_subuh_ob',
'helper_subuh_r_ob',
'helper_addq_pw',
'helper_addq_qh',
'helper_addq_s_pw',
'helper_addq_s_qh',
'helper_addu_ob',
'helper_addu_qh',
'helper_addu_s_ob',
'helper_addu_s_qh',
'helper_subq_pw',
'helper_subq_qh',
'helper_subq_s_pw',
'helper_subq_s_qh',
'helper_subu_ob',
'helper_subu_qh',
'helper_subu_s_ob',
'helper_subu_s_qh',
'helper_raddu_l_ob',
'helper_precr_ob_qh',
'helper_precr_sra_qh_pw',
'helper_precr_sra_r_qh_pw',
'helper_precrq_ob_qh',
'helper_precrq_qh_pw',
'helper_precrq_rs_qh_pw',
'helper_precrq_pw_l',
'helper_precrqu_s_ob_qh',
'helper_preceq_pw_qhl',
'helper_preceq_pw_qhr',
'helper_preceq_pw_qhla',
'helper_preceq_pw_qhra',
'helper_precequ_qh_obl',
'helper_precequ_qh_obr',
'helper_precequ_qh_obla',
'helper_precequ_qh_obra',
'helper_preceu_qh_obl',
'helper_preceu_qh_obr',
'helper_preceu_qh_obla',
'helper_preceu_qh_obra',
'helper_shll_ob',
'helper_shrl_ob',
'helper_shra_ob',
'helper_shra_r_ob',
'helper_shll_qh',
'helper_shll_s_qh',
'helper_shrl_qh',
'helper_shra_qh',
'helper_shra_r_qh',
'helper_shll_pw',
'helper_shll_s_pw',
'helper_shra_pw',
'helper_shra_r_pw',
'helper_muleu_s_qh_obl',
'helper_muleu_s_qh_obr',
'helper_mulq_rs_qh',
'helper_muleq_s_pw_qhl',
'helper_muleq_s_pw_qhr',
'helper_mulsaq_s_w_qh',
'helper_dpau_h_obl',
'helper_dpau_h_obr',
'helper_dpsu_h_obl',
'helper_dpsu_h_obr',
'helper_dpa_w_qh',
'helper_dpaq_s_w_qh',
'helper_dps_w_qh',
'helper_dpsq_s_w_qh',
'helper_dpaq_sa_l_pw',
'helper_dpsq_sa_l_pw',
'helper_mulsaq_s_l_pw',
'helper_maq_s_w_qhll',
'helper_maq_s_w_qhlr',
'helper_maq_s_w_qhrl',
'helper_maq_s_w_qhrr',
'helper_maq_sa_w_qhll',
'helper_maq_sa_w_qhlr',
'helper_maq_sa_w_qhrl',
'helper_maq_sa_w_qhrr',
'helper_maq_s_l_pwl',
'helper_maq_s_l_pwr',
'helper_dmadd',
'helper_dmaddu',
'helper_dmsub',
'helper_dmsubu',
'helper_dinsv',
'helper_cmpgu_eq_ob',
'helper_cmpgu_lt_ob',
'helper_cmpgu_le_ob',
'helper_cmpu_eq_ob',
'helper_cmpu_lt_ob',
'helper_cmpu_le_ob',
'helper_cmp_eq_qh',
'helper_cmp_lt_qh',
'helper_cmp_le_qh',
'helper_cmp_eq_pw',
'helper_cmp_lt_pw',
'helper_cmp_le_pw',
'helper_cmpgdu_eq_ob',
'helper_cmpgdu_lt_ob',
'helper_cmpgdu_le_ob',
'helper_pick_ob',
'helper_pick_qh',
'helper_pick_pw',
'helper_packrl_pw',
'helper_dextr_w',
'helper_dextr_r_w',
'helper_dextr_rs_w',
'helper_dextr_l',
'helper_dextr_r_l',
'helper_dextr_rs_l',
'helper_dextr_s_h',
'helper_dextp',
'helper_dextpdp',
'helper_dshilo',
'helper_dmthlip',
'helper_dclo',
'helper_dclz',
'helper_dbitswap',
'helper_lld',
'helper_scd',
'helper_sdl',
'helper_sdr',
'helper_ldm',
'helper_sdm',
'helper_dmfc0_tcrestart',
'helper_dmfc0_tchalt',
'helper_dmfc0_tccontext',
'helper_dmfc0_tcschedule',
'helper_dmfc0_tcschefback',
'helper_dmfc0_lladdr',
'helper_dmfc0_watchlo',
'helper_dmtc0_entrylo0',
'helper_dmtc0_entrylo1',
'mips_reg_reset',
'mips_reg_read',
'mips_reg_write',
'mips_tcg_init',
'mips_cpu_list',
'mips_release',
'MIPS64_REGS_STORAGE_SIZE',
'MIPS_REGS_STORAGE_SIZE'
)
sparc_symbols = (
'cpu_sparc_exec',
'helper_compute_psr',
'helper_compute_C_icc',
'cpu_sparc_init',
'cpu_sparc_set_id',
'sparc_cpu_register_types',
'helper_fadds',
'helper_faddd',
'helper_faddq',
'helper_fsubs',
'helper_fsubd',
'helper_fsubq',
'helper_fmuls',
'helper_fmuld',
'helper_fmulq',
'helper_fdivs',
'helper_fdivd',
'helper_fdivq',
'helper_fsmuld',
'helper_fdmulq',
'helper_fnegs',
'helper_fitos',
'helper_fitod',
'helper_fitoq',
'helper_fdtos',
'helper_fstod',
'helper_fqtos',
'helper_fstoq',
'helper_fqtod',
'helper_fdtoq',
'helper_fstoi',
'helper_fdtoi',
'helper_fqtoi',
'helper_fabss',
'helper_fsqrts',
'helper_fsqrtd',
'helper_fsqrtq',
'helper_fcmps',
'helper_fcmpd',
'helper_fcmpes',
'helper_fcmped',
'helper_fcmpq',
'helper_fcmpeq',
'helper_ldfsr',
'helper_debug',
'helper_udiv_cc',
'helper_sdiv_cc',
'helper_taddcctv',
'helper_tsubcctv',
'sparc_cpu_do_interrupt',
'helper_check_align',
'helper_ld_asi',
'helper_st_asi',
'helper_cas_asi',
'helper_ldqf',
'helper_stqf',
'sparc_cpu_unassigned_access',
'sparc_cpu_do_unaligned_access',
'sparc_cpu_handle_mmu_fault',
'dump_mmu',
'sparc_cpu_get_phys_page_debug',
'sparc_reg_reset',
'sparc_reg_read',
'sparc_reg_write',
'gen_intermediate_code_init',
'cpu_set_cwp',
'cpu_get_psr',
'cpu_put_psr',
'cpu_cwp_inc',
'cpu_cwp_dec',
'helper_save',
    'helper_restore',
)
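# Each tuple above enumerates the symbols that the shared QEMU sources export
# for one guest architecture; the block below emits a #define for every entry
# so that several per-arch object files can be linked into one binary without
# symbol collisions.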
if __name__ == '__main__':
arch = sys.argv[1]
print("/* Autogen header for Unicorn Engine - DONOT MODIFY */")
print("#ifndef UNICORN_AUTOGEN_%s_H" %arch.upper())
print("#define UNICORN_AUTOGEN_%s_H" %arch.upper())
for s in symbols:
print("#define %s %s_%s" %(s, s, arch))
if 'arm' in arch:
for s in arm_symbols:
print("#define %s %s_%s" %(s, s, arch))
if 'aarch64' in arch:
for s in aarch64_symbols:
print("#define %s %s_%s" %(s, s, arch))
if 'mips' in arch:
for s in mips_symbols:
print("#define %s %s_%s" %(s, s, arch))
if 'sparc' in arch:
for s in sparc_symbols:
print("#define %s %s_%s" %(s, s, arch))
print("#endif")
| gpl-2.0 |
GoeGaming/lutris | lutris/runners/steam.py | 1 | 6910 | import os
import time
import subprocess
from lutris.runners.runner import Runner
from lutris.gui.dialogs import NoticeDialog
from lutris.thread import LutrisThread
from lutris.util.log import logger
from lutris.util import system
from lutris.util.steam import (get_path_from_appmanifest, read_config,
get_default_acf, to_vdf)
def shutdown():
"""Cleanly quit Steam"""
logger.debug("Shutting down Steam")
if is_running():
subprocess.call(['steam', '-shutdown'])
def get_steam_pid():
"""Return pid of Steam process"""
return system.get_pid('steam$')
def kill():
"""Force quit Steam"""
system.kill_pid(get_steam_pid())
def is_running():
"""Checks if Steam is running"""
return bool(get_steam_pid())
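# Taken together, these helpers give a graceful-then-forced quit pattern.
# A minimal sketch (illustrative, not part of the runner API):
#
#   if is_running():
#       shutdown()            # ask the Steam client to exit cleanly
#       time.sleep(5)
#       if is_running():
#           kill()            # fall back to killing the tracked pid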
class steam(Runner):
""" Runs Steam for Linux games """
human_name = "Steam"
platform = "Steam for Linux"
game_options = [
{
"option": 'appid',
'label': "Application ID",
"type": "string",
'help': ("The application ID can be retrieved from the game's "
"page at steampowered.com. Example: 235320 is the "
"app ID for <i>Original War</i> in: \n"
"http://store.steampowered.com/app/<b>235320</b>/")
}
]
runner_options = [
{
'option': 'quit_steam_on_exit',
'label': "Stop Steam after game exits",
'type': 'bool',
'default': False,
'help': ("Quit Steam after the game has quit\n"
"(only if it was started by Lutris)")
}
]
system_options_override = [
{
'option': 'disable_runtime',
'default': True,
}
]
def __init__(self, config=None):
super(steam, self).__init__(config)
self.own_game_remove_method = "Remove game data (through Steam)"
self.no_game_remove_warning = True
self.original_steampid = None
@property
def browse_dir(self):
"""Return the path to open with the Browse Files action."""
if not self.is_installed():
installed = self.install_dialog()
if not installed:
return False
return self.game_path
@property
def steam_config(self):
"""Return the "Steam" part of Steam's config.vdf as a dict"""
if not self.steam_data_dir:
return
return read_config(self.steam_data_dir)
@property
def game_path(self):
appid = self.game_config.get('appid')
for apps_path in self.get_steamapps_dirs():
game_path = get_path_from_appmanifest(apps_path, appid)
if game_path:
return game_path
logger.warning("Data path for SteamApp %s not found.", appid)
@property
def steam_exe(self):
"""Return Steam exe's path"""
return 'steam'
@property
def steam_data_dir(self):
"""Return dir where Steam files lie"""
candidates = (
"~/.local/share/Steam/",
"~/.local/share/steam/",
"~/.steam/",
"~/.Steam/",
)
for candidate in candidates:
path = os.path.expanduser(candidate)
if os.path.exists(path):
return path
def get_game_path_from_appid(self, appid):
"""Return the game directory"""
for apps_path in self.get_steamapps_dirs():
game_path = get_path_from_appmanifest(apps_path, appid)
if game_path:
return game_path
logger.warning("Data path for SteamApp %s not found.", appid)
def get_steamapps_dirs(self):
"""Return a list of the Steam library main + custom folders."""
dirs = []
# Main steamapps dir
if self.steam_data_dir:
main_dir = os.path.join(self.steam_data_dir, 'SteamApps')
main_dir = system.fix_path_case(main_dir)
if main_dir:
dirs.append(main_dir)
# Custom dirs
steam_config = self.steam_config
if steam_config:
i = 1
while ('BaseInstallFolder_%s' % i) in steam_config:
path = steam_config['BaseInstallFolder_%s' % i] + '/SteamApps'
path = system.fix_path_case(path)
if path:
dirs.append(path)
i += 1
return dirs
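    # The config.vdf keys consumed by the loop above look like this in
    # Steam's own file (illustrative):
    #   "BaseInstallFolder_1"    "/mnt/games/SteamLibrary"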
def install(self):
message = "Steam for Linux installation is not handled by Lutris.\n" \
"Please go to " \
"<a href='http://steampowered.com'>http://steampowered.com</a>" \
" or install Steam with the package provided by your distribution."
NoticeDialog(message)
def is_installed(self):
return bool(system.find_executable(self.steam_exe))
def install_game(self, appid):
logger.debug("Installing steam game %s", appid)
acf_data = get_default_acf(appid, appid)
acf_content = to_vdf(acf_data)
steamapps_dirs = self.get_steamapps_dirs()
acf_path = os.path.join(steamapps_dirs[0], "appmanifest_%s.acf" % appid)
with open(acf_path, "w") as acf_file:
acf_file.write(acf_content)
if is_running():
shutdown()
time.sleep(5)
else:
logger.debug("Steam not running")
subprocess.Popen(["steam", "steam://preload/%s" % appid])
def prelaunch(self):
from lutris.runners import winesteam
        if winesteam.is_running():
            logger.info("Shutting down Steam for Windows...")
            winesteam.shutdown()
            time.sleep(2)
            if winesteam.is_running():
logger.info("Steam does not shutdown, killing it...")
winesteam.kill()
time.sleep(2)
if winesteam.is_running():
logger.error("Failed to shutdown Steam for Windows :(")
return False
else:
logger.debug("winesteam not running")
return True
def play(self):
# Get current steam pid to act as the root pid instead of lutris
self.original_steampid = get_steam_pid()
appid = self.game_config.get('appid')
return {
'command': [self.steam_exe, 'steam://rungameid/%s' % appid],
'rootpid': self.original_steampid
}
def stop(self):
if self.runner_config.get('quit_steam_on_exit') \
and not self.original_steampid:
shutdown()
def remove_game_data(self, **kwargs):
if not self.is_installed():
installed = self.install_dialog()
if not installed:
return False
appid = self.game_config.get('appid')
logger.debug("Launching Wine Steam uninstall of game %s" % appid)
command = [self.steam_exe, 'steam://uninstall/%s' % appid]
thread = LutrisThread(command, runner=self)
thread.start()
| gpl-3.0 |
woddx/privacyidea | tests/test_lib_tokens_motp.py | 3 | 7378 | """
This test file tests the lib.tokens.spasstoken
This depends on lib.tokenclass
"""
from .base import MyTestCase
from privacyidea.lib.tokens.motptoken import MotpTokenClass
from privacyidea.lib.tokens.mOTP import mTimeOtp
from privacyidea.models import Token
from privacyidea.lib.resolver import save_resolver
from privacyidea.lib.realm import set_realm
from privacyidea.lib.user import User
PWFILE = "tests/testdata/passwords"
class MotpTokenTestCase(MyTestCase):
otppin = "topsecret"
motppin = "1234"
serial1 = "ser1"
serial2 = "ser2"
resolvername1 = "resolver1"
resolvername2 = "Resolver2"
resolvername3 = "reso3"
realm1 = "realm1"
realm2 = "realm2"
def test_00_create_user_realm(self):
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": PWFILE})
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.realm1,
[self.resolvername1])
self.assertTrue(len(failed) == 0)
self.assertTrue(len(added) == 1)
user = User(login="root",
realm=self.realm1,
resolver=self.resolvername1)
user_str = "%s" % user
self.assertTrue(user_str == "<root.resolver1@realm1>", user_str)
self.assertFalse(user.is_empty())
self.assertTrue(User().is_empty())
user_repr = "%r" % user
expected = "User(login='root', realm='realm1', resolver='resolver1')"
self.assertTrue(user_repr == expected, user_repr)
def test_01_create_token(self):
db_token = Token(self.serial1, tokentype="motp")
db_token.save()
token = MotpTokenClass(db_token)
token.update({"otpkey": "909a4d4ba980b2c6",
"motppin": self.motppin,
"pin": self.otppin})
self.assertTrue(token.token.serial == self.serial1, token)
self.assertTrue(token.token.tokentype == "motp", token.token.tokentype)
self.assertTrue(token.type == "motp", token)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "PIMO", class_prefix)
self.assertTrue(token.get_class_type() == "motp", token)
def test_02_check_password(self):
db_token = Token.query.filter(Token.serial == self.serial1).first()
token = MotpTokenClass(db_token)
# Wrong OTP value
r = token.check_otp("aba73b")
self.assertTrue(r == -1, r)
# check pin+otp:
token.set_pin(self.otppin)
r = token.authenticate("%saba73b" % self.otppin)
self.assertTrue(r[0], r)
self.assertTrue(r[1] == -1, r)
def test_03_enroll_genkey(self):
db_token = Token(self.serial2, tokentype="motp")
db_token.save()
token = MotpTokenClass(db_token)
token.update({"genkey": "1",
"motppin": self.motppin,
"pin": self.otppin})
db_token = Token.query.filter(Token.serial == self.serial2).first()
token = MotpTokenClass(db_token)
# check that the userpin is set
self.assertTrue(token.token.user_pin, token.token.user_pin)
# check that the otp value is set
self.assertTrue(token.token.key_enc, token.token.key_enc)
def test_16_init_detail(self):
db_token = Token.query.filter_by(serial=self.serial2).first()
token = MotpTokenClass(db_token)
token.add_init_details("otpkey", "11223344556677889900")
token.set_user(User(login="cornelius",
realm=self.realm1))
token.save()
self.assertTrue(token.token.resolver_type == "passwdresolver",
token.token.resolver_type)
self.assertTrue(token.token.resolver == self.resolvername1,
token.token.resolver)
self.assertTrue(token.token.user_id == "1000",
token.token.user_id)
user_object = token.get_user()
self.assertTrue(user_object.login == "cornelius",
user_object)
self.assertTrue(user_object.resolver == self.resolvername1,
user_object)
detail = token.get_init_detail()
self.assertTrue("otpkey" in detail, detail)
# but the otpkey must not be written to token.token.info (DB)
# As this only writes the OTPkey to the internal init_details dict
self.assertTrue("otpkey" not in token.token.get_info(),
token.token.get_info())
# Now get the Token2 URL, which we only
# get, if a user is specified.
detail = token.get_init_detail(user=User("cornelius",
self.realm1))
self.assertTrue("otpkey" in detail, detail)
otpkey = detail.get("otpkey")
self.assertTrue("img" in otpkey, otpkey)
self.assertTrue("motpurl" in detail, detail)
motpurl = detail.get("motpurl").get("value")
self.assertTrue(motpurl == 'motp://privacyidea:mylabel?'
'secret=11223344556677889900', motpurl)
self.assertRaises(Exception, token.set_init_details, "unvalid value")
token.set_init_details({"detail1": "value1"})
self.assertTrue("detail1" in token.get_init_details(),
token.get_init_details())
def test_04_class_methods(self):
db_token = Token.query.filter(Token.serial == self.serial1).first()
token = MotpTokenClass(db_token)
info = token.get_class_info()
self.assertTrue(info.get("title") == "mOTP Token", info.get(
'title'))
info = token.get_class_info("title")
self.assertTrue(info == "mOTP Token", info)
def test_05_test_vector(self):
# Testvector from
# https://github.com/neush/otpn900/blob/master/src/test_motp.c
key = "0123456789abcdef"
epoch = [ 129612120, 129612130, 0, 4, 129612244, 129612253]
pins = ["6666", "6666", "1", "1", "77777777", "77777777"]
otps = ["6ed4e4", "502a59", "bd94a4", "fb596e", "7abf75", "4d4ac4"]
i = 0
motp1 = mTimeOtp(key=key, pin=pins[0])
for e in epoch:
pin = pins[i]
otp = otps[i]
sotp = motp1.calcOtp(e, key, pin)
self.assertTrue(sotp == otp, "%s==%s" % (sotp, otp))
i += 1
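        # For reference, each vector above follows the mOTP scheme, roughly
        # (a sketch of the scheme, not privacyIDEA's implementation):
        #   counter = epoch // 10
        #   otp = md5(str(counter) + key + pin).hexdigest()[:6]
        # which is why every test vector pairs an epoch with a pin.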
def test_06_reuse_otp_value(self):
key = "0123456789abcdef"
db_token = Token("motp002", tokentype="motp")
db_token.save()
token = MotpTokenClass(db_token)
token.update({"otpkey": key,
"motppin": "6666",
"pin": "test"})
self.assertTrue(token.token.tokentype == "motp", token.token.tokentype)
self.assertTrue(token.type == "motp", token)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "PIMO", class_prefix)
self.assertTrue(token.get_class_type() == "motp", token)
# Correct OTP value
r = token.check_otp("6ed4e4", options={"initTime": 129612120})
self.assertTrue(r == 129612120, r)
# Check the same value again
r = token.check_otp("6ed4e4", options={"initTime": 129612120})
self.assertTrue(r == -1, r)
| agpl-3.0 |
VisionInternet/visionLiveSDK-Python | setup.py | 1 | 1303 | # -*- coding: utf-8 -*-
##############################################################################
#
# Python visionLive API
# Copyright 2017 Vision Internet (http://www.visioninternet.com)
#
##############################################################################
from setuptools import setup, find_packages
import os
import re
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_version():
init = read(os.path.join('visionLiveSDK', '__init__.py'))
return re.search("__version__ = '([0-9.]*)'", init).group(1)
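# get_version() expects visionLiveSDK/__init__.py to carry a line such as
# (illustrative):
#   __version__ = '1.0'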
setup(name='visionLiveSDK',
version=get_version(),
description='SDK for visionLive API ',
long_description=read('README.md'),
author='Vision Internet',
author_email='yding@visioninternet.com',
url='https://github.com/VisionInternet/',
packages=find_packages(),
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='AGPLv3+',
use_2to3=True,
) | gpl-3.0 |
zjutjsj1004/third | boost/tools/build/src/tools/rc.py | 1 | 7483 | # Status: being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (C) Andre Hentz 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
#
# Copyright (c) 2006 Rene Rivera.
#
# Copyright (c) 2008 Steven Watanabe
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##import type ;
##import generators ;
##import feature ;
##import errors ;
##import scanner ;
##import toolset : flags ;
import os.path
import re
import bjam
from b2.build import type, toolset, generators, scanner, feature
from b2.exceptions import AlreadyDefined
from b2.tools import builtin
from b2.util import regex
from b2.build.toolset import flags
from b2.manager import get_manager
from b2.util import utility
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
type.register('RC', ['rc'])
def init():
pass
def configure (command = None, condition = None, options = None):
"""
Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
    This is so that callers don't have to check auto-configuration values
    before calling this, while still getting build failures when the
    resource compiler can't be found.
"""
rc_type = feature.get_values('<rc-type>', options)
if rc_type:
assert(len(rc_type) == 1)
rc_type = rc_type[0]
if command and condition and rc_type:
flags('rc.compile.resource', '.RC', condition, command)
flags('rc.compile.resource', '.RC_TYPE', condition, rc_type.lower())
flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
if debug():
print 'notice: using rc compiler ::', condition, '::', command
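# Illustrative configuration calls (values are hypothetical); a toolset
# normally invokes this during its own setup:
#
#   configure('rc', ['<toolset>msvc'], ['<rc-type>rc'])
#   configure('windres', ['<toolset>gcc'], ['<rc-type>windres'])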
engine = get_manager().engine()
class RCAction:
"""Class representing bjam action defined from Python.
The function must register the action to execute."""
def __init__(self, action_name, function):
self.action_name = action_name
self.function = function
def __call__(self, targets, sources, property_set):
if self.function:
self.function(targets, sources, property_set)
# FIXME: What is the proper way to dispatch actions?
def rc_register_action(action_name, function = None):
global engine
if engine.actions.has_key(action_name):
raise AlreadyDefined("Bjam action %s is already defined" % action_name)
engine.actions[action_name] = RCAction(action_name, function)
def rc_compile_resource(targets, sources, properties):
rc_type = bjam.call('get-target-variable', targets, '.RC_TYPE')
global engine
engine.set_update_action('rc.compile.resource.' + rc_type, targets, sources, properties)
rc_register_action('rc.compile.resource', rc_compile_resource)
engine.register_action(
'rc.compile.resource.rc',
'"$(.RC)" -l 0x409 "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -fo "$(<)" "$(>)"')
engine.register_action(
'rc.compile.resource.windres',
'"$(.RC)" "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -o "$(<)" -i "$(>)"')
# FIXME: this was originally declared quietly
engine.register_action(
'compile.resource.null',
'as /dev/null -o "$(<)"')
# Since it's a common practice to write
# exe hello : hello.cpp hello.rc
# we change the name of object created from RC file, to
# avoid conflict with hello.cpp.
# The reason we generate OBJ and not RES, is that gcc does not
# seem to like RES files, but works OK with OBJ.
# See http://article.gmane.org/gmane.comp.lib.boost.build/5643/
#
# Using 'register-c-compiler' adds the build directory to INCLUDES
# FIXME: switch to generators
builtin.register_c_compiler('rc.compile.resource', ['RC'], ['OBJ(%_res)'], [])
__angle_include_re = "#include[ ]*<([^<]+)>"
# Register scanner for resources
class ResScanner(scanner.Scanner):
    def __init__(self, includes):
        # Initialize the base Scanner; the original line, a leftover from
        # the Jam version, was a bare attribute access that did nothing.
        scanner.Scanner.__init__(self)
        self.includes = includes
    def pattern(self):
        return "(([^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\
            "[ ]+([^ \"]+|\"[^\"]+\"))|(#include[ ]*(<[^<]+>|\"[^\"]+\")))"
def process(self, target, matches, binding):
binding = binding[0]
angle = regex.transform(matches, "#include[ ]*<([^<]+)>")
quoted = regex.transform(matches, "#include[ ]*\"([^\"]+)\"")
res = regex.transform(matches,
"[^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\
"[ ]+(([^ \"]+)|\"([^\"]+)\")", [3, 4])
# Icons and other includes may referenced as
#
# IDR_MAINFRAME ICON "res\\icon.ico"
#
# so we have to replace double backslashes to single ones.
res = [ re.sub(r'\\\\', '/', match) for match in res if match is not None ]
# CONSIDER: the new scoping rule seem to defeat "on target" variables.
g = bjam.call('get-target-variable', target, 'HDRGRIST')[0]
b = os.path.normpath(os.path.dirname(binding))
# Attach binding of including file to included targets.
# When target is directly created from virtual target
# this extra information is unnecessary. But in other
# cases, it allows to distinguish between two headers of the
# same name included from different places.
# We don't need this extra information for angle includes,
# since they should not depend on including file (we can't
# get literal "." in include path).
g2 = g + "#" + b
g = "<" + g + ">"
g2 = "<" + g2 + ">"
angle = [g + x for x in angle]
quoted = [g2 + x for x in quoted]
res = [g2 + x for x in res]
all = angle + quoted
bjam.call('mark-included', target, all)
engine = get_manager().engine()
engine.add_dependency(target, res)
bjam.call('NOCARE', all + res)
engine.set_target_variable(angle, 'SEARCH', [utility.get_value(inc) for inc in self.includes])
engine.set_target_variable(quoted, 'SEARCH', [b + utility.get_value(inc) for inc in self.includes])
engine.set_target_variable(res, 'SEARCH', [b + utility.get_value(inc) for inc in self.includes])
# Just propagate current scanner to includes, in a hope
# that includes do not change scanners.
get_manager().scanners().propagate(self, angle + quoted)
scanner.register(ResScanner, 'include')
type.set_scanner('RC', ResScanner)
| mit |
mdaus/nitro | modules/python/nitf/samples/nitf_extract.py | 2 | 2822 | #!/usr/bin/env python
"""
* =========================================================================
* This file is part of NITRO
* =========================================================================
*
* (C) Copyright 2004 - 2016, MDA Information Systems LLC
*
* NITRO is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not,
* see <http://www.gnu.org/licenses/>.
*
*
"""
from nitf import *
import os, sys, logging, glob
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format='%(asctime)s %(levelname)s %(message)s')
def extract_image(subheader, index, imageReader, outDir=None, baseName=None):
window = SubWindow()
window.numRows = subheader['numRows'].intValue()
window.numCols = subheader['numCols'].intValue()
window.bandList = list(range(subheader.getBandCount()))
nbpp = subheader['numBitsPerPixel'].intValue()
bandData = imageReader.read(window)
if not outDir: outDir = os.getcwd()
if not baseName: baseName = os.path.basename(os.tempnam())
outNames = []
for band, data in enumerate(bandData):
outName = '%s_%d__%d_x_%d_%d_band_%d.out' % (
baseName, index, window.numRows, window.numCols, nbpp, band)
outName = os.path.join(outDir, outName)
f = open(outName, 'wb')
f.write(data)
f.close()
outNames.append(outName)
logging.info('Wrote band data to file %s' % outName)
return outNames
def extract_images(fileName, outDir=None):
if not outDir: outDir = os.getcwd()
if not os.path.exists(outDir): os.makedirs(outDir)
handle = IOHandle(fileName)
reader = Reader()
record = reader.read(handle)
logging.info('Dumping file: %s' % fileName)
for i, segment in enumerate(record.getImages()):
logging.info('--- Image [%d] ---' % i)
imReader = reader.newImageReader(i)
extract_image(segment.subheader, i, imReader, outDir, os.path.basename(fileName))
handle.close()
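# Example invocations (paths are illustrative):
#   python nitf_extract.py image.ntf
#   python nitf_extract.py /path/to/nitf_dir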
if __name__ == '__main__':
for arg in sys.argv[1:]:
if os.path.isfile(arg):
extract_images(arg)
elif os.path.isdir(arg):
map(extract_images, glob.glob(os.path.join(arg, '*.ntf')) + glob.glob(os.path.join(arg, '*.NTF')))
| lgpl-3.0 |
boorad/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/mslink.py | 61 | 10682 | """SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslink.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
try:
return ['/PDB:%s' % target[0].attributes.pdb, '/DEBUG']
except (AttributeError, IndexError):
return None
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
"""Get targets for loadable modules."""
return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
"""Get sources for loadable modules."""
return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
"""Common implementation of dll emitter."""
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
insert_def = env.subst("$WINDOWS_INSERT_DEF")
if not insert_def in ['', '0', 0] and \
not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
# append a def file to the list of sources
extrasources.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
# MSVC 8 automatically generates .manifest files that must be installed
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if not no_import_lib and \
not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
# Append an import library to the list of targets.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"LIBPREFIX", "LIBSUFFIX"))
# and .exp file is created if there are exports from a DLL
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
"""Emitter for loadable modules.
Loadable modules are identical to shared libraries on Windows, but building
them is subject to different parameters (LDMODULE*).
"""
return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
SCons.Tool.msvc.validate_vars(env)
extratargets = []
exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
if not exe:
raise SCons.Errors.UserError("An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and env.get('WINDOWS_INSERT_MANIFEST', 0):
# MSVC 8 automatically generates .manifest files that have to be installed
extratargets.append(
env.ReplaceIxes(exe,
"PROGPREFIX", "PROGSUFFIX",
"WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
return (target+extratargets,source)
def RegServerFunc(target, source, env):
if 'register' in env and env['register']:
ret = regServerAction([target[0]], [source[0]], env)
if ret:
raise SCons.Errors.UserError("Unable to register %s" % target[0])
else:
print "Registered %s sucessfully" % target[0]
return ret
return 0
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
regServerCheck = SCons.Action.Action(RegServerFunc, None)
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES")}')
compositeShLinkAction = shlibLinkAction + regServerCheck
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES")}')
compositeLdmodAction = ldmodLinkAction + regServerCheck
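# Adding two SCons Actions yields a ListAction that runs them in order, so
# the DLL is linked first and then, when requested, registered through
# regsvr32 by RegServerFunc.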
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
env['_SHLINK_TARGETS'] = windowsShlinkTargets
env['_SHLINK_SOURCES'] = windowsShlinkSources
env['SHLINKCOM'] = compositeShLinkAction
env.Append(SHLIBEMITTER = [windowsLibEmitter])
env['LINK'] = 'link'
env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
env['_PDB'] = pdbGenerator
env['LINKCOM'] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows")}'
env.Append(PROGEMITTER = [prog_emitter])
env['LIBDIRPREFIX']='/LIBPATH:'
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX'
env['WIN32DEFPREFIX'] = ''
env['WIN32DEFSUFFIX'] = '.def'
env['WIN32_INSERT_DEF'] = 0
env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'
env['WIN32EXPPREFIX'] = ''
env['WIN32EXPSUFFIX'] = '.exp'
env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'
env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
env['WINDOWSPROGMANIFESTPREFIX'] = ''
env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
env['REGSVRACTION'] = regServerCheck
env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
env['REGSVRFLAGS'] = '/s '
env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
# Set-up ms tools paths
msvc_setup_env_once(env)
# Loadable modules are on Windows the same as shared libraries, but they
# are subject to different build parameters (LDMODULE* variables).
# Therefore LDMODULE* variables correspond as much as possible to
# SHLINK*/SHLIB* ones.
SCons.Tool.createLoadableModuleBuilder(env)
env['LDMODULE'] = '$SHLINK'
env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
env['_LDMODULE_SOURCES'] = _windowsLdmodSources
env['LDMODULEEMITTER'] = [ldmodEmitter]
env['LDMODULECOM'] = compositeLdmodAction
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
mancoast/CPythonPyc_test | fail/321_test_urllib2_localnet.py | 51 | 21528 | #!/usr/bin/env python3
import os
import email
import urllib.parse
import urllib.request
import http.server
import unittest
import hashlib
from test import support
threading = support.import_module('threading')
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Loopback http server infrastructure
class LoopbackHttpServer(http.server.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
http.server.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(0.1)
def get_request(self):
"""HTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop_server = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(("127.0.0.1", 0),
request_handler)
#print "Serving HTTP on %s port %s" % (self.httpd.server_name,
# self.httpd.server_port)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
# Set the stop flag.
self._stop_server = True
self.join()
self.httpd.server_close()
def run(self):
self.ready.set()
while not self._stop_server:
self.httpd.handle_request()
# Authentication infrastructure
class DigestAuthHandler:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str.encode("ascii")).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str.encode("ascii")).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str.encode("ascii")).hexdigest()
return response == auth_dict["response"]
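    # The computation above follows RFC 2617 digest authentication with qop:
    #   HA1 = MD5(username:realm:password)
    #   HA2 = MD5(method:uri)
    #   response = MD5(HA1:nonce:nc:cnonce:qop:HA2)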
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write(b"Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if "Proxy-Authorization" not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers["Proxy-Authorization"]
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if not auth_dict.get("nonce") in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib.request uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
# Proxy test infrastructure
class FakeProxyHandler(http.server.BaseHTTPRequestHandler):
"""This is a 'fake proxy' that makes it look like the entire
internet has gone down due to a sudden zombie invasion. It main
utility is in providing us with authentication support for
testing.
"""
def __init__(self, digest_auth_handler, *args, **kwargs):
# This has to be set before calling our parent's __init__(), which will
# try to call do_GET().
self.digest_auth_handler = digest_auth_handler
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Uncomment the next line for debugging.
# sys.stderr.write(format % args)
pass
def do_GET(self):
(scm, netloc, path, params, query, fragment) = urllib.parse.urlparse(
self.path, "http")
self.short_path = path
if self.digest_auth_handler.handle_request(self):
self.send_response(200, "OK")
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(bytes("You've reached %s!<BR>" % self.path,
"ascii"))
self.wfile.write(b"Our apologies, but our server is down due to "
b"a sudden zombie invasion.")
# Test cases
class ProxyAuthTests(unittest.TestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib.request.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib.request.ProxyDigestAuthHandler()
self.opener = urllib.request.build_opener(
handler, self.proxy_digest_handler)
def tearDown(self):
self.server.stop()
super(ProxyAuthTests, self).tearDown()
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
result = self.opener.open(self.URL)
while result.read():
pass
result.close()
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib.error.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
result = None
if result:
while result.read():
pass
result.close()
def GetRequestHandler(responses):
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
while body:
done = self.wfile.write(body)
body = body[done:]
def do_POST(self):
content_length = self.headers["Content-Length"]
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % {'port':self.port})
if body:
self.send_header("Content-type", "text/plain")
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
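# Each entry in `responses` is a (status, headers, body) tuple; a hypothetical
# two-step exchange looks like:
#   GetRequestHandler([(302, [("Location", "http://localhost:%(port)s/next")], ""),
#                      (200, [], b"done")])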
class TestUrlopen(unittest.TestCase):
"""Tests urllib.request.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
"""
def setUp(self):
super(TestUrlopen, self).setUp()
self.server = None
def tearDown(self):
if self.server is not None:
self.server.stop()
super(TestUrlopen, self).tearDown()
def urlopen(self, url, data=None, **kwargs):
l = []
f = urllib.request.urlopen(url, data, **kwargs)
try:
# Exercise various methods
l.extend(f.readlines(200))
l.append(f.readline())
l.append(f.read(1024))
l.append(f.read())
finally:
f.close()
return b"".join(l)
def start_server(self, responses=None):
if responses is None:
responses = [(200, [], b"we don't care")]
handler = GetRequestHandler(responses)
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
port = self.server.port
handler.port = port
return handler
def start_https_server(self, responses=None, certfile=CERT_localhost):
if not hasattr(urllib.request, 'HTTPSHandler'):
self.skipTest('ssl support required')
from test.ssl_servers import make_https_server
if responses is None:
responses = [(200, [], b"we care a bit")]
handler = GetRequestHandler(responses)
server = make_https_server(self, certfile=certfile, handler_class=handler)
handler.port = server.port
return handler
def test_redirection(self):
expected_response = b"We got here..."
responses = [
(302, [("Location", "http://localhost:%(port)s/somewhere_else")],
""),
(200, [], expected_response)
]
handler = self.start_server(responses)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/", "/somewhere_else"])
def test_chunked(self):
expected_response = b"hello world"
chunked_start = (
b'a\r\n'
b'hello worl\r\n'
b'1\r\n'
b'd\r\n'
b'0\r\n'
)
response = [(200, [("Transfer-Encoding", "chunked")], chunked_start)]
handler = self.start_server(response)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
def test_404(self):
expected_response = b"Bad bad bad..."
handler = self.start_server([(404, [], expected_response)])
try:
self.urlopen("http://localhost:%s/weeble" % handler.port)
except urllib.error.URLError as f:
data = f.read()
f.close()
else:
self.fail("404 should raise URLError")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/weeble"])
def test_200(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre"])
def test_200_with_parameters(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port,
b"get=with_feeling")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"])
def test_https(self):
handler = self.start_https_server()
data = self.urlopen("https://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, b"we care a bit")
def test_https_with_cafile(self):
handler = self.start_https_server(certfile=CERT_localhost)
import ssl
# Good cert
data = self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_localhost)
self.assertEqual(data, b"we care a bit")
# Bad cert
with self.assertRaises(urllib.error.URLError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
# Good cert, but mismatching hostname
handler = self.start_https_server(certfile=CERT_fakehostname)
with self.assertRaises(ssl.CertificateError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
def test_sending_headers(self):
handler = self.start_server()
req = urllib.request.Request("http://localhost:%s/" % handler.port,
headers={"Range": "bytes=20-39"})
urllib.request.urlopen(req)
self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
def test_basic(self):
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
for attr in ("read", "close", "info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_info(self):
handler = self.start_server()
try:
open_url = urllib.request.urlopen(
"http://localhost:%s" % handler.port)
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "plain")
finally:
self.server.stop()
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
url = open_url.geturl()
self.assertEqual(url, "http://localhost:%s" % handler.port)
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
self.assertRaises(IOError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
# domain name requests in an attempt to boost traffic
# to their own sites, finding a domain name to use
# for this test is difficult. RFC2606 leads one to
# believe that '.invalid' should work, but experience
# seemed to indicate otherwise. Single character
# TLDs are likely to remain invalid, so this seems to
# be the best choice. The trailing '.' prevents a
# related problem: The normal DNS resolver appends
# the domain names from the search path if there is
# no '.' the end and, and if one of those domains
# implements a '*' rule a result is returned.
# However, none of this will prevent the test from
# failing if the ISP hijacks all invalid domain
# requests. The real solution would be to be able to
# parameterize the framework with a mock resolver.
urllib.request.urlopen,
"http://sadflkjsasf.i.nvali.d./")
def test_iteration(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for line in data:
self.assertEqual(line, expected_response)
def test_line_iteration(self):
lines = [b"We\n", b"got\n", b"here\n", b"verylong " * 8192 + b"\n"]
expected_response = b"".join(lines)
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for index, line in enumerate(data):
self.assertEqual(line, lines[index],
"Fetched line number %s doesn't match expected:\n"
" Expected length was %s, got %s" %
(index, len(lines[index]), len(line)))
self.assertEqual(index + 1, len(lines))
@support.reap_threads
def test_main():
support.run_unittest(ProxyAuthTests, TestUrlopen)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
lixt/lily2-gem5 | src/cpu/o3/FuncUnitConfig.py | 49 | 4259 | # Copyright (c) 2010 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.SimObject import SimObject
from m5.params import *
from FuncUnit import *
class IntALU(FUDesc):
opList = [ OpDesc(opClass='IntAlu') ]
count = 6
class IntMultDiv(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3),
OpDesc(opClass='IntDiv', opLat=20, issueLat=19) ]
count=2
class FP_ALU(FUDesc):
opList = [ OpDesc(opClass='FloatAdd', opLat=2),
OpDesc(opClass='FloatCmp', opLat=2),
OpDesc(opClass='FloatCvt', opLat=2) ]
count = 4
class FP_MultDiv(FUDesc):
opList = [ OpDesc(opClass='FloatMult', opLat=4),
OpDesc(opClass='FloatDiv', opLat=12, issueLat=12),
OpDesc(opClass='FloatSqrt', opLat=24, issueLat=24) ]
count = 2
class SIMD_Unit(FUDesc):
opList = [ OpDesc(opClass='SimdAdd'),
OpDesc(opClass='SimdAddAcc'),
OpDesc(opClass='SimdAlu'),
OpDesc(opClass='SimdCmp'),
OpDesc(opClass='SimdCvt'),
OpDesc(opClass='SimdMisc'),
OpDesc(opClass='SimdMult'),
OpDesc(opClass='SimdMultAcc'),
OpDesc(opClass='SimdShift'),
OpDesc(opClass='SimdShiftAcc'),
OpDesc(opClass='SimdSqrt'),
OpDesc(opClass='SimdFloatAdd'),
OpDesc(opClass='SimdFloatAlu'),
OpDesc(opClass='SimdFloatCmp'),
OpDesc(opClass='SimdFloatCvt'),
OpDesc(opClass='SimdFloatDiv'),
OpDesc(opClass='SimdFloatMisc'),
OpDesc(opClass='SimdFloatMult'),
OpDesc(opClass='SimdFloatMultAcc'),
OpDesc(opClass='SimdFloatSqrt') ]
count = 4
class ReadPort(FUDesc):
opList = [ OpDesc(opClass='MemRead') ]
count = 0
class WritePort(FUDesc):
opList = [ OpDesc(opClass='MemWrite') ]
count = 0
class RdWrPort(FUDesc):
opList = [ OpDesc(opClass='MemRead'), OpDesc(opClass='MemWrite') ]
count = 4
class IprPort(FUDesc):
opList = [ OpDesc(opClass='IprAccess', opLat = 3, issueLat = 3) ]
count = 1
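# A custom functional-unit pool follows the same pattern (illustrative,
# not part of the stock configuration):
#
# class MyDivUnit(FUDesc):
#     opList = [ OpDesc(opClass='IntDiv', opLat=12, issueLat=12) ]
#     count = 1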
| bsd-3-clause |
Honry/crosswalk-test-suite | stability/stability-lowresource-android-tests/lowresource/Webapp_Operations_UnderLowDisk.py | 7 | 9653 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<haox.li@intel.com>
import unittest
import os
import sys
import commands
import shutil
import time
import subprocess
import glob
from TestApp import *
reload(sys)
sys.setdefaultencoding('utf-8')
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
appsrc = ConstPath + "/../testapp/helloworld"
approot = ConstPath + "/helloworld"
app_tools_dir = os.environ.get('CROSSWALK_APP_TOOLS_CACHE_DIR')
instaled_app_list = []
def setUp():
global device, apptools, crosswalkzip
#device = 'E6OKCY411012'
device = os.environ.get('DEVICE_ID')
global device_abi
device_abi = getDeviceCpuAbi(device)
if not device:
print 'Get env error\n'
sys.exit(1)
if not app_tools_dir:
print ("Not find CROSSWALK_APP_TOOLS_CACHE_DIR\n")
sys.exit(1)
# app tools commend
apptools = "crosswalk-pkg"
if os.system(apptools) != 0:
apptools = app_tools_dir + "/crosswalk-app-tools/src/crosswalk-pkg"
# crosswalk lib
zips = glob.glob(os.path.join(app_tools_dir, "crosswalk-*.zip"))
if len(zips) == 0:
print ("Not find crosswalk zip in CROSSWALK_APP_TOOLS_CACHE_DIR\n")
sys.exit(1)
# latest version
zips.sort(reverse = True)
crosswalkzip = zips[0]
def getFreeDiskSize(device):
# Disk size: M
cmd = "%s -s %s shell df|grep %s |awk -F \" \" '{print $4}'" % (ADB_CMD, device, "/data")
(return_code, output) = doCMD(cmd)
for line in output:
if line.endswith("G"):
# 1G = 1024M
return int(float(line[0:-1]) * 1024)
else:
return int(float(line[0:-1]))
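# Illustrative `df` row feeding the pipeline above:
#   /data  12G  4.1G  7.9G  34%  /data   ->  awk prints "7.9G"  ->  8089 MB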
def getDeviceCpuAbi(device):
cmd = "%s -s %s shell getprop|grep \"\[ro.product.cpu.abi\]\"" % (ADB_CMD, device)
(return_code, output) = doCMD(cmd)
for line in output:
if "x86" in line:
return "x86"
else:
return "arm"
def getFileSize(filepath):
filesize = 0
if os.path.exists(filepath):
filesize = float(os.stat(filepath).st_size)
# size: M
filesize = filesize/1024/1024
else:
print "-->> %s does not exists" % filepath
return filesize
def createAPK(appname):
action_status = True
# Remove existed manifest.json
if os.path.exists(appsrc + "/manifest.json"):
os.remove(appsrc + "/manifest.json")
# build apk
cmd = "%s --crosswalk=%s --platforms=android --android=%s --targets=%s -m " \
"\"{\\\"name\\\": \\\"%s\\\", \\\"start_url\\\": \\\"index.html\\\", \\\"xwalk_package_id\\\": \\\"org.xwalk.%s\\\"}\" %s" % \
(apptools,
crosswalkzip,
"embedded",
device_abi,
appname,
appname,
appsrc)
(return_code, output) = doCMD(cmd)
if return_code == 0:
print "-->> org.xwalk.%s success to build." % appname
cmd = "mv *.apk %s/%s.apk" % (approot, appname)
(return_code, output) = doCMD(cmd)
else:
print "-->> org.xwalk.%s fail to build." % appname
action_status = False
return action_status
def deleteAPK(testapp):
cmd = "rm -rf %s" % (testapp.location)
(return_code, output) = doCMD(cmd)
if return_code == 0:
print "-->> %s success to delete." % testapp.location
return True
else:
print "-->> %s fail to delete." % testapp.location
return False
def cleanWork():
cmd = "rm -rf %s" % (appsrc + "/*.temp.mp4")
(return_code, output) = doCMD(cmd)
cmd = "rm -rf %s" % (approot)
(return_code, output) = doCMD(cmd)
for i in range(len(instaled_app_list)):
instaled_app_list[i].uninstall()
def makeLowDisk():
cleanWork()
action_status = False
if not os.path.exists(approot):
cmd = "mkdir %s" % approot
(return_code, output) = doCMD(cmd)
videofile = appsrc + "/res/w3c/movie_300.mp4"
videosize = getFileSize(videofile)
if videosize <= 0:
print "-->> Lack pre-condition resource files"
return False
tmpreadystate = [False, False, False]
    global installed_app_list
while not action_status:
freesize = getFreeDiskSize(device)
if (freesize >= 1024) and not tmpreadystate[0]:
# make app size: 500M
count = int((500 - videosize)/videosize)
for i in range(count):
cmd = "cp %s %s " % (videofile, appsrc + "/video" + str(i) +".temp.mp4")
(return_code, output) = doCMD(cmd)
tmpreadystate[0] = True
elif (freesize >= 512) and (freesize < 1024) and not tmpreadystate[1]:
# clean appsrc
if tmpreadystate[0]:
cmd = "rm -rf %s/*.temp.mp4" % (appsrc)
(return_code, output) = doCMD(cmd)
# make app size: 100M
count = int((100 - videosize)/videosize)
for i in range(count):
cmd = "cp %s %s " % (videofile, appsrc + "/video" + str(i) +".temp.mp4")
(return_code, output) = doCMD(cmd)
tmpreadystate[1] = True
elif (freesize < 512) and not tmpreadystate[2]:
# clean appsrc
cmd = "rm -rf %s/*.temp.mp4" % (appsrc)
(return_code, output) = doCMD(cmd)
tmpreadystate[2] = True
appname = "helloworld%s" % int(time.time())
if createAPK(appname):
apkname = appname[0].upper() + appname[1:]
apkpath = approot + "/" + appname + ".apk"
testapp = TestApp(device, apkpath,
"org.xwalk." + appname, apkname + "Activity")
            # If the app is already installed, skip it and try the next one
if not testapp.isInstalled():
                # if the install fails, delete the package; the loop then
                # ends and makeLowDisk returns True
if not testapp.install():
action_status = True
deleteAPK(testapp)
                    # tmpreadystate[2] == True means free disk space is too
                    # small to install the test app, so uninstall the most
                    # recent install to reclaim space
                    if len(installed_app_list) > 0 and tmpreadystate[2]:
                        testapp = installed_app_list.pop(-1)
testapp.uninstall()
deleteAPK(testapp)
else:
                    installed_app_list.append(testapp)
else:
break
return action_status
class TestStabilityInLowDiskFunctions(unittest.TestCase):
def test_app_repeatedly_in_lowdisk(self):
setUp()
if makeLowDisk():
testapp = TestApp(device, ConstPath + "/../testapp/lowresourcetest.apk",
"org.xwalk.lowresourcetest", "LowresourcetestActivity")
if testapp.isInstalled():
testapp.uninstall()
for i in range(20):
if testapp.install() and testapp.launch():
switchresult = False
for i in range(2):
time.sleep(1)
                        # switch app
switchresult = testapp.switch()
if switchresult:
time.sleep(1)
if testapp.stop() and testapp.uninstall():
time.sleep(1)
else:
testapp.uninstall()
cleanWork()
self.assertTrue(False)
else:
testapp.uninstall()
cleanWork()
self.assertTrue(False)
else:
testapp.uninstall()
cleanWork()
self.assertTrue(False)
testapp.uninstall()
cleanWork()
self.assertTrue(True)
else:
print "-->> Test envrionment fail to set up"
cleanWork()
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
LucaDiStasio/thinPlyMechanics | python/preprocessor.py | 1 | 2413 | # Autogenerated with SMOP
from smop.core import *
#
##
#==============================================================================
# Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
# Author: Luca Di Stasio <luca.distasio@gmail.com>
# <luca.distasio@ingpec.eu>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# Neither the name of the Universite de Lorraine or Lulea tekniska universitet
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# DESCRIPTION
#
# Abaqus preprocessor script
#
clear('all')
close_('all')
projectName=rve_mesh_create_project('H:/01_Luca/07_DocMASE/07_Data/03_FEM/CurvedInterface','D:\\OneDrive\\01_Luca\\07_DocMASE\\07_Data\\02_Material-Properties','projectsIndex',5,1,1,0,0,0,1,1,1,0,1,0,5,0,0.0,1.0,7.9e-05,1.0,0.0,0.174532925199,0.0,0.0,0.01,2,1,1,1,cat([1000000],[1],[1],[1],[1],[1],[1],[1e-18],[1e-06],[1],[1],[1],[1],[1e-12],[1000000]),0.43,0.95,1.05,1.0,10,80,80,60,20,0,0,1,20) | apache-2.0 |
staslev/incubator-beam | sdks/python/apache_beam/testing/test_pipeline_test.py | 9 | 4209 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit test for the TestPipeline class"""
import logging
import unittest
from hamcrest.core.assert_that import assert_that as hc_assert_that
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
# A simple matcher that is used for testing extra options appending.
class SimpleMatcher(BaseMatcher):
def _matches(self, item):
return True
class TestPipelineTest(unittest.TestCase):
TEST_CASE = {'options':
['--test-pipeline-options', '--job=mockJob --male --age=1'],
'expected_list': ['--job=mockJob', '--male', '--age=1'],
'expected_dict': {'job': 'mockJob',
'male': True,
'age': 1}}
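  # Illustrative doctest-style sketch (results match the assertions below):
  # >>> tp = TestPipeline(argv=TestPipelineTest.TEST_CASE['options'])
  # >>> sorted(tp.get_full_options_as_args())
  # ['--age=1', '--job=mockJob', '--male']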
# Used for testing pipeline option creation.
class TestParsingOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--job', action='store', help='mock job')
parser.add_argument('--male', action='store_true', help='mock gender')
parser.add_argument('--age', action='store', type=int, help='mock age')
def test_option_args_parsing(self):
test_pipeline = TestPipeline(argv=self.TEST_CASE['options'])
self.assertListEqual(
sorted(test_pipeline.get_full_options_as_args()),
sorted(self.TEST_CASE['expected_list']))
def test_empty_option_args_parsing(self):
test_pipeline = TestPipeline()
self.assertListEqual([],
test_pipeline.get_full_options_as_args())
def test_create_test_pipeline_options(self):
test_pipeline = TestPipeline(argv=self.TEST_CASE['options'])
test_options = PipelineOptions(test_pipeline.get_full_options_as_args())
self.assertDictContainsSubset(self.TEST_CASE['expected_dict'],
test_options.get_all_options())
EXTRA_OPT_CASES = [
{'options': {'name': 'Mark'},
'expected': ['--name=Mark']},
{'options': {'student': True},
'expected': ['--student']},
{'options': {'student': False},
'expected': []},
{'options': {'name': 'Mark', 'student': True},
'expected': ['--name=Mark', '--student']}
]
def test_append_extra_options(self):
test_pipeline = TestPipeline()
for case in self.EXTRA_OPT_CASES:
opt_list = test_pipeline.get_full_options_as_args(**case['options'])
self.assertListEqual(sorted(opt_list), sorted(case['expected']))
def test_append_verifier_in_extra_opt(self):
extra_opt = {'matcher': SimpleMatcher()}
opt_list = TestPipeline().get_full_options_as_args(**extra_opt)
_, value = opt_list[0].split('=', 1)
matcher = pickler.loads(value)
self.assertTrue(isinstance(matcher, BaseMatcher))
hc_assert_that(None, matcher)
def test_get_option(self):
name, value = ('job', 'mockJob')
test_pipeline = TestPipeline()
test_pipeline.options_list = ['--%s=%s' % (name, value)]
self.assertEqual(test_pipeline.get_option(name), value)
def test_skip_IT(self):
test_pipeline = TestPipeline(is_integration_test=True)
test_pipeline.run()
# Note that this will never be reached since it should be skipped above.
self.fail()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |
serialdoom/ansible | lib/ansible/plugins/lookup/ini.py | 4 | 4178 | # (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import os
import re
try:
# python2
import ConfigParser as configparser
except ImportError:
# python3
import configparser
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
def _parse_params(term):
'''Safely split parameter term to preserve spaces'''
keys = ['key', 'section', 'file', 're']
params = {}
for k in keys:
params[k] = ''
thiskey = 'key'
for idp,phrase in enumerate(term.split()):
for k in keys:
if ('%s=' % k) in phrase:
thiskey = k
if idp == 0 or not params[thiskey]:
params[thiskey] = phrase
else:
params[thiskey] += ' ' + phrase
rparams = [params[x] for x in keys if params[x]]
return rparams
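# Illustrative example (not part of the original plugin): embedded spaces
# in a value survive the split while the recognised keys are separated out:
# >>> _parse_params('user.name section=auth file=my settings.ini')
# ['user.name', 'section=auth', 'file=my settings.ini']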
class LookupModule(LookupBase):
def read_properties(self, filename, key, dflt, is_regexp):
config = StringIO()
current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')
config.write(u'[java_properties]\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))
config.seek(0, os.SEEK_SET)
self.cp.readfp(config)
return self.get_value(key, 'java_properties', dflt, is_regexp)
def read_ini(self, filename, key, section, dflt, is_regexp):
self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))
return self.get_value(key, section, dflt, is_regexp)
def get_value(self, key, section, dflt, is_regexp):
# Retrieve all values from a section using a regexp
if is_regexp:
return [v for k, v in self.cp.items(section) if re.match(key, k)]
value = None
# Retrieve a single value
try:
value = self.cp.get(section, key)
except configparser.NoOptionError:
return dflt
return value
def run(self, terms, variables=None, **kwargs):
basedir = self.get_basedir(variables)
self.basedir = basedir
self.cp = configparser.ConfigParser()
ret = []
for term in terms:
params = _parse_params(term)
key = params[0]
paramvals = {
'file' : 'ansible.ini',
're' : False,
'default' : None,
'section' : "global",
'type' : "ini",
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
path = self.find_file_in_search_path(variables, 'files', paramvals['file'])
if paramvals['type'] == "properties":
var = self.read_properties(path, key, paramvals['default'], paramvals['re'])
else:
var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])
if var is not None:
if type(var) is list:
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
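# Illustrative playbook usage (file names and keys are hypothetical):
# {{ lookup('ini', 'user section=integration file=users.ini') }}
# reads key "user" from the [integration] section of files/users.ini;
# adding type=properties switches to java-properties parsing, which has no
# sections and is wrapped above in a synthetic [java_properties] section.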
| gpl-3.0 |
shaistaansari/django | django/contrib/staticfiles/handlers.py | 581 | 2328 | from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
# May be used to differentiate between handler types (e.g. in a
# request_finished signal)
handles_files = True
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
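    # Illustrative mapping (assuming STATIC_URL = '/static/'): a request
    # path of '/static/css/base.css' yields the relative path
    # 'css/base.css', which serve() below resolves through the
    # staticfiles finders.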
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
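# Illustrative wiring (a minimal sketch, not part of this module): the
# development server effectively does
# application = StaticFilesHandler(get_wsgi_application())
# so that requests under STATIC_URL are served directly and everything
# else falls through to the wrapped WSGI application.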
| bsd-3-clause |
dscho/hg | hgext/zeroconf/Zeroconf.py | 1 | 58442 | from __future__ import absolute_import, print_function
""" Multicast DNS Service Discovery for Python, v0.12
Copyright (C) 2003, Paul Scott-Murphy
This module provides a framework for the use of DNS Service Discovery
using IP multicast. It has been tested against the JRendezvous
implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
and against the mDNSResponder from Mac OS X 10.3.8.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, see
<http://www.gnu.org/licenses/>.
"""
"""0.12 update - allow selection of binding interface
         typo fix - Thanks A. M. Kuchling
removed all use of word 'Rendezvous' - this is an API change"""
"""0.11 update - correction to comments for addListener method
support for new record types seen from OS X
- IPv6 address
- hostinfo
ignore unknown DNS record types
fixes to name decoding
works alongside other processes using port 5353 (e.g. Mac OS X)
tested against Mac OS X 10.3.2's mDNSResponder
corrections to removal of list entries for service browser"""
"""0.10 update - Jonathon Paisley contributed these corrections:
always multicast replies, even when query is unicast
correct a pointer encoding problem
can now write records in any order
traceback shown on failure
better TXT record parsing
server is now separate from name
can cancel a service browser
modified some unit tests to accommodate these changes"""
"""0.09 update - remove all records on service unregistration
fix DOS security problem with readName"""
"""0.08 update - changed licensing to LGPL"""
"""0.07 update - faster shutdown on engine
pointer encoding of outgoing names
ServiceBrowser now works
new unit tests"""
"""0.06 update - small improvements with unit tests
added defined exception types
new style objects
fixed hostname/interface problem
fixed socket timeout problem
fixed addServiceListener() typo bug
using select() for socket reads
tested on Debian unstable with Python 2.2.2"""
"""0.05 update - ensure case insensitivity on domain names
support for unicast DNS queries"""
"""0.04 update - added some unit tests
added __ne__ adjuncts where required
ensure names end in '.local.'
timeout on receiving socket for clean shutdown"""
__author__ = "Paul Scott-Murphy"
__email__ = "paul at scott dash murphy dot com"
__version__ = "0.12"
import itertools
import select
import socket
import string
import struct
import threading
import time
import traceback
__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
# hook for threads
globals()['_GLOBAL_DONE'] = 0
# Some timing constants
_UNREGISTER_TIME = 125
_CHECK_TIME = 175
_REGISTER_TIME = 225
_LISTENER_TIME = 200
_BROWSER_TIME = 500
# Some DNS constants
_MDNS_ADDR = '224.0.0.251'
_MDNS_PORT = 5353
_DNS_PORT = 53
_DNS_TTL = 60 * 60 # one hour default TTL
_MAX_MSG_TYPICAL = 1460 # unused
_MAX_MSG_ABSOLUTE = 8972
_FLAGS_QR_MASK = 0x8000 # query response mask
_FLAGS_QR_QUERY = 0x0000 # query
_FLAGS_QR_RESPONSE = 0x8000 # response
_FLAGS_AA = 0x0400 # Authoritative answer
_FLAGS_TC = 0x0200 # Truncated
_FLAGS_RD = 0x0100 # Recursion desired
_FLAGS_RA = 0x0080 # Recursion available
_FLAGS_Z = 0x0040 # Zero
_FLAGS_AD = 0x0020 # Authentic data
_FLAGS_CD = 0x0010 # Checking disabled
_CLASS_IN = 1
_CLASS_CS = 2
_CLASS_CH = 3
_CLASS_HS = 4
_CLASS_NONE = 254
_CLASS_ANY = 255
_CLASS_MASK = 0x7FFF
_CLASS_UNIQUE = 0x8000
_TYPE_A = 1
_TYPE_NS = 2
_TYPE_MD = 3
_TYPE_MF = 4
_TYPE_CNAME = 5
_TYPE_SOA = 6
_TYPE_MB = 7
_TYPE_MG = 8
_TYPE_MR = 9
_TYPE_NULL = 10
_TYPE_WKS = 11
_TYPE_PTR = 12
_TYPE_HINFO = 13
_TYPE_MINFO = 14
_TYPE_MX = 15
_TYPE_TXT = 16
_TYPE_AAAA = 28
_TYPE_SRV = 33
_TYPE_ANY = 255
# Mapping constants to names
_CLASSES = { _CLASS_IN : "in",
_CLASS_CS : "cs",
_CLASS_CH : "ch",
_CLASS_HS : "hs",
_CLASS_NONE : "none",
_CLASS_ANY : "any" }
_TYPES = { _TYPE_A : "a",
_TYPE_NS : "ns",
_TYPE_MD : "md",
_TYPE_MF : "mf",
_TYPE_CNAME : "cname",
_TYPE_SOA : "soa",
_TYPE_MB : "mb",
_TYPE_MG : "mg",
_TYPE_MR : "mr",
_TYPE_NULL : "null",
_TYPE_WKS : "wks",
_TYPE_PTR : "ptr",
_TYPE_HINFO : "hinfo",
_TYPE_MINFO : "minfo",
_TYPE_MX : "mx",
_TYPE_TXT : "txt",
_TYPE_AAAA : "quada",
_TYPE_SRV : "srv",
_TYPE_ANY : "any" }
# utility functions
def currentTimeMillis():
"""Current system time in milliseconds"""
return time.time() * 1000
# Exceptions
class NonLocalNameException(Exception):
pass
class NonUniqueNameException(Exception):
pass
class NamePartTooLongException(Exception):
pass
class AbstractMethodException(Exception):
pass
class BadTypeInNameException(Exception):
pass
class BadDomainName(Exception):
def __init__(self, pos):
Exception.__init__(self, "at position %s" % pos)
class BadDomainNameCircular(BadDomainName):
pass
# implementation classes
class DNSEntry(object):
"""A DNS entry"""
def __init__(self, name, type, clazz):
self.key = string.lower(name)
self.name = name
self.type = type
self.clazz = clazz & _CLASS_MASK
self.unique = (clazz & _CLASS_UNIQUE) != 0
def __eq__(self, other):
"""Equality test on name, type, and class"""
if isinstance(other, DNSEntry):
return (self.name == other.name and self.type == other.type and
self.clazz == other.clazz)
return 0
def __ne__(self, other):
"""Non-equality test"""
return not self.__eq__(other)
def getClazz(self, clazz):
"""Class accessor"""
try:
return _CLASSES[clazz]
except KeyError:
return "?(%s)" % (clazz)
def getType(self, type):
"""Type accessor"""
try:
return _TYPES[type]
except KeyError:
return "?(%s)" % (type)
def toString(self, hdr, other):
"""String representation with additional information"""
result = ("%s[%s,%s" %
(hdr, self.getType(self.type), self.getClazz(self.clazz)))
if self.unique:
result += "-unique,"
else:
result += ","
result += self.name
if other is not None:
result += ",%s]" % (other)
else:
result += "]"
return result
class DNSQuestion(DNSEntry):
"""A DNS question entry"""
def __init__(self, name, type, clazz):
if not name.endswith(".local."):
raise NonLocalNameException(name)
DNSEntry.__init__(self, name, type, clazz)
def answeredBy(self, rec):
"""Returns true if the question is answered by the record"""
return (self.clazz == rec.clazz and
(self.type == rec.type or self.type == _TYPE_ANY) and
self.name == rec.name)
def __repr__(self):
"""String representation"""
return DNSEntry.toString(self, "question", None)
class DNSRecord(DNSEntry):
"""A DNS record - like a DNS entry, but has a TTL"""
def __init__(self, name, type, clazz, ttl):
DNSEntry.__init__(self, name, type, clazz)
self.ttl = ttl
self.created = currentTimeMillis()
def __eq__(self, other):
"""Tests equality as per DNSRecord"""
if isinstance(other, DNSRecord):
return DNSEntry.__eq__(self, other)
return 0
def suppressedBy(self, msg):
"""Returns true if any answer in a message can suffice for the
information held in this record."""
for record in msg.answers:
if self.suppressedByAnswer(record):
return 1
return 0
def suppressedByAnswer(self, other):
"""Returns true if another record has same name, type and class,
and if its TTL is at least half of this record's."""
if self == other and other.ttl > (self.ttl / 2):
return 1
return 0
def getExpirationTime(self, percent):
"""Returns the time at which this record will have expired
by a certain percentage."""
return self.created + (percent * self.ttl * 10)
def getRemainingTTL(self, now):
"""Returns the remaining TTL in seconds."""
return max(0, (self.getExpirationTime(100) - now) / 1000)
def isExpired(self, now):
"""Returns true if this record has expired."""
return self.getExpirationTime(100) <= now
def isStale(self, now):
"""Returns true if this record is at least half way expired."""
return self.getExpirationTime(50) <= now
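    # Illustrative arithmetic: created is in milliseconds and ttl is in
    # seconds, so percent * ttl * 10 equals (percent / 100.0) * ttl * 1000
    # milliseconds. For ttl=60, getExpirationTime(50) is created + 30000,
    # i.e. a record goes stale halfway through its lifetime.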
def resetTTL(self, other):
"""Sets this record's TTL and created time to that of
another record."""
self.created = other.created
self.ttl = other.ttl
def write(self, out):
"""Abstract method"""
raise AbstractMethodException
def toString(self, other):
"""String representation with additional information"""
arg = ("%s/%s,%s" %
(self.ttl, self.getRemainingTTL(currentTimeMillis()), other))
return DNSEntry.toString(self, "record", arg)
class DNSAddress(DNSRecord):
"""A DNS address record"""
def __init__(self, name, type, clazz, ttl, address):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.address = address
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.address, len(self.address))
def __eq__(self, other):
"""Tests equality on address"""
if isinstance(other, DNSAddress):
return self.address == other.address
return 0
def __repr__(self):
"""String representation"""
try:
return socket.inet_ntoa(self.address)
except Exception:
return self.address
class DNSHinfo(DNSRecord):
"""A DNS host information record"""
def __init__(self, name, type, clazz, ttl, cpu, os):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.cpu = cpu
self.os = os
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.cpu, len(self.cpu))
out.writeString(self.os, len(self.os))
def __eq__(self, other):
"""Tests equality on cpu and os"""
if isinstance(other, DNSHinfo):
return self.cpu == other.cpu and self.os == other.os
return 0
def __repr__(self):
"""String representation"""
return self.cpu + " " + self.os
class DNSPointer(DNSRecord):
"""A DNS pointer record"""
def __init__(self, name, type, clazz, ttl, alias):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.alias = alias
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeName(self.alias)
def __eq__(self, other):
"""Tests equality on alias"""
if isinstance(other, DNSPointer):
return self.alias == other.alias
return 0
def __repr__(self):
"""String representation"""
return self.toString(self.alias)
class DNSText(DNSRecord):
"""A DNS text record"""
def __init__(self, name, type, clazz, ttl, text):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.text = text
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.text, len(self.text))
def __eq__(self, other):
"""Tests equality on text"""
if isinstance(other, DNSText):
return self.text == other.text
return 0
def __repr__(self):
"""String representation"""
if len(self.text) > 10:
return self.toString(self.text[:7] + "...")
else:
return self.toString(self.text)
class DNSService(DNSRecord):
"""A DNS service record"""
def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.priority = priority
self.weight = weight
self.port = port
self.server = server
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeShort(self.priority)
out.writeShort(self.weight)
out.writeShort(self.port)
out.writeName(self.server)
def __eq__(self, other):
"""Tests equality on priority, weight, port and server"""
if isinstance(other, DNSService):
return (self.priority == other.priority and
self.weight == other.weight and
self.port == other.port and
self.server == other.server)
return 0
def __repr__(self):
"""String representation"""
return self.toString("%s:%s" % (self.server, self.port))
class DNSIncoming(object):
"""Object representation of an incoming DNS packet"""
def __init__(self, data):
"""Constructor from string holding bytes of packet"""
self.offset = 0
self.data = data
self.questions = []
self.answers = []
self.numquestions = 0
self.numanswers = 0
self.numauthorities = 0
self.numadditionals = 0
self.readHeader()
self.readQuestions()
self.readOthers()
def readHeader(self):
"""Reads header portion of packet"""
format = '!HHHHHH'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
self.id = info[0]
self.flags = info[1]
self.numquestions = info[2]
self.numanswers = info[3]
self.numauthorities = info[4]
self.numadditionals = info[5]
def readQuestions(self):
"""Reads questions section of packet"""
format = '!HH'
length = struct.calcsize(format)
for i in range(0, self.numquestions):
name = self.readName()
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
try:
question = DNSQuestion(name, info[0], info[1])
self.questions.append(question)
except NonLocalNameException:
pass
def readInt(self):
"""Reads an integer from the packet"""
format = '!I'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0]
def readCharacterString(self):
"""Reads a character string from the packet"""
length = ord(self.data[self.offset])
self.offset += 1
return self.readString(length)
def readString(self, len):
"""Reads a string of a given length from the packet"""
format = '!' + str(len) + 's'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0]
def readUnsignedShort(self):
"""Reads an unsigned short from the packet"""
format = '!H'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0]
def readOthers(self):
"""Reads answers, authorities and additionals section of the packet"""
format = '!HHiH'
length = struct.calcsize(format)
n = self.numanswers + self.numauthorities + self.numadditionals
for i in range(0, n):
domain = self.readName()
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
rec = None
if info[0] == _TYPE_A:
rec = DNSAddress(domain, info[0], info[1], info[2],
self.readString(4))
elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
rec = DNSPointer(domain, info[0], info[1], info[2],
self.readName())
elif info[0] == _TYPE_TXT:
rec = DNSText(domain, info[0], info[1], info[2],
self.readString(info[3]))
elif info[0] == _TYPE_SRV:
rec = DNSService(domain, info[0], info[1], info[2],
self.readUnsignedShort(),
self.readUnsignedShort(),
self.readUnsignedShort(),
self.readName())
elif info[0] == _TYPE_HINFO:
rec = DNSHinfo(domain, info[0], info[1], info[2],
self.readCharacterString(),
self.readCharacterString())
elif info[0] == _TYPE_AAAA:
rec = DNSAddress(domain, info[0], info[1], info[2],
self.readString(16))
else:
# Try to ignore types we don't know about
# this may mean the rest of the name is
# unable to be parsed, and may show errors
# so this is left for debugging. New types
# encountered need to be parsed properly.
#
#print "UNKNOWN TYPE = " + str(info[0])
#raise BadTypeInNameException
self.offset += info[3]
if rec is not None:
self.answers.append(rec)
def isQuery(self):
"""Returns true if this is a query"""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
def isResponse(self):
"""Returns true if this is a response"""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
def readUTF(self, offset, len):
"""Reads a UTF-8 string of a given length from the packet"""
return self.data[offset:offset + len].decode('utf-8')
def readName(self):
"""Reads a domain name from the packet"""
result = ''
off = self.offset
next = -1
first = off
while True:
len = ord(self.data[off])
off += 1
if len == 0:
break
t = len & 0xC0
if t == 0x00:
result = ''.join((result, self.readUTF(off, len) + '.'))
off += len
elif t == 0xC0:
if next < 0:
next = off + 1
off = ((len & 0x3F) << 8) | ord(self.data[off])
if off >= first:
raise BadDomainNameCircular(off)
first = off
else:
raise BadDomainName(off)
if next >= 0:
self.offset = next
else:
self.offset = off
return result
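# Illustrative example: the top two bits 0xC0 of a length byte mark an
# RFC 1035 compression pointer, so the bytes '\x04mail\xc0\x0c' decode as
# the label "mail." followed by a jump to offset 12. Pointers that do not
# move strictly backwards raise BadDomainNameCircular, which closes the
# readName denial-of-service hole mentioned in the 0.09 notes above.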
class DNSOutgoing(object):
"""Object representation of an outgoing packet"""
def __init__(self, flags, multicast=1):
self.finished = 0
self.id = 0
self.multicast = multicast
self.flags = flags
self.names = {}
self.data = []
self.size = 12
self.questions = []
self.answers = []
self.authorities = []
self.additionals = []
def addQuestion(self, record):
"""Adds a question"""
self.questions.append(record)
def addAnswer(self, inp, record):
"""Adds an answer"""
if not record.suppressedBy(inp):
self.addAnswerAtTime(record, 0)
def addAnswerAtTime(self, record, now):
"""Adds an answer if if does not expire by a certain time"""
if record is not None:
if now == 0 or not record.isExpired(now):
self.answers.append((record, now))
def addAuthoritativeAnswer(self, record):
"""Adds an authoritative answer"""
self.authorities.append(record)
def addAdditionalAnswer(self, record):
"""Adds an additional answer"""
self.additionals.append(record)
def writeByte(self, value):
"""Writes a single byte to the packet"""
format = '!c'
self.data.append(struct.pack(format, chr(value)))
self.size += 1
def insertShort(self, index, value):
"""Inserts an unsigned short in a certain position in the packet"""
format = '!H'
self.data.insert(index, struct.pack(format, value))
self.size += 2
def writeShort(self, value):
"""Writes an unsigned short to the packet"""
format = '!H'
self.data.append(struct.pack(format, value))
self.size += 2
def writeInt(self, value):
"""Writes an unsigned integer to the packet"""
format = '!I'
self.data.append(struct.pack(format, int(value)))
self.size += 4
def writeString(self, value, length):
"""Writes a string to the packet"""
format = '!' + str(length) + 's'
self.data.append(struct.pack(format, value))
self.size += length
def writeUTF(self, s):
"""Writes a UTF-8 string of a given length to the packet"""
utfstr = s.encode('utf-8')
length = len(utfstr)
if length > 64:
raise NamePartTooLongException
self.writeByte(length)
self.writeString(utfstr, length)
def writeName(self, name):
"""Writes a domain name to the packet"""
try:
# Find existing instance of this name in packet
#
index = self.names[name]
except KeyError:
# No record of this name already, so write it
# out as normal, recording the location of the name
# for future pointers to it.
#
self.names[name] = self.size
parts = name.split('.')
if parts[-1] == '':
parts = parts[:-1]
for part in parts:
self.writeUTF(part)
self.writeByte(0)
return
# An index was found, so write a pointer to it
#
self.writeByte((index >> 8) | 0xC0)
self.writeByte(index)
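    # Illustrative encoding: the first occurrence of "foo.local." is
    # written as length-prefixed labels (\x03foo\x05local\x00) and its
    # offset remembered in self.names; later occurrences are replaced by
    # the two pointer bytes ((offset >> 8) | 0xC0, offset & 0xFF), the
    # standard RFC 1035 name compression.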
def writeQuestion(self, question):
"""Writes a question to the packet"""
self.writeName(question.name)
self.writeShort(question.type)
self.writeShort(question.clazz)
def writeRecord(self, record, now):
"""Writes a record (answer, authoritative answer, additional) to
the packet"""
self.writeName(record.name)
self.writeShort(record.type)
if record.unique and self.multicast:
self.writeShort(record.clazz | _CLASS_UNIQUE)
else:
self.writeShort(record.clazz)
if now == 0:
self.writeInt(record.ttl)
else:
self.writeInt(record.getRemainingTTL(now))
index = len(self.data)
# Adjust size for the short we will write before this record
#
self.size += 2
record.write(self)
self.size -= 2
length = len(''.join(self.data[index:]))
self.insertShort(index, length) # Here is the short we adjusted for
def packet(self):
"""Returns a string containing the packet's bytes
No further parts should be added to the packet once this
is done."""
if not self.finished:
self.finished = 1
for question in self.questions:
self.writeQuestion(question)
for answer, time_ in self.answers:
self.writeRecord(answer, time_)
for authority in self.authorities:
self.writeRecord(authority, 0)
for additional in self.additionals:
self.writeRecord(additional, 0)
self.insertShort(0, len(self.additionals))
self.insertShort(0, len(self.authorities))
self.insertShort(0, len(self.answers))
self.insertShort(0, len(self.questions))
self.insertShort(0, self.flags)
if self.multicast:
self.insertShort(0, 0)
else:
self.insertShort(0, self.id)
return ''.join(self.data)
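# Illustrative layout note: the body is rendered first and the six 16-bit
# header fields (id, flags, question/answer/authority/additional counts)
# are then pushed onto the front with insertShort(0, ...), which is why
# they appear above in the reverse of their on-wire order.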
class DNSCache(object):
"""A cache of DNS entries"""
def __init__(self):
self.cache = {}
def add(self, entry):
"""Adds an entry"""
try:
list = self.cache[entry.key]
except KeyError:
list = self.cache[entry.key] = []
list.append(entry)
def remove(self, entry):
"""Removes an entry"""
try:
list = self.cache[entry.key]
list.remove(entry)
except KeyError:
pass
def get(self, entry):
"""Gets an entry by key. Will return None if there is no
matching entry."""
try:
list = self.cache[entry.key]
return list[list.index(entry)]
except (KeyError, ValueError):
return None
def getByDetails(self, name, type, clazz):
"""Gets an entry by details. Will return None if there is
no matching entry."""
entry = DNSEntry(name, type, clazz)
return self.get(entry)
def entriesWithName(self, name):
"""Returns a list of entries whose key matches the name."""
try:
return self.cache[name]
except KeyError:
return []
def entries(self):
"""Returns a list of all entries"""
try:
return list(itertools.chain.from_iterable(self.cache.values()))
except Exception:
return []
class Engine(threading.Thread):
"""An engine wraps read access to sockets, allowing objects that
need to receive data from sockets to be called back when the
sockets are ready.
A reader needs a handle_read() method, which is called when the socket
it is interested in is ready for reading.
Writers are not implemented here, because we only send short
packets.
"""
def __init__(self, zeroconf):
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.readers = {} # maps socket to reader
self.timeout = 5
self.condition = threading.Condition()
self.start()
def run(self):
while not globals()['_GLOBAL_DONE']:
rs = self.getReaders()
if len(rs) == 0:
# No sockets to manage, but we wait for the timeout
# or addition of a socket
#
self.condition.acquire()
self.condition.wait(self.timeout)
self.condition.release()
else:
try:
rr, wr, er = select.select(rs, [], [], self.timeout)
for sock in rr:
try:
self.readers[sock].handle_read()
except Exception:
if not globals()['_GLOBAL_DONE']:
traceback.print_exc()
except Exception:
pass
def getReaders(self):
self.condition.acquire()
result = self.readers.keys()
self.condition.release()
return result
def addReader(self, reader, socket):
self.condition.acquire()
self.readers[socket] = reader
self.condition.notify()
self.condition.release()
def delReader(self, socket):
self.condition.acquire()
del self.readers[socket]
self.condition.notify()
self.condition.release()
def notify(self):
self.condition.acquire()
self.condition.notify()
self.condition.release()
class Listener(object):
"""A Listener is used by this module to listen on the multicast
group to which DNS messages are sent, allowing the implementation
to cache information as it arrives.
It requires registration with an Engine object in order to have
the read() method called when a socket is available for reading."""
def __init__(self, zeroconf):
self.zeroconf = zeroconf
self.zeroconf.engine.addReader(self, self.zeroconf.socket)
def handle_read(self):
data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
self.data = data
msg = DNSIncoming(data)
if msg.isQuery():
# Always multicast responses
#
if port == _MDNS_PORT:
self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
# If it's not a multicast query, reply via unicast
# and multicast
#
elif port == _DNS_PORT:
self.zeroconf.handleQuery(msg, addr, port)
self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
else:
self.zeroconf.handleResponse(msg)
class Reaper(threading.Thread):
"""A Reaper is used by this module to remove cache entries that
have expired."""
def __init__(self, zeroconf):
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.start()
def run(self):
while True:
self.zeroconf.wait(10 * 1000)
if globals()['_GLOBAL_DONE']:
return
now = currentTimeMillis()
for record in self.zeroconf.cache.entries():
if record.isExpired(now):
self.zeroconf.updateRecord(now, record)
self.zeroconf.cache.remove(record)
class ServiceBrowser(threading.Thread):
"""Used to browse for a service of a specific type.
The listener object will have its addService() and
removeService() methods called when this browser
discovers changes in the services availability."""
def __init__(self, zeroconf, type, listener):
"""Creates a browser for a specific type"""
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.type = type
self.listener = listener
self.services = {}
self.nexttime = currentTimeMillis()
self.delay = _BROWSER_TIME
self.list = []
self.done = 0
self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR,
_CLASS_IN))
self.start()
def updateRecord(self, zeroconf, now, record):
"""Callback invoked by Zeroconf when new information arrives.
Updates information required by browser in the Zeroconf cache."""
if record.type == _TYPE_PTR and record.name == self.type:
expired = record.isExpired(now)
try:
oldrecord = self.services[record.alias.lower()]
if not expired:
oldrecord.resetTTL(record)
else:
del self.services[record.alias.lower()]
callback = (lambda x:
self.listener.removeService(x, self.type, record.alias))
self.list.append(callback)
return
except Exception:
if not expired:
self.services[record.alias.lower()] = record
callback = (lambda x:
self.listener.addService(x, self.type, record.alias))
self.list.append(callback)
expires = record.getExpirationTime(75)
if expires < self.nexttime:
self.nexttime = expires
def cancel(self):
self.done = 1
self.zeroconf.notifyAll()
def run(self):
while True:
event = None
now = currentTimeMillis()
if len(self.list) == 0 and self.nexttime > now:
self.zeroconf.wait(self.nexttime - now)
if globals()['_GLOBAL_DONE'] or self.done:
return
now = currentTimeMillis()
if self.nexttime <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
for record in self.services.values():
if not record.isExpired(now):
out.addAnswerAtTime(record, now)
self.zeroconf.send(out)
self.nexttime = now + self.delay
self.delay = min(20 * 1000, self.delay * 2)
if len(self.list) > 0:
event = self.list.pop(0)
if event is not None:
event(self.zeroconf)
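# Illustrative timing (values from the constants above): the browser
# re-queries after _BROWSER_TIME (500 ms) and doubles the delay each round
# up to a 20 second cap, so a long-lived browse settles into a slow poll.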
class ServiceInfo(object):
"""Service information"""
def __init__(self, type, name, address=None, port=None, weight=0,
priority=0, properties=None, server=None):
"""Create a service description.
type: fully qualified service type name
name: fully qualified service name
address: IP address as unsigned short, network byte order
port: port that the service runs on
weight: weight of the service
priority: priority of the service
properties: dictionary of properties (or a string holding the bytes for
the text field)
server: fully qualified name for service host (defaults to name)"""
if not name.endswith(type):
raise BadTypeInNameException
self.type = type
self.name = name
self.address = address
self.port = port
self.weight = weight
self.priority = priority
if server:
self.server = server
else:
self.server = name
self.setProperties(properties)
def setProperties(self, properties):
"""Sets properties and text of this info from a dictionary"""
if isinstance(properties, dict):
self.properties = properties
list = []
result = ''
for key in properties:
value = properties[key]
if value is None:
suffix = ''
elif isinstance(value, str):
suffix = value
elif isinstance(value, int):
if value:
suffix = 'true'
else:
suffix = 'false'
else:
suffix = ''
list.append('='.join((key, suffix)))
for item in list:
result = ''.join((result, struct.pack('!c', chr(len(item))),
item))
self.text = result
else:
self.text = properties
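    # Illustrative encoding: properties {'a': 'test'} serialize to the
    # single length-prefixed entry '\x06a=test', the DNS-SD TXT record
    # wire format that setText() below decodes.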
def setText(self, text):
"""Sets properties and text given a text field"""
self.text = text
try:
result = {}
end = len(text)
index = 0
strs = []
while index < end:
length = ord(text[index])
index += 1
strs.append(text[index:index + length])
index += length
for s in strs:
eindex = s.find('=')
if eindex == -1:
# No equals sign at all
key = s
value = 0
else:
key = s[:eindex]
value = s[eindex + 1:]
if value == 'true':
value = 1
elif value == 'false' or not value:
value = 0
# Only update non-existent properties
if key and result.get(key) is None:
result[key] = value
self.properties = result
except Exception:
traceback.print_exc()
self.properties = None
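    # Illustrative decoding (record text is hypothetical): calling
    # setText('\x06a=test\x06b=true') leaves properties equal to
    # {'a': 'test', 'b': 1}; a bare key without '=' decodes to value 0.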
def getType(self):
"""Type accessor"""
return self.type
def getName(self):
"""Name accessor"""
if self.type is not None and self.name.endswith("." + self.type):
return self.name[:len(self.name) - len(self.type) - 1]
return self.name
def getAddress(self):
"""Address accessor"""
return self.address
def getPort(self):
"""Port accessor"""
return self.port
def getPriority(self):
"""Priority accessor"""
return self.priority
def getWeight(self):
"""Weight accessor"""
return self.weight
def getProperties(self):
"""Properties accessor"""
return self.properties
def getText(self):
"""Text accessor"""
return self.text
def getServer(self):
"""Server accessor"""
return self.server
def updateRecord(self, zeroconf, now, record):
"""Updates service information from a DNS record"""
if record is not None and not record.isExpired(now):
if record.type == _TYPE_A:
#if record.name == self.name:
if record.name == self.server:
self.address = record.address
elif record.type == _TYPE_SRV:
if record.name == self.name:
self.server = record.server
self.port = record.port
self.weight = record.weight
self.priority = record.priority
#self.address = None
self.updateRecord(zeroconf, now,
zeroconf.cache.getByDetails(self.server,
_TYPE_A, _CLASS_IN))
elif record.type == _TYPE_TXT:
if record.name == self.name:
self.setText(record.text)
def request(self, zeroconf, timeout):
"""Returns true if the service could be discovered on the
network, and updates this object with details discovered.
"""
now = currentTimeMillis()
delay = _LISTENER_TIME
next = now + delay
last = now + timeout
result = 0
try:
zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY,
_CLASS_IN))
while (self.server is None or self.address is None or
self.text is None):
if last <= now:
return 0
if next <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.addQuestion(DNSQuestion(self.name, _TYPE_SRV,
_CLASS_IN))
out.addAnswerAtTime(
zeroconf.cache.getByDetails(self.name,
_TYPE_SRV,
_CLASS_IN),
now)
out.addQuestion(DNSQuestion(self.name, _TYPE_TXT,
_CLASS_IN))
out.addAnswerAtTime(
zeroconf.cache.getByDetails(self.name, _TYPE_TXT,
_CLASS_IN),
now)
if self.server is not None:
out.addQuestion(
DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
out.addAnswerAtTime(
zeroconf.cache.getByDetails(self.server, _TYPE_A,
_CLASS_IN),
now)
zeroconf.send(out)
next = now + delay
delay = delay * 2
zeroconf.wait(min(next, last) - now)
now = currentTimeMillis()
result = 1
finally:
zeroconf.removeListener(self)
return result
def __eq__(self, other):
"""Tests equality of service name"""
if isinstance(other, ServiceInfo):
return other.name == self.name
return 0
def __ne__(self, other):
"""Non-equality test"""
return not self.__eq__(other)
def __repr__(self):
"""String representation"""
result = ("service[%s,%s:%s," %
(self.name, socket.inet_ntoa(self.getAddress()), self.port))
if self.text is None:
result += "None"
else:
if len(self.text) < 20:
result += self.text
else:
result += self.text[:17] + "..."
result += "]"
return result
class Zeroconf(object):
"""Implementation of Zeroconf Multicast DNS Service Discovery
Supports registration, unregistration, queries and browsing.
"""
def __init__(self, bindaddress=None):
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads."""
globals()['_GLOBAL_DONE'] = 0
if bindaddress is None:
self.intf = socket.gethostbyname(socket.gethostname())
else:
self.intf = bindaddress
self.group = ('', _MDNS_PORT)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except Exception:
# SO_REUSEADDR should be equivalent to SO_REUSEPORT for
# multicast UDP sockets (p 731, "TCP/IP Illustrated,
# Volume 2"), but some BSD-derived systems require
# SO_REUSEPORT to be specified explicitly. Also, not all
# versions of Python have SO_REUSEPORT available. So
# if you're on a BSD-based system, and haven't upgraded
# to Python 2.3 yet, you may find this library doesn't
# work as expected.
#
pass
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, "\xff")
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, "\x01")
try:
self.socket.bind(self.group)
except Exception:
# Some versions of linux raise an exception even though
# SO_REUSEADDR and SO_REUSEPORT have been set, so ignore it
pass
self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
self.listeners = []
self.browsers = []
self.services = {}
self.servicetypes = {}
self.cache = DNSCache()
self.condition = threading.Condition()
self.engine = Engine(self)
self.listener = Listener(self)
self.reaper = Reaper(self)
def isLoopback(self):
return self.intf.startswith("127.0.0.1")
def isLinklocal(self):
return self.intf.startswith("169.254.")
def wait(self, timeout):
"""Calling thread waits for a given number of milliseconds or
until notified."""
self.condition.acquire()
self.condition.wait(timeout / 1000)
self.condition.release()
def notifyAll(self):
"""Notifies all waiting threads"""
self.condition.acquire()
self.condition.notifyAll()
self.condition.release()
def getServiceInfo(self, type, name, timeout=3000):
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds."""
info = ServiceInfo(type, name)
if info.request(self, timeout):
return info
return None
def addServiceListener(self, type, listener):
"""Adds a listener for a particular service type. This object
will then have its updateRecord method called when information
arrives for that type."""
self.removeServiceListener(listener)
self.browsers.append(ServiceBrowser(self, type, listener))
def removeServiceListener(self, listener):
"""Removes a listener from the set that is currently listening."""
for browser in self.browsers:
if browser.listener == listener:
browser.cancel()
del browser
def registerService(self, info, ttl=_DNS_TTL):
"""Registers service information to the network with a default TTL
of 60 seconds. Zeroconf will then respond to requests for
information for that service. The name of the service may be
changed if needed to make it unique on the network."""
self.checkService(info)
self.services[info.name.lower()] = info
if info.type in self.servicetypes:
self.servicetypes[info.type] += 1
else:
self.servicetypes[info.type] = 1
now = currentTimeMillis()
nexttime = now
i = 0
while i < 3:
if now < nexttime:
self.wait(nexttime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR,
_CLASS_IN, ttl, info.name), 0)
out.addAnswerAtTime(
DNSService(
info.name, _TYPE_SRV,
_CLASS_IN, ttl, info.priority, info.weight, info.port,
info.server),
0)
out.addAnswerAtTime(
DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text),
0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
_CLASS_IN, ttl, info.address), 0)
self.send(out)
i += 1
nexttime += _REGISTER_TIME
def unregisterService(self, info):
"""Unregister a service."""
try:
del self.services[info.name.lower()]
if self.servicetypes[info.type] > 1:
self.servicetypes[info.type] -= 1
else:
del self.servicetypes[info.type]
except KeyError:
pass
now = currentTimeMillis()
nexttime = now
i = 0
while i < 3:
if now < nexttime:
self.wait(nexttime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswerAtTime(
DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.addAnswerAtTime(
DNSService(info.name, _TYPE_SRV,
_CLASS_IN, 0, info.priority, info.weight, info.port,
info.name),
0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT,
_CLASS_IN, 0, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
_CLASS_IN, 0, info.address), 0)
self.send(out)
i += 1
nexttime += _UNREGISTER_TIME
def unregisterAllServices(self):
"""Unregister all registered services."""
if len(self.services) > 0:
now = currentTimeMillis()
nexttime = now
i = 0
while i < 3:
if now < nexttime:
self.wait(nexttime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in self.services.values():
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR,
_CLASS_IN, 0, info.name), 0)
out.addAnswerAtTime(
DNSService(info.name, _TYPE_SRV,
_CLASS_IN, 0, info.priority, info.weight,
info.port, info.server),
0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT,
_CLASS_IN, 0, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
_CLASS_IN, 0, info.address), 0)
self.send(out)
i += 1
nexttime += _UNREGISTER_TIME
def checkService(self, info):
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
now = currentTimeMillis()
nexttime = now
i = 0
while i < 3:
for record in self.cache.entriesWithName(info.type):
if (record.type == _TYPE_PTR and not record.isExpired(now) and
record.alias == info.name):
if (info.name.find('.') < 0):
info.name = ("%w.[%s:%d].%s" %
(info.name, info.address, info.port, info.type))
self.checkService(info)
return
raise NonUniqueNameException
if now < nexttime:
self.wait(nexttime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
self.debug = out
out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
out.addAuthoritativeAnswer(DNSPointer(info.type, _TYPE_PTR,
_CLASS_IN, _DNS_TTL, info.name))
self.send(out)
i += 1
nexttime += _CHECK_TIME
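    # Illustrative note: ServiceInfo enforces names of the form
    # "<instance>.<type>", so info.name always contains a dot and a
    # conflict normally raises NonUniqueNameException; the rename branch
    # above only fires for dot-free names, rewriting them as
    # "<name>.[<address>:<port>].<type>" before probing again.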
def addListener(self, listener, question):
"""Adds a listener for a given question. The listener will have
its updateRecord method called when information is available to
answer the question."""
now = currentTimeMillis()
self.listeners.append(listener)
if question is not None:
for record in self.cache.entriesWithName(question.name):
if question.answeredBy(record) and not record.isExpired(now):
listener.updateRecord(self, now, record)
self.notifyAll()
def removeListener(self, listener):
"""Removes a listener."""
try:
self.listeners.remove(listener)
self.notifyAll()
except Exception:
pass
def updateRecord(self, now, rec):
"""Used to notify listeners of new information that has updated
a record."""
for listener in self.listeners:
listener.updateRecord(self, now, rec)
self.notifyAll()
def handleResponse(self, msg):
"""Deal with incoming response packets. All answers
are held in the cache, and listeners are notified."""
now = currentTimeMillis()
for record in msg.answers:
expired = record.isExpired(now)
if record in self.cache.entries():
if expired:
self.cache.remove(record)
else:
entry = self.cache.get(record)
if entry is not None:
entry.resetTTL(record)
record = entry
else:
self.cache.add(record)
self.updateRecord(now, record)
def handleQuery(self, msg, addr, port):
"""Deal with incoming query packets. Provides a response if
possible."""
out = None
# Support unicast client responses
#
if port != _MDNS_PORT:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
for question in msg.questions:
out.addQuestion(question)
for question in msg.questions:
if question.type == _TYPE_PTR:
if question.name == "_services._dns-sd._udp.local.":
for stype in self.servicetypes.keys():
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswer(msg,
DNSPointer(
"_services._dns-sd._udp.local.",
_TYPE_PTR, _CLASS_IN,
_DNS_TTL, stype))
for service in self.services.values():
if question.name == service.type:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR,
_CLASS_IN, _DNS_TTL, service.name))
else:
try:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
# Answer A record queries for any service addresses we know
if question.type == _TYPE_A or question.type == _TYPE_ANY:
for service in self.services.values():
if service.server == question.name.lower():
out.addAnswer(msg,
DNSAddress(question.name, _TYPE_A,
_CLASS_IN | _CLASS_UNIQUE,
_DNS_TTL, service.address))
service = self.services.get(question.name.lower(), None)
if not service: continue
if (question.type == _TYPE_SRV or
question.type == _TYPE_ANY):
out.addAnswer(msg,
DNSService(question.name, _TYPE_SRV,
_CLASS_IN | _CLASS_UNIQUE,
_DNS_TTL, service.priority,
service.weight, service.port,
service.server))
if (question.type == _TYPE_TXT or
question.type == _TYPE_ANY):
out.addAnswer(msg, DNSText(question.name, _TYPE_TXT,
_CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
if question.type == _TYPE_SRV:
out.addAdditionalAnswer(
DNSAddress(service.server, _TYPE_A,
_CLASS_IN | _CLASS_UNIQUE,
_DNS_TTL, service.address))
except Exception:
traceback.print_exc()
if out is not None and out.answers:
out.id = msg.id
self.send(out, addr, port)
def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
"""Sends an outgoing packet."""
# This is a quick test to see if we can parse the packets we generate
#temp = DNSIncoming(out.packet())
try:
self.socket.sendto(out.packet(), 0, (addr, port))
except Exception:
# Ignore this, it may be a temporary loss of network connection
pass
def close(self):
"""Ends the background threads, and prevent this instance from
servicing further queries."""
if globals()['_GLOBAL_DONE'] == 0:
globals()['_GLOBAL_DONE'] = 1
self.notifyAll()
self.engine.notify()
self.unregisterAllServices()
self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
self.socket.close()
# Test a few module features, including service registration, service
# query (for Zoe), and service unregistration.
if __name__ == '__main__':
print("Multicast DNS Service Discovery for Python, version", __version__)
r = Zeroconf()
print("1. Testing registration of a service...")
desc = {'version':'0.10','a':'test value', 'b':'another value'}
info = ServiceInfo("_http._tcp.local.",
"My Service Name._http._tcp.local.",
socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
print(" Registering service...")
r.registerService(info)
print(" Registration done.")
print("2. Testing query of service information...")
print(" Getting ZOE service:",
str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local.")))
print(" Query done.")
print("3. Testing query of own service...")
print(" Getting self:",
str(r.getServiceInfo("_http._tcp.local.",
"My Service Name._http._tcp.local.")))
print(" Query done.")
print("4. Testing unregister of service information...")
r.unregisterService(info)
print(" Unregister done.")
r.close()
| gpl-2.0 |
xrg/django-static-gitified | tests/urls.py | 91 | 1189 | from django.conf.urls import patterns, include
urlpatterns = patterns('',
# test_client modeltest urls
(r'^test_client/', include('modeltests.test_client.urls')),
(r'^test_client_regress/', include('regressiontests.test_client_regress.urls')),
# File upload test views
(r'^file_uploads/', include('regressiontests.file_uploads.urls')),
# Always provide the auth system login and logout views
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
# test urlconf for {% url %} template tag
(r'^url_tag/', include('regressiontests.templates.urls')),
# django built-in views
(r'^views/', include('regressiontests.views.urls')),
# test urlconf for middleware tests
(r'^middleware/', include('regressiontests.middleware.urls')),
# admin widget tests
    (r'^widget_admin/', include('regressiontests.admin_widgets.urls')),
# admin custom URL tests
(r'^custom_urls/', include('regressiontests.admin_custom_urls.urls')),
# admin scripts tests
(r'^admin_scripts/', include('regressiontests.admin_scripts.urls')),
)
| bsd-3-clause |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/reportlab/pdfbase/_fontdata_enc_macexpert.py | 56 | 3058 | MacExpertEncoding = (None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
'space', 'exclamsmall', 'Hungarumlautsmall', 'centoldstyle', 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall',
'Acutesmall', 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', 'comma', 'hyphen',
'period', 'fraction', 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', 'nineoldstyle', 'colon', 'semicolon', None,
'threequartersemdash', None, 'questionsmall', None, None, None, None, 'Ethsmall', None, None, 'onequarter',
'onehalf', 'threequarters', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', 'onethird', 'twothirds',
None, None, None, None, None, None, 'ff', 'fi', 'fl', 'ffi', 'ffl', 'parenleftinferior', None,
'parenrightinferior', 'Circumflexsmall', 'hypheninferior', 'Gravesmall', 'Asmall', 'Bsmall', 'Csmall', 'Dsmall',
'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall',
'Psmall', 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall', 'Zsmall',
'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', None, None, 'asuperior', 'centsuperior', None, None, None,
None, 'Aacutesmall', 'Agravesmall', 'Acircumflexsmall', 'Adieresissmall', 'Atildesmall', 'Aringsmall',
'Ccedillasmall', 'Eacutesmall', 'Egravesmall', 'Ecircumflexsmall', 'Edieresissmall', 'Iacutesmall', 'Igravesmall',
'Icircumflexsmall', 'Idieresissmall', 'Ntildesmall', 'Oacutesmall', 'Ogravesmall', 'Ocircumflexsmall',
'Odieresissmall', 'Otildesmall', 'Uacutesmall', 'Ugravesmall', 'Ucircumflexsmall', 'Udieresissmall', None,
'eightsuperior', 'fourinferior', 'threeinferior', 'sixinferior', 'eightinferior', 'seveninferior', 'Scaronsmall',
None, 'centinferior', 'twoinferior', None, 'Dieresissmall', None, 'Caronsmall', 'osuperior', 'fiveinferior', None,
'commainferior', 'periodinferior', 'Yacutesmall', None, 'dollarinferior', None, None, 'Thornsmall', None,
'nineinferior', 'zeroinferior', 'Zcaronsmall', 'AEsmall', 'Oslashsmall', 'questiondownsmall', 'oneinferior',
'Lslashsmall', None, None, None, None, None, None, 'Cedillasmall', None, None, None, None, None, 'OEsmall',
'figuredash', 'hyphensuperior', None, None, None, None, 'exclamdownsmall', None, 'Ydieresissmall', None,
'onesuperior', 'twosuperior', 'threesuperior', 'foursuperior', 'fivesuperior', 'sixsuperior', 'sevensuperior',
'ninesuperior', 'zerosuperior', None, 'esuperior', 'rsuperior', 'tsuperior', None, None, 'isuperior', 'ssuperior',
'dsuperior', None, None, None, None, None, 'lsuperior', 'Ogoneksmall', 'Brevesmall', 'Macronsmall', 'bsuperior',
'nsuperior', 'msuperior', 'commasuperior', 'periodsuperior', 'Dotaccentsmall', 'Ringsmall', None, None, None, None)
| gpl-3.0 |
Komzpa/omim | 3party/Alohalytics/tests/googletest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are limitations of the
script, not problems with Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
#   Extract three integers separated by periods and surrounded by square
#   brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| apache-2.0 |
aganezov/bg | bg/distances.py | 1 | 1464 | # -*- coding: utf-8 -*-
import networkx as nx
def get_all_cycles(breakpoint_graph):
visited = set()
cycles = []
for vertex in breakpoint_graph.nodes():
if vertex in visited:
continue
try:
cycle = nx.find_cycle(breakpoint_graph.bg, vertex)
new = False
for v1, v2, dir in cycle:
if v1 not in visited:
new = True
visited.add(v1)
if new:
cycles.append(cycle)
        except nx.NetworkXNoCycle:
            # nx.find_cycle raises when no cycle is reachable from `vertex`
            pass
return cycles
def get_all_paths(breakpoint_graph):
ccs = []
for cc in breakpoint_graph.connected_components_subgraphs(copy=False):
if any(map(lambda vertex: vertex.is_irregular_vertex, cc.nodes())):
ccs.append(cc)
continue
return ccs
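# Connected components that contain an irregular vertex correspond to paths
# (rather than cycles) in the adjacency graph, which is why only those
# components are collected above.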
def scj(breakpoint_graph):
number_of_genes = len([v for v in breakpoint_graph.nodes() if v.is_regular_vertex]) / 2
cycles = get_all_cycles(breakpoint_graph=breakpoint_graph)
two_cycles = [cycle for cycle in cycles if len(cycle) == 2]
adjacency_graph_two_cycles = [cycle for cycle in two_cycles if all(map(lambda c_entry: c_entry[0].is_regular_vertex, cycle))]
adjacency_graph_paths = get_all_paths(breakpoint_graph=breakpoint_graph)
number_of_paths = len(adjacency_graph_paths)
return int(2 * number_of_genes - 2 * len(adjacency_graph_two_cycles) - number_of_paths)
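# The value returned above is the single-cut-or-join (SCJ) distance
#     d_SCJ = 2*N - 2*A - P
# where N is the number of genes, A the number of adjacency-graph 2-cycles
# (adjacencies shared by both genomes), and P the number of paths.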
single_cut_and_join_distance = scj
| mit |
psf/black | src/black/numerics.py | 1 | 1843 | """
Formatting numeric literals.
"""
from blib2to3.pytree import Leaf
def format_hex(text: str) -> str:
"""
Formats a hexadecimal string like "0x12B3"
"""
before, after = text[:2], text[2:]
return f"{before}{after.upper()}"
def format_scientific_notation(text: str) -> str:
"""Formats a numeric string utilizing scentific notation"""
before, after = text.split("e")
sign = ""
if after.startswith("-"):
after = after[1:]
sign = "-"
elif after.startswith("+"):
after = after[1:]
before = format_float_or_int_string(before)
return f"{before}e{sign}{after}"
def format_long_or_complex_number(text: str) -> str:
"""Formats a long or complex string like `10L` or `10j`"""
number = text[:-1]
suffix = text[-1]
# Capitalize in "2L" because "l" looks too similar to "1".
if suffix == "l":
suffix = "L"
return f"{format_float_or_int_string(number)}{suffix}"
def format_float_or_int_string(text: str) -> str:
"""Formats a float string like "1.0"."""
if "." not in text:
return text
before, after = text.split(".")
return f"{before or 0}.{after or 0}"
def normalize_numeric_literal(leaf: Leaf) -> None:
"""Normalizes numeric (float, int, and complex) literals.
All letters used in the representation are normalized to lowercase (except
in Python 2 long literals).
"""
text = leaf.value.lower()
if text.startswith(("0o", "0b")):
# Leave octal and binary literals alone.
pass
elif text.startswith("0x"):
text = format_hex(text)
elif "e" in text:
text = format_scientific_notation(text)
elif text.endswith(("j", "l")):
text = format_long_or_complex_number(text)
else:
text = format_float_or_int_string(text)
leaf.value = text
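
# A minimal, hedged sanity check of the pure-string helpers above; it skips
# normalize_numeric_literal, which needs a real blib2to3 Leaf to operate on.
if __name__ == "__main__":
    assert format_hex("0x12b3f") == "0x12B3F"
    assert format_scientific_notation("1.5e+10") == "1.5e10"  # "+" is dropped
    assert format_scientific_notation("1.e-3") == "1.0e-3"    # "-" is kept
    assert format_long_or_complex_number("10l") == "10L"
    assert format_float_or_int_string(".5") == "0.5"
    assert format_float_or_int_string("5.") == "5.0"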
| mit |
xcgoner/dist-mxnet | python/mxnet/autograd.py | 7 | 14381 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Autograd for NDArray."""
from __future__ import absolute_import
from __future__ import division
from threading import Lock
import traceback
import ctypes
from ctypes import c_int, c_void_p, CFUNCTYPE, POINTER, cast
from .base import _LIB, check_call, string_types
from .base import mx_uint, NDArrayHandle, c_array, MXCallbackList, SymbolHandle
from .ndarray import NDArray
from .symbol import _GRAD_REQ_MAP, Symbol
def set_recording(is_recording): #pylint: disable=redefined-outer-name
"""Set status to recording/not recording. When recording, graph will be constructed
for gradient computation.
Parameters
----------
is_recording: bool
Returns
-------
previous state before this set.
"""
prev = ctypes.c_int()
check_call(_LIB.MXAutogradSetIsRecording(
ctypes.c_int(is_recording), ctypes.byref(prev)))
return bool(prev.value)
def set_training(train_mode): #pylint: disable=redefined-outer-name
"""Set status to training/predicting. This affects ctx.is_train in operator
running context. For example, Dropout will drop inputs randomly when
train_mode=True while simply passing through if train_mode=False.
Parameters
----------
train_mode: bool
Returns
-------
previous state before this set.
"""
prev = ctypes.c_int()
check_call(_LIB.MXAutogradSetIsTraining(
ctypes.c_int(train_mode), ctypes.byref(prev)))
return bool(prev.value)
def is_recording():
"""Get status on recording/not recording.
Returns
-------
Current state of recording.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsRecording(ctypes.byref(curr)))
return curr.value
def is_training():
"""Get status on training/predicting.
Returns
-------
Current state of training/predicting.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsTraining(ctypes.byref(curr)))
return curr.value
class _RecordingStateScope(object):
"""Scope for managing training state.
Example::
with _RecordingStateScope(True, True):
y = model(x)
backward([y])
"""
def __init__(self, is_record, train_mode): #pylint: disable=redefined-outer-name
self._enter_is_record = is_record
self._enter_train_mode = train_mode
self._prev_is_record = None
self._prev_train_mode = None
def __enter__(self):
if self._enter_is_record is not None:
self._prev_is_record = set_recording(self._enter_is_record)
if self._enter_train_mode is not None:
self._prev_train_mode = set_training(self._enter_train_mode)
def __exit__(self, ptype, value, trace):
if self._enter_is_record is not None and self._prev_is_record != self._enter_is_record:
set_recording(self._prev_is_record)
if self._enter_train_mode is not None and self._prev_train_mode != self._enter_train_mode:
set_training(self._prev_train_mode)
def record(train_mode=True): #pylint: disable=redefined-outer-name
"""Returns an autograd recording scope context to be used in 'with' statement
and captures code that needs gradients to be calculated.
.. note:: When forwarding with train_mode=False, the corresponding backward
should also use train_mode=False, otherwise gradient is undefined.
Example::
with autograd.record():
y = model(x)
backward([y])
metric.update(...)
optim.step(...)
Parameters
----------
train_mode: bool, default True
Whether the forward pass is in training or predicting mode. This controls the behavior
of some layers such as Dropout, BatchNorm.
"""
return _RecordingStateScope(True, train_mode)
def pause(train_mode=False): #pylint: disable=redefined-outer-name
"""Returns a scope context to be used in 'with' statement for codes that do not need
gradients to be calculated.
Example::
with autograd.record():
y = model(x)
backward([y])
with autograd.pause():
# testing, IO, gradient updates...
Parameters
----------
train_mode: bool, default False
Whether to do forward for training or predicting.
"""
return _RecordingStateScope(False, train_mode)
def train_mode():
"""Returns a scope context to be used in 'with' statement
in which forward pass behavior is set to training mode,
without changing the recording states.
Example::
y = model(x)
with autograd.train_mode():
y = dropout(y)
"""
return _RecordingStateScope(None, True)
def predict_mode():
"""Returns a scope context to be used in 'with' statement
in which forward pass behavior is set to inference mode,
without changing the recording states.
Example::
with autograd.record():
y = model(x)
with autograd.predict_mode():
y = sampling(y)
backward([y])
"""
return _RecordingStateScope(None, False)
def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
"""
if isinstance(variables, NDArray):
assert isinstance(gradients, NDArray)
variables = [variables]
gradients = [gradients]
variable_handles = []
gradient_handles = []
for var, gradvar in zip(variables, gradients):
variable_handles.append(var.handle)
gradient_handles.append(gradvar.handle)
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variable_handles),
c_array(NDArrayHandle, variable_handles),
c_array(mx_uint, grad_reqs),
c_array(NDArrayHandle, gradient_handles)))
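# Hedged usage sketch tying mark_variables, record and backward together
# (assumes the conventional ``import mxnet as mx`` alias; shown as a comment
# rather than executable code):
#
#   x = mx.nd.array([1.0, 2.0, 3.0])
#   dx = mx.nd.zeros_like(x)
#   mark_variables(x, dx)
#   with record():
#       y = x * x
#   backward(y)
#   # dx now holds dy/dx = 2 * x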
def backward(heads, head_grads=None, retain_graph=False, train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
train_mode: bool, optional
Whether to do backward for training or predicting.
"""
if isinstance(heads, NDArray):
assert head_grads is None or isinstance(head_grads, NDArray)
heads = [heads]
head_grads = [head_grads] if head_grads is not None else None
output_handles = []
for arr in heads:
output_handles.append(arr.handle)
if head_grads is None:
check_call(_LIB.MXAutogradBackwardEx(
len(output_handles),
c_array(NDArrayHandle, output_handles),
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(train_mode)))
return
ograd_handles = []
for arr in head_grads:
if arr is not None:
ograd_handles.append(arr.handle)
else:
ograd_handles.append(NDArrayHandle(0))
assert len(ograd_handles) == len(output_handles), \
"heads and head_grads must have the same length"
check_call(_LIB.MXAutogradBackwardEx(
len(output_handles),
c_array(NDArrayHandle, output_handles),
c_array(NDArrayHandle, ograd_handles),
ctypes.c_int(retain_graph),
ctypes.c_int(train_mode)))
def get_symbol(x):
"""Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
"""
hdl = SymbolHandle()
check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
return Symbol(hdl)
class Function(object):
"""User-defined differentiable function.
Function allows defining both forward and backward computation for
    custom operators. During gradient computation, the user-defined
    backward function will be used instead of the default chain rule.
You can also cast to numpy array and back for some operations in
forward and backward.
For example, a stable sigmoid function can be defined as::
class sigmoid(Function):
def forward(self, x):
y = 1 / (1 + mx.nd.exp(-x))
self.save_for_backward(y)
return y
def backward(self, dy):
# backward takes as many inputs as forward's return value,
# and returns as many NDArrays as forward's arguments.
y, = self.saved_tensors
return y * (1-y)
"""
_bwd_functype = CFUNCTYPE(c_int, c_int, c_int, POINTER(c_void_p),
POINTER(c_int), c_int, c_void_p)
_del_functype = CFUNCTYPE(c_int, c_void_p)
class _Registry(object):
"""CustomOp registry."""
def __init__(self):
self.ref_holder = {}
self.counter = 0
self.lock = Lock()
def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur
_registry = _Registry()
def __init__(self):
self._used = False
self.saved_tensors = ()
def save_for_backward(self, *args):
self.saved_tensors = args
def __call__(self, *inputs):
assert not self._used, \
"Each Function instance can only be called once. "\
"Please create another instance."
self._used = True
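        # Run the user's forward with recording paused so its internal
        # operations are not taped; the custom backward below is only
        # registered if the caller was recording in the first place.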
prev_recording = set_recording(False)
outputs = self.forward(*inputs)
set_recording(prev_recording)
if not prev_recording:
return outputs
ret_outputs = outputs
if isinstance(outputs, NDArray):
outputs = (outputs,)
key = Function._registry.inc()
def backward_entry(num_ograds, num_igrads, ptrs, reqs, is_train, _):
"""entry point for backward."""
# pylint: disable=W0613
try:
output_grads = [NDArray(ctypes.cast(i, NDArrayHandle), writable=False) \
for i in ptrs[:num_ograds]]
input_grads = [NDArray(ctypes.cast(i, NDArrayHandle), writable=True) \
for i in ptrs[num_ograds:num_ograds+num_igrads]]
reqs = [reqs[i] for i in range(num_igrads)]
rets = self.backward(*output_grads)
if isinstance(rets, NDArray):
rets = (rets,)
assert len(rets) == len(input_grads), \
"%s.backward must return exactly the same number " \
"of NDArrays as the number of NDArrays arguments to forward." \
"Expecting %d got %d"%(self.__class__.name, len(input_grads), len(rets))
for igrad, ret, req in zip(input_grads, rets, reqs):
assert isinstance(ret, NDArray), \
"autograd.Function.backward must return NDArrays, not %s"%type(ret)
                    if req == 0:  # null: this input needs no gradient
                        continue
                    elif req == 1 or req == 2:  # write or inplace
                        igrad[:] = ret
                    elif req == 3:  # add (kAddTo); reqs entries are ints
                        igrad[:] += ret
except Exception: # pylint: disable=broad-except
print('Error in Function.backward: %s' % traceback.format_exc())
return False
return True
def delete_entry(_):
"""C Callback for CustomFunction::delete"""
try:
del Function._registry.ref_holder[key]
except Exception: # pylint: disable=broad-except
print('Error in autograd.Function.delete: %s' % traceback.format_exc())
return False
return True
input_handles = [x.handle for x in inputs]
output_handles = [x.handle for x in outputs]
callbacks = [Function._bwd_functype(backward_entry),
Function._del_functype(delete_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
context = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, [None]*len(callbacks)),
POINTER(c_void_p)))
check_call(_LIB.MXCustomFunctionRecord(
c_int(len(inputs)),
c_array(NDArrayHandle, input_handles),
c_int(len(outputs)),
c_array(NDArrayHandle, output_handles),
ctypes.byref(context)))
Function._registry.ref_holder[key] = context
return ret_outputs
def forward(self, *inputs):
"""Forward computation."""
raise NotImplementedError
def backward(self, *output_grads):
"""Backward computation.
Takes as many inputs as forward's outputs,
and returns as many NDArrays as forward's inputs.
"""
raise NotImplementedError
| apache-2.0 |
prakashpp/trytond-customs-value | tests/test_product.py | 2 | 2895 | # -*- coding: utf-8 -*-
import sys
import os
import unittest
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, DB_NAME, USER, CONTEXT
from trytond.transaction import Transaction
from decimal import Decimal
DIR = os.path.abspath(os.path.normpath(os.path.join(
__file__, '..', '..', '..', '..', '..', 'trytond'
)))
if os.path.isdir(DIR):
sys.path.insert(0, os.path.dirname(DIR))
class TestBase(unittest.TestCase):
"""
Base Test Case
"""
def setUp(self):
"""
Set up data used in the tests.
        This method is called before each test function execution.
"""
trytond.tests.test_tryton.install_module('customs_value')
self.Product = POOL.get('product.product')
self.Template = POOL.get('product.template')
self.Uom = POOL.get('product.uom')
def create_product_template(self):
"""
Creates default product template
"""
self.uom, = self.Uom.search([('name', '=', 'Unit')])
return self.Template.create([{
'name': 'product',
'list_price': Decimal('20'),
'cost_price': Decimal('5'),
'default_uom': self.uom.id,
}])[0]
class TestProduct(TestBase):
'''
Test Product
'''
def test0010_check_product_custom_value(self):
"""
Check custom value for product
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
template = self.create_product_template()
product, = self.Product.create([{
'template': template,
}])
self.assertEqual(
product.use_list_price_as_customs_value, True
)
self.assertEqual(product.customs_value_used, product.list_price)
product, = self.Product.create([{
'template': template,
'customs_value': Decimal('50'),
'use_list_price_as_customs_value': False
}])
self.assertEqual(
product.use_list_price_as_customs_value, False
)
self.assertEqual(product.customs_value_used, product.customs_value)
product, = self.Product.create([{
'template': template,
'customs_value': Decimal('50'),
'use_list_price_as_customs_value': True
}])
self.assertEqual(
product.use_list_price_as_customs_value, True
)
self.assertEqual(product.customs_value_used, product.list_price)
def suite():
"""
Define suite
"""
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(TestProduct)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| bsd-3-clause |
ecanzonieri/pyleus | examples/apparent_temperature/apparent_temperature/wind_speed_generator.py | 9 | 1135 | from __future__ import absolute_import
import logging
from collections import namedtuple
import random
from apparent_temperature.measure_generator import MeasureGeneratorSpout
log = logging.getLogger('wind_speed_generator')
WindSpeedMeasure = namedtuple(
"WindSpeedMeasure",
"id_sensor timestamp wind_speed")
class WindSpeedSpout(MeasureGeneratorSpout):
OUTPUT_FIELDS = WindSpeedMeasure
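    # id_sensor -> (mean, standard deviation) of the simulated wind speed in
    # mph; each pair is unpacked as the arguments to random.normalvariate in
    # measure() below.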
SENSORS = {
1042: (15, 8),
1077: (8, 6),
1078: (3, 7),
1079: (8, 5),
1082: (11, 4),
1126: (28, 9),
1156: (22, 5),
1178: (12, 12),
1201: (34, 18),
1234: (12, 5),
1312: (0, 12),
1448: (20, 8),
2089: (6, 6),
}
def measure(self, *args):
return max(0, random.normalvariate(*args))
def log(self, measure):
log.debug("id: {0}, time: {1}, wind-speed: {2} mph"
.format(*measure))
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
filename='/tmp/apparent_temperature_wind_speed.log',
filemode='a',
)
WindSpeedSpout().run()
| apache-2.0 |
rorasa/KeeTerm | Crypto/SelfTest/Cipher/test_XOR.py | 119 | 2538 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/XOR.py: Self-test for the XOR "cipher"
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.XOR"""
import unittest
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key, description) tuples.
test_data = [
# Test vectors written from scratch. (Nobody posts XOR test vectors on the web? How disappointing.)
('01', '01',
'00',
'zero key'),
('0102040810204080', '0003050911214181',
'01',
'1-byte key'),
('0102040810204080', 'cda8c8a2dc8a8c2a',
'ccaa',
'2-byte key'),
('ff'*64, 'fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0efeeedecebeae9e8e7e6e5e4e3e2e1e0'*2,
'000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f',
'32-byte key'),
]
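# Quick arithmetic check of the 2-byte-key vector above: XOR-ing the
# plaintext with the repeating key 'ccaa' gives 01^cc=cd, 02^aa=a8,
# 04^cc=c8, 08^aa=a2, 10^cc=dc, 20^aa=8a, 40^cc=8c, 80^aa=2a,
# i.e. 'cda8c8a2dc8a8c2a' as listed.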
class TruncationSelfTest(unittest.TestCase):
def runTest(self):
"""33-byte key (should raise ValueError under current implementation)"""
# Crypto.Cipher.XOR previously truncated its inputs at 32 bytes. Now
# it should raise a ValueError if the length is too long.
self.assertRaises(ValueError, XOR.new, "x"*33)
def get_tests(config={}):
global XOR
from Crypto.Cipher import XOR
from common import make_stream_tests
return make_stream_tests(XOR, "XOR", test_data) + [TruncationSelfTest()]
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
madan96/sympy | sympy/solvers/tests/test_solvers.py | 2 | 69392 | from sympy import (
Abs, And, Derivative, Dummy, Eq, Float, Function, Gt, I, Integral,
LambertW, Lt, Matrix, Or, Piecewise, Poly, Q, Rational, S, Symbol,
Wild, acos, asin, atan, atanh, cos, cosh, diff, erf, erfinv, erfc,
erfcinv, exp, im, log, pi, re, sec, sin,
sinh, solve, solve_linear, sqrt, sstr, symbols, sympify, tan, tanh,
root, simplify, atan2, arg, Mul, SparseMatrix, ask, Tuple, nsolve, oo)
from sympy.core.compatibility import range
from sympy.core.function import nfloat
from sympy.solvers import solve_linear_system, solve_linear_system_LU, \
solve_undetermined_coeffs
from sympy.solvers.solvers import _invert, unrad, checksol, posify, _ispow, \
det_quick, det_perm, det_minor, _simple_dens, check_assumptions
from sympy.physics.units import cm
from sympy.polys.rootoftools import CRootOf
from sympy.utilities.pytest import slow, XFAIL, SKIP, raises, skip, ON_TRAVIS
from sympy.utilities.randtest import verify_numerically as tn
from sympy.abc import a, b, c, d, k, h, p, x, y, z, t, q, m
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_swap_back():
f, g = map(Function, 'fg')
fx, gx = f(x), g(x)
assert solve([fx + y - 2, fx - gx - 5], fx, y, gx) == \
{fx: gx + 5, y: -gx - 3}
assert solve(fx + gx*x - 2, [fx, gx]) == {fx: 2, gx: 0}
assert solve(fx + gx**2*x - y, [fx, gx]) == [{fx: y - gx**2*x}]
assert solve([f(1) - 2, x + 2]) == [{x: -2, f(1): 2}]
def guess_solve_strategy(eq, symbol):
try:
solve(eq, symbol)
return True
except (TypeError, NotImplementedError):
return False
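# The "# == GS_*" comments in the tests below record the strategy constants
# that an earlier guess_solve_strategy implementation returned; the shim
# above only verifies that solve() can handle each equation at all.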
def test_guess_poly():
# polynomial equations
assert guess_solve_strategy( S(4), x ) # == GS_POLY
assert guess_solve_strategy( x, x ) # == GS_POLY
assert guess_solve_strategy( x + a, x ) # == GS_POLY
assert guess_solve_strategy( 2*x, x ) # == GS_POLY
assert guess_solve_strategy( x + sqrt(2), x) # == GS_POLY
assert guess_solve_strategy( x + 2**Rational(1, 4), x) # == GS_POLY
assert guess_solve_strategy( x**2 + 1, x ) # == GS_POLY
assert guess_solve_strategy( x**2 - 1, x ) # == GS_POLY
assert guess_solve_strategy( x*y + y, x ) # == GS_POLY
assert guess_solve_strategy( x*exp(y) + y, x) # == GS_POLY
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), x) # == GS_POLY
def test_guess_poly_cv():
# polynomial equations via a change of variable
assert guess_solve_strategy( sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy(
x**Rational(1, 3) + sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy( 4*x*(1 - sqrt(x)), x ) # == GS_POLY_CV_1
# polynomial equation multiplying both sides by x**n
assert guess_solve_strategy( x + 1/x + y, x ) # == GS_POLY_CV_2
def test_guess_rational_cv():
# rational functions
assert guess_solve_strategy( (x + 1)/(x**2 + 2), x) # == GS_RATIONAL
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), y) # == GS_RATIONAL_CV_1
# rational functions via the change of variable y -> x**n
assert guess_solve_strategy( (sqrt(x) + 1)/(x**Rational(1, 3) + sqrt(x) + 1), x ) \
#== GS_RATIONAL_CV_1
def test_guess_transcendental():
    # transcendental functions
assert guess_solve_strategy( exp(x) + 1, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy( 2*cos(x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(
exp(x) + exp(-x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(3**x - 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(-3**x + 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(a*x**b - y, x) # == GS_TRANSCENDENTAL
def test_solve_args():
# equation container, issue 5113
ans = {x: -3, y: 1}
eqs = (x + 5*y - 2, -3*x + 6*y - 15)
assert all(solve(container(eqs), x, y) == ans for container in
(tuple, list, set, frozenset))
assert solve(Tuple(*eqs), x, y) == ans
# implicit symbol to solve for
assert set(solve(x**2 - 4)) == set([S(2), -S(2)])
assert solve([x + y - 3, x - y - 5]) == {x: 4, y: -1}
assert solve(x - exp(x), x, implicit=True) == [exp(x)]
# no symbol to solve for
assert solve(42) == []
assert solve([1, 2]) == []
# duplicate symbols removed
assert solve((x - 3, y + 2), x, y, x) == {x: 3, y: -2}
# unordered symbols
# only 1
assert solve(y - 3, set([y])) == [3]
# more than 1
assert solve(y - 3, set([x, y])) == [{y: 3}]
# multiple symbols: take the first linear solution
assert solve(x + y - 3, [x, y]) == [{x: 3 - y}]
# unless it is an undetermined coefficients system
assert solve(a + b*x - 2, [a, b]) == {a: 2, b: 0}
args = (a + b)*x - b**2 + 2, a, b
assert solve(*args) == \
[(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))]
assert solve(*args, set=True) == \
([a, b], set([(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))]))
assert solve(*args, dict=True) == \
[{b: sqrt(2), a: -sqrt(2)}, {b: -sqrt(2), a: sqrt(2)}]
eq = a*x**2 + b*x + c - ((x - h)**2 + 4*p*k)/4/p
flags = dict(dict=True)
assert solve(eq, [h, p, k], exclude=[a, b, c], **flags) == \
[{k: c - b**2/(4*a), h: -b/(2*a), p: 1/(4*a)}]
flags.update(dict(simplify=False))
assert solve(eq, [h, p, k], exclude=[a, b, c], **flags) == \
[{k: (4*a*c - b**2)/(4*a), h: -b/(2*a), p: 1/(4*a)}]
# failing undetermined system
assert solve(a*x + b**2/(x + 4) - 3*x - 4/x, a, b) == \
[{a: (-b**2*x + 3*x**3 + 12*x**2 + 4*x + 16)/(x**2*(x + 4))}]
# failed single equation
assert solve(1/(1/x - y + exp(y))) == []
raises(
NotImplementedError, lambda: solve(exp(x) + sin(x) + exp(y) + sin(y)))
# failed system
# -- when no symbols given, 1 fails
assert solve([y, exp(x) + x]) == [{x: -LambertW(1), y: 0}]
# both fail
assert solve(
(exp(x) - x, exp(y) - y)) == [{x: -LambertW(-1), y: -LambertW(-1)}]
# -- when symbols given
    assert solve([y, exp(x) + x], x, y) == [(-LambertW(1), 0)]
# symbol is a number
assert solve(x**2 - pi, pi) == [x**2]
# no equations
assert solve([], [x]) == []
# overdetermined system
# - nonlinear
assert solve([(x + y)**2 - 4, x + y - 2]) == [{x: -y + 2}]
# - linear
assert solve((x + y - 2, 2*x + 2*y - 4)) == {x: -y + 2}
def test_solve_polynomial1():
assert solve(3*x - 2, x) == [Rational(2, 3)]
assert solve(Eq(3*x, 2), x) == [Rational(2, 3)]
assert set(solve(x**2 - 1, x)) == set([-S(1), S(1)])
assert set(solve(Eq(x**2, 1), x)) == set([-S(1), S(1)])
assert solve(x - y**3, x) == [y**3]
rx = root(x, 3)
assert solve(x - y**3, y) == [
rx, -rx/2 - sqrt(3)*I*rx/2, -rx/2 + sqrt(3)*I*rx/2]
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
assert solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y) == \
{
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
solution = {y: S.Zero, x: S.Zero}
assert solve((x - y, x + y), x, y ) == solution
assert solve((x - y, x + y), (x, y)) == solution
assert solve((x - y, x + y), [x, y]) == solution
assert set(solve(x**3 - 15*x - 4, x)) == set([
-2 + 3**Rational(1, 2),
S(4),
-2 - 3**Rational(1, 2)
])
assert set(solve((x**2 - 1)**2 - a, x)) == \
set([sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a))])
def test_solve_polynomial2():
assert solve(4, x) == []
def test_solve_polynomial_cv_1a():
"""
    Test solving equations that can be converted to a polynomial equation
    using the change of variable y -> x**Rational(p, q)
"""
assert solve( sqrt(x) - 1, x) == [1]
assert solve( sqrt(x) - 2, x) == [4]
assert solve( x**Rational(1, 4) - 2, x) == [16]
assert solve( x**Rational(1, 3) - 3, x) == [27]
assert solve(sqrt(x) + x**Rational(1, 3) + x**Rational(1, 4), x) == [0]
def test_solve_polynomial_cv_1b():
assert set(solve(4*x*(1 - a*sqrt(x)), x)) == set([S(0), 1/a**2])
assert set(solve(x*(root(x, 3) - 3), x)) == set([S(0), S(27)])
def test_solve_polynomial_cv_2():
"""
    Test solving equations that can be converted to a polynomial equation
    by multiplying both sides of the equation by x**m
"""
assert solve(x + 1/x - 1, x) in \
[[ Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2],
[ Rational(1, 2) - I*sqrt(3)/2, Rational(1, 2) + I*sqrt(3)/2]]
def test_quintics_1():
f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
s = solve(f, check=False)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = solve(f)
for root in s:
assert root.func == CRootOf
# if one uses solve to get the roots of a polynomial that has a CRootOf
# solution, make sure that the use of nfloat during the solve process
# doesn't fail. Note: if you want numerical solutions to a polynomial
# it is *much* faster to use nroots to get them than to solve the
# equation only to get RootOf solutions which are then numerically
# evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
# than [i.n() for i in solve(eq)] to get the numerical roots of eq.
assert nfloat(solve(x**5 + 3*x**3 + 7)[0], exponent=False) == \
CRootOf(x**5 + 3*x**3 + 7, 0).n()
def test_highorder_poly():
# just testing that the uniq generator is unpacked
sol = solve(x**6 - 2*x + 2)
assert all(isinstance(i, CRootOf) for i in sol) and len(sol) == 6
@slow
def test_quintics_2():
f = x**5 + 15*x + 12
s = solve(f, check=False)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = solve(f)
for root in s:
assert root.func == CRootOf
def test_solve_rational():
"""Test solve for rational functions"""
assert solve( ( x - y**3 )/( (y**2)*sqrt(1 - y**2) ), x) == [y**3]
def test_solve_nonlinear():
assert solve(x**2 - y**2, x, y) == [{x: -y}, {x: y}]
assert solve(x**2 - y**2/exp(x), x, y) == [{x: 2*LambertW(y/2)}]
assert solve(x**2 - y**2/exp(x), y, x) == [{y: -x*sqrt(exp(x))}, {y: x*sqrt(exp(x))}]
def test_issue_8666():
x = symbols('x')
assert solve(Eq(x**2 - 1/(x**2 - 4), 4 - 1/(x**2 - 4)), x) == []
assert solve(Eq(x + 1/x, 1/x), x) == []
def test_issue_7228():
assert solve(4**(2*(x**2) + 2*x) - 8, x) == [-Rational(3, 2), S.Half]
def test_issue_7190():
assert solve(log(x-3) + log(x+3), x) == [sqrt(10)]
def test_linear_system():
x, y, z, t, n = symbols('x, y, z, t, n')
assert solve([x - 1, x - y, x - 2*y, y - 1], [x, y]) == []
assert solve([x - 1, x - y, x - 2*y, x - 1], [x, y]) == []
assert solve([x - 1, x - 1, x - y, x - 2*y], [x, y]) == []
assert solve([x + 5*y - 2, -3*x + 6*y - 15], x, y) == {x: -3, y: 1}
M = Matrix([[0, 0, n*(n + 1), (n + 1)**2, 0],
[n + 1, n + 1, -2*n - 1, -(n + 1), 0],
[-1, 0, 1, 0, 0]])
assert solve_linear_system(M, x, y, z, t) == \
{x: -t - t/n, z: -t - t/n, y: 0}
assert solve([x + y + z + t, -z - t], x, y, z, t) == {x: -y, z: -t}
def test_linear_system_function():
a = Function('a')
assert solve([a(0, 0) + a(0, 1) + a(1, 0) + a(1, 1), -a(1, 0) - a(1, 1)],
a(0, 0), a(0, 1), a(1, 0), a(1, 1)) == {a(1, 0): -a(1, 1), a(0, 0): -a(0, 1)}
def test_linear_systemLU():
n = Symbol('n')
M = Matrix([[1, 2, 0, 1], [1, 3, 2*n, 1], [4, -1, n**2, 1]])
assert solve_linear_system_LU(M, [x, y, z]) == {z: -3/(n**2 + 18*n),
x: 1 - 12*n/(n**2 + 18*n),
y: 6*n/(n**2 + 18*n)}
# Note: multiple solutions exist for some of these equations, so the tests
# should be expected to break if the implementation of the solver changes
# in such a way that a different branch is chosen
def test_solve_transcendental():
from sympy.abc import a, b
assert solve(exp(x) - 3, x) == [log(3)]
assert set(solve((a*x + b)*(exp(x) - 3), x)) == set([-b/a, log(3)])
assert solve(cos(x) - y, x) == [-acos(y) + 2*pi, acos(y)]
assert solve(2*cos(x) - y, x) == [-acos(y/2) + 2*pi, acos(y/2)]
assert solve(Eq(cos(x), sin(x)), x) == [-3*pi/4, pi/4]
assert set(solve(exp(x) + exp(-x) - y, x)) in [set([
log(y/2 - sqrt(y**2 - 4)/2),
log(y/2 + sqrt(y**2 - 4)/2),
]), set([
log(y - sqrt(y**2 - 4)) - log(2),
log(y + sqrt(y**2 - 4)) - log(2)]),
set([
log(y/2 - sqrt((y - 2)*(y + 2))/2),
log(y/2 + sqrt((y - 2)*(y + 2))/2)])]
assert solve(exp(x) - 3, x) == [log(3)]
assert solve(Eq(exp(x), 3), x) == [log(3)]
assert solve(log(x) - 3, x) == [exp(3)]
assert solve(sqrt(3*x) - 4, x) == [Rational(16, 3)]
assert solve(3**(x + 2), x) == []
assert solve(3**(2 - x), x) == []
assert solve(x + 2**x, x) == [-LambertW(log(2))/log(2)]
ans = solve(3*x + 5 + 2**(-5*x + 3), x)
assert len(ans) == 1 and ans[0].expand() == \
-Rational(5, 3) + LambertW(-10240*root(2, 3)*log(2)/3)/(5*log(2))
assert solve(5*x - 1 + 3*exp(2 - 7*x), x) == \
[Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7]
assert solve(2*x + 5 + log(3*x - 2), x) == \
[Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2]
assert solve(3*x + log(4*x), x) == [LambertW(Rational(3, 4))/3]
assert set(solve((2*x + 8)*(8 + exp(x)), x)) == set([S(-4), log(8) + pi*I])
eq = 2*exp(3*x + 4) - 3
ans = solve(eq, x) # this generated a failure in flatten
assert len(ans) == 3 and all(eq.subs(x, a).n(chop=True) == 0 for a in ans)
assert solve(2*log(3*x + 4) - 3, x) == [(exp(Rational(3, 2)) - 4)/3]
assert solve(exp(x) + 1, x) == [pi*I]
eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
result = solve(eq, x)
    ans = [-(log(2401) + 5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))]
assert result == ans
# it works if expanded, too
assert solve(eq.expand(), x) == result
assert solve(z*cos(x) - y, x) == [-acos(y/z) + 2*pi, acos(y/z)]
assert solve(z*cos(2*x) - y, x) == [-acos(y/z)/2 + pi, acos(y/z)/2]
assert solve(z*cos(sin(x)) - y, x) == [
asin(acos(y/z) - 2*pi) + pi, -asin(acos(y/z)) + pi,
-asin(acos(y/z) - 2*pi), asin(acos(y/z))]
assert solve(z*cos(x), x) == [pi/2, 3*pi/2]
# issue 4508
assert solve(y - b*x/(a + x), x) in [[-a*y/(y - b)], [a*y/(b - y)]]
assert solve(y - b*exp(a/x), x) == [a/log(y/b)]
# issue 4507
assert solve(y - b/(1 + a*x), x) in [[(b - y)/(a*y)], [-((y - b)/(a*y))]]
# issue 4506
assert solve(y - a*x**b, x) == [(y/a)**(1/b)]
# issue 4505
assert solve(z**x - y, x) == [log(y)/log(z)]
# issue 4504
assert solve(2**x - 10, x) == [log(10)/log(2)]
# issue 6744
assert solve(x*y) == [{x: 0}, {y: 0}]
assert solve([x*y]) == [{x: 0}, {y: 0}]
assert solve(x**y - 1) == [{x: 1}, {y: 0}]
assert solve([x**y - 1]) == [{x: 1}, {y: 0}]
assert solve(x*y*(x**2 - y**2)) == [{x: 0}, {x: -y}, {x: y}, {y: 0}]
assert solve([x*y*(x**2 - y**2)]) == [{x: 0}, {x: -y}, {x: y}, {y: 0}]
    # issue 4739
assert solve(exp(log(5)*x) - 2**x, x) == [0]
# misc
    # make sure that the right variable is picked up in tsolve
raises(NotImplementedError, lambda: solve((exp(x) + 1)**x - 2))
# shouldn't generate a GeneratorsNeeded error in _tsolve when the NaN is generated
    # for eq_down. Actual answers, as determined numerically, are approx. +/- 0.83
raises(NotImplementedError, lambda:
solve(sinh(x)*sinh(sinh(x)) + cosh(x)*cosh(sinh(x)) - 3))
# watch out for recursive loop in tsolve
raises(NotImplementedError, lambda: solve((x + 2)**y*x - 3, x))
# issue 7245
assert solve(sin(sqrt(x))) == [0, pi**2]
# issue 7602
a, b = symbols('a, b', real=True, negative=False)
assert str(solve(Eq(a, 0.5 - cos(pi*b)/2), b)) == \
'[-0.318309886183791*acos(-2.0*a + 1.0) + 2.0, 0.318309886183791*acos(-2.0*a + 1.0)]'
def test_solve_for_functions_derivatives():
t = Symbol('t')
x = Function('x')(t)
y = Function('y')(t)
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
soln = solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y)
assert soln == {
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
assert solve(x - 1, x) == [1]
assert solve(3*x - 2, x) == [Rational(2, 3)]
soln = solve([a11*x.diff(t) + a12*y.diff(t) - b1, a21*x.diff(t) +
a22*y.diff(t) - b2], x.diff(t), y.diff(t))
assert soln == { y.diff(t): (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x.diff(t): (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
assert solve(x.diff(t) - 1, x.diff(t)) == [1]
assert solve(3*x.diff(t) - 2, x.diff(t)) == [Rational(2, 3)]
eqns = set((3*x - 1, 2*y - 4))
assert solve(eqns, set((x, y))) == { x: Rational(1, 3), y: 2 }
x = Symbol('x')
f = Function('f')
F = x**2 + f(x)**2 - 4*x - 1
assert solve(F.diff(x), diff(f(x), x)) == [(-x + 2)/f(x)]
# Mixed cased with a Symbol and a Function
x = Symbol('x')
y = Function('y')(t)
soln = solve([a11*x + a12*y.diff(t) - b1, a21*x +
a22*y.diff(t) - b2], x, y.diff(t))
assert soln == { y.diff(t): (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
def test_issue_3725():
f = Function('f')
F = x**2 + f(x)**2 - 4*x - 1
e = F.diff(x)
assert solve(e, f(x).diff(x)) in [[(2 - x)/f(x)], [-((x - 2)/f(x))]]
def test_issue_3870():
a, b, c, d = symbols('a b c d')
A = Matrix(2, 2, [a, b, c, d])
B = Matrix(2, 2, [0, 2, -3, 0])
C = Matrix(2, 2, [1, 2, 3, 4])
assert solve(A*B - C, [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve([A*B - C], [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve(Eq(A*B, C), [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve([A*B - B*A], [a, b, c, d]) == {a: d, b: -S(2)/3*c}
assert solve([A*C - C*A], [a, b, c, d]) == {a: d - c, b: S(2)/3*c}
assert solve([A*B - B*A, A*C - C*A], [a, b, c, d]) == {a: d, b: 0, c: 0}
assert solve([Eq(A*B, B*A)], [a, b, c, d]) == {a: d, b: -S(2)/3*c}
assert solve([Eq(A*C, C*A)], [a, b, c, d]) == {a: d - c, b: S(2)/3*c}
assert solve([Eq(A*B, B*A), Eq(A*C, C*A)], [a, b, c, d]) == {a: d, b: 0, c: 0}
def test_solve_linear():
w = Wild('w')
assert solve_linear(x, x) == (0, 1)
assert solve_linear(x, exclude=[x]) == (0, 1)
assert solve_linear(x, symbols=[w]) == (0, 1)
assert solve_linear(x, y - 2*x) in [(x, y/3), (y, 3*x)]
assert solve_linear(x, y - 2*x, exclude=[x]) == (y, 3*x)
assert solve_linear(3*x - y, 0) in [(x, y/3), (y, 3*x)]
assert solve_linear(3*x - y, 0, [x]) == (x, y/3)
assert solve_linear(3*x - y, 0, [y]) == (y, 3*x)
assert solve_linear(x**2/y, 1) == (y, x**2)
assert solve_linear(w, x) in [(w, x), (x, w)]
assert solve_linear(cos(x)**2 + sin(x)**2 + 2 + y) == \
(y, -2 - cos(x)**2 - sin(x)**2)
assert solve_linear(cos(x)**2 + sin(x)**2 + 2 + y, symbols=[x]) == (0, 1)
assert solve_linear(Eq(x, 3)) == (x, 3)
assert solve_linear(1/(1/x - 2)) == (0, 0)
assert solve_linear((x + 1)*exp(-x), symbols=[x]) == (x, -1)
assert solve_linear((x + 1)*exp(x), symbols=[x]) == ((x + 1)*exp(x), 1)
assert solve_linear(x*exp(-x**2), symbols=[x]) == (x, 0)
assert solve_linear(0**x - 1) == (0**x - 1, 1)
assert solve_linear(1 + 1/(x - 1)) == (x, 0)
eq = y*cos(x)**2 + y*sin(x)**2 - y # = y*(1 - 1) = 0
assert solve_linear(eq) == (0, 1)
eq = cos(x)**2 + sin(x)**2 # = 1
assert solve_linear(eq) == (0, 1)
raises(ValueError, lambda: solve_linear(Eq(x, 3), 3))
def test_solve_undetermined_coeffs():
assert solve_undetermined_coeffs(a*x**2 + b*x**2 + b*x + 2*c*x + c + 1, [a, b, c], x) == \
{a: -2, b: 2, c: -1}
# Test that rational functions work
assert solve_undetermined_coeffs(a/x + b/(x + 1) - (2*x + 1)/(x**2 + x), [a, b], x) == \
{a: 1, b: 1}
# Test cancellation in rational functions
assert solve_undetermined_coeffs(((c + 1)*a*x**2 + (c + 1)*b*x**2 +
(c + 1)*b*x + (c + 1)*2*c*x + (c + 1)**2)/(c + 1), [a, b, c], x) == \
{a: -2, b: 2, c: -1}
def test_solve_inequalities():
x = Symbol('x')
system = [Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]
assert solve(system) == \
And(Or(And(Lt(-sqrt(2), x), Lt(x, -1)),
And(Lt(1, x), Lt(x, sqrt(2)))), Eq(0, 0))
x = Symbol('x', real=True)
system = [Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]
assert solve(system) == \
Or(And(Lt(-sqrt(2), x), Lt(x, -1)), And(Lt(1, x), Lt(x, sqrt(2))))
# issue 6627, 3448
assert solve((x - 3)/(x - 2) < 0, x) == And(Lt(2, x), Lt(x, 3))
assert solve(x/(x + 1) > 1, x) == And(Lt(-oo, x), Lt(x, -1))
assert solve(sin(x) > S.Half) == And(pi/6 < x, x < 5*pi/6)
def test_issue_4793():
assert solve(1/x) == []
assert solve(x*(1 - 5/x)) == [5]
assert solve(x + sqrt(x) - 2) == [1]
assert solve(-(1 + x)/(2 + x)**2 + 1/(2 + x)) == []
assert solve(-x**2 - 2*x + (x + 1)**2 - 1) == []
assert solve((x/(x + 1) + 3)**(-2)) == []
assert solve(x/sqrt(x**2 + 1), x) == [0]
assert solve(exp(x) - y, x) == [log(y)]
assert solve(exp(x)) == []
assert solve(x**2 + x + sin(y)**2 + cos(y)**2 - 1, x) in [[0, -1], [-1, 0]]
eq = 4*3**(5*x + 2) - 7
ans = solve(eq, x)
assert len(ans) == 5 and all(eq.subs(x, a).n(chop=True) == 0 for a in ans)
assert solve(log(x**2) - y**2/exp(x), x, y, set=True) == \
([y], set([
(-sqrt(exp(x)*log(x**2)),),
(sqrt(exp(x)*log(x**2)),)]))
assert solve(x**2*z**2 - z**2*y**2) == [{x: -y}, {x: y}, {z: 0}]
assert solve((x - 1)/(1 + 1/(x - 1))) == []
assert solve(x**(y*z) - x, x) == [1]
raises(NotImplementedError, lambda: solve(log(x) - exp(x), x))
raises(NotImplementedError, lambda: solve(2**x - exp(x) - 3))
def test_PR1964():
# issue 5171
assert solve(sqrt(x)) == solve(sqrt(x**3)) == [0]
assert solve(sqrt(x - 1)) == [1]
# issue 4462
a = Symbol('a')
assert solve(-3*a/sqrt(x), x) == []
# issue 4486
assert solve(2*x/(x + 2) - 1, x) == [2]
# issue 4496
assert set(solve((x**2/(7 - x)).diff(x))) == set([S(0), S(14)])
# issue 4695
f = Function('f')
assert solve((3 - 5*x/f(x))*f(x), f(x)) == [5*x/3]
# issue 4497
assert solve(1/root(5 + x, 5) - 9, x) == [-295244/S(59049)]
assert solve(sqrt(x) + sqrt(sqrt(x)) - 4) == [(-S.Half + sqrt(17)/2)**4]
assert set(solve(Poly(sqrt(exp(x)) + sqrt(exp(-x)) - 4))) in \
[
set([log((-sqrt(3) + 2)**2), log((sqrt(3) + 2)**2)]),
set([2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2)]),
set([log(-4*sqrt(3) + 7), log(4*sqrt(3) + 7)]),
]
assert set(solve(Poly(exp(x) + exp(-x) - 4))) == \
set([log(-sqrt(3) + 2), log(sqrt(3) + 2)])
assert set(solve(x**y + x**(2*y) - 1, x)) == \
set([(-S.Half + sqrt(5)/2)**(1/y), (-S.Half - sqrt(5)/2)**(1/y)])
assert solve(exp(x/y)*exp(-z/y) - 2, y) == [(x - z)/log(2)]
assert solve(
x**z*y**z - 2, z) in [[log(2)/(log(x) + log(y))], [log(2)/(log(x*y))]]
# if you do inversion too soon then multiple roots (as for the following)
# will be missed, e.g. if exp(3*x) = exp(3) -> 3*x = 3
E = S.Exp1
assert solve(exp(3*x) - exp(3), x) in [
[1, log(E*(-S.Half - sqrt(3)*I/2)), log(E*(-S.Half + sqrt(3)*I/2))],
[1, log(-E/2 - sqrt(3)*E*I/2), log(-E/2 + sqrt(3)*E*I/2)],
]
# coverage test
p = Symbol('p', positive=True)
assert solve((1/p + 1)**(p + 1)) == []
def test_issue_5197():
x = Symbol('x', real=True)
assert solve(x**2 + 1, x) == []
n = Symbol('n', integer=True, positive=True)
assert solve((n - 1)*(n + 2)*(2*n - 1), n) == [1]
x = Symbol('x', positive=True)
y = Symbol('y')
assert solve([x + 5*y - 2, -3*x + 6*y - 15], x, y) == []
# not {x: -3, y: 1} b/c x is positive
# The solution following should not contain (-sqrt(2), sqrt(2))
assert solve((x + y)*n - y**2 + 2, x, y) == [(sqrt(2), -sqrt(2))]
y = Symbol('y', positive=True)
# The solution following should not contain {y: -x*exp(x/2)}
assert solve(x**2 - y**2/exp(x), y, x) == [{y: x*exp(x/2)}]
assert solve(x**2 - y**2/exp(x), x, y) == [{x: 2*LambertW(y/2)}]
x, y, z = symbols('x y z', positive=True)
assert solve(z**2*x**2 - z**2*y**2/exp(x), y, x, z) == [{y: x*exp(x/2)}]
def test_checking():
assert set(
solve(x*(x - y/x), x, check=False)) == set([sqrt(y), S(0), -sqrt(y)])
assert set(solve(x*(x - y/x), x, check=True)) == set([sqrt(y), -sqrt(y)])
    # {x: 0, y: 4} sets a denominator to 0 in the following, so the system has no solution
assert solve((1/(1/x + 2), 1/(y - 3) - 1)) == []
    # 0 sets the denominator of 1/x to zero, so no solution is returned
assert solve(1/(1/x + 2)) == []
def test_issue_4671_4463_4467():
assert solve((sqrt(x**2 - 1) - 2)) in ([sqrt(5), -sqrt(5)],
[-sqrt(5), sqrt(5)])
assert solve((2**exp(y**2/x) + 2)/(x**2 + 15), y) == [
-sqrt(x)*sqrt(-log(log(2)) + log(log(2) + I*pi)),
sqrt(x)*sqrt(-log(log(2)) + log(log(2) + I*pi))]
C1, C2 = symbols('C1 C2')
f = Function('f')
assert solve(C1 + C2/x**2 - exp(-f(x)), f(x)) == [log(x**2/(C1*x**2 + C2))]
a = Symbol('a')
E = S.Exp1
assert solve(1 - log(a + 4*x**2), x) in (
[-sqrt(-a + E)/2, sqrt(-a + E)/2],
[sqrt(-a + E)/2, -sqrt(-a + E)/2]
)
assert solve(log(a**(-3) - x**2)/a, x) in (
[-sqrt(-1 + a**(-3)), sqrt(-1 + a**(-3))],
[sqrt(-1 + a**(-3)), -sqrt(-1 + a**(-3))],)
assert solve(1 - log(a + 4*x**2), x) in (
[-sqrt(-a + E)/2, sqrt(-a + E)/2],
[sqrt(-a + E)/2, -sqrt(-a + E)/2],)
assert set(solve((
a**2 + 1) * (sin(a*x) + cos(a*x)), x)) == set([-pi/(4*a), 3*pi/(4*a)])
assert solve(3 - (sinh(a*x) + cosh(a*x)), x) == [log(3)/a]
assert set(solve(3 - (sinh(a*x) + cosh(a*x)**2), x)) == \
set([log(-2 + sqrt(5))/a, log(-sqrt(2) + 1)/a,
log(-sqrt(5) - 2)/a, log(1 + sqrt(2))/a])
assert solve(atan(x) - 1) == [tan(1)]
def test_issue_5132():
r, t = symbols('r,t')
assert set(solve([r - x**2 - y**2, tan(t) - y/x], [x, y])) == \
set([(
-sqrt(r*cos(t)**2), -1*sqrt(r*cos(t)**2)*tan(t)),
(sqrt(r*cos(t)**2), sqrt(r*cos(t)**2)*tan(t))])
assert solve([exp(x) - sin(y), 1/y - 3], [x, y]) == \
[(log(sin(S(1)/3)), S(1)/3)]
assert solve([exp(x) - sin(y), 1/exp(y) - 3], [x, y]) == \
[(log(-sin(log(3))), -log(3))]
assert set(solve([exp(x) - sin(y), y**2 - 4], [x, y])) == \
set([(log(-sin(2)), -S(2)), (log(sin(2)), S(2))])
eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
assert solve(eqs, set=True) == \
([x, y], set([
(log(-sqrt(-z**2 - sin(log(3)))), -log(3)),
(log(sqrt(-z**2 - sin(log(3)))), -log(3))]))
assert solve(eqs, x, z, set=True) == \
([x], set([
(log(-sqrt(-z**2 + sin(y))),),
(log(sqrt(-z**2 + sin(y))),)]))
assert set(solve(eqs, x, y)) == \
set([
(log(-sqrt(-z**2 - sin(log(3)))), -log(3)),
(log(sqrt(-z**2 - sin(log(3)))), -log(3))])
assert set(solve(eqs, y, z)) == \
set([
(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3))))])
eqs = [exp(x)**2 - sin(y) + z, 1/exp(y) - 3]
assert solve(eqs, set=True) == ([x, y], set(
[
(log(-sqrt(-z - sin(log(3)))), -log(3)),
(log(sqrt(-z - sin(log(3)))), -log(3))]))
assert solve(eqs, x, z, set=True) == ([x], set(
[
(log(-sqrt(-z + sin(y))),),
(log(sqrt(-z + sin(y))),)]))
assert set(solve(eqs, x, y)) == set(
[
(log(-sqrt(-z - sin(log(3)))), -log(3)),
(log(sqrt(-z - sin(log(3)))), -log(3))])
assert solve(eqs, z, y) == \
[(-exp(2*x) - sin(log(3)), -log(3))]
assert solve((sqrt(x**2 + y**2) - sqrt(10), x + y - 4), set=True) == (
[x, y], set([(S(1), S(3)), (S(3), S(1))]))
assert set(solve((sqrt(x**2 + y**2) - sqrt(10), x + y - 4), x, y)) == \
set([(S(1), S(3)), (S(3), S(1))])
def test_issue_5335():
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
# there are 4 solutions but only two are valid
assert len(solve(eqs, sym, manual=True, minimal=True, simplify=False)) == 2
@SKIP("Hangs")
def _test_issue_5335_float():
# gives ZeroDivisionError: polynomial division
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
assert len(
solve(eqs, sym, rational=False, check=False, simplify=False)) == 2
def test_issue_5767():
assert set(solve([x**2 + y + 4], [x])) == \
set([(-sqrt(-y - 4),), (sqrt(-y - 4),)])
def test_polysys():
assert set(solve([x**2 + 2/y - 2, x + y - 3], [x, y])) == \
set([(S(1), S(2)), (1 + sqrt(5), 2 - sqrt(5)),
(1 - sqrt(5), 2 + sqrt(5))])
assert solve([x**2 + y - 2, x**2 + y]) == []
# the ordering should be whatever the user requested
assert solve([x**2 + y - 3, x - y - 4], (x, y)) != solve([x**2 +
y - 3, x - y - 4], (y, x))
@slow
def test_unrad1():
raises(NotImplementedError, lambda:
unrad(sqrt(x) + sqrt(x + 1) + sqrt(1 - sqrt(x)) + 3))
raises(NotImplementedError, lambda:
unrad(sqrt(x) + (x + 1)**Rational(1, 3) + 2*sqrt(y)))
s = symbols('s', cls=Dummy)
# checkers to deal with possibility of answer coming
# back with a sign change (cf issue 5203)
def check(rv, ans):
assert bool(rv[1]) == bool(ans[1])
if ans[1]:
return s_check(rv, ans)
e = rv[0].expand()
a = ans[0].expand()
return e in [a, -a] and rv[1] == ans[1]
def s_check(rv, ans):
# get the dummy
rv = list(rv)
d = rv[0].atoms(Dummy)
reps = list(zip(d, [s]*len(d)))
# replace s with this dummy
rv = (rv[0].subs(reps).expand(), [rv[1][0].subs(reps), rv[1][1].subs(reps)])
ans = (ans[0].subs(reps).expand(), [ans[1][0].subs(reps), ans[1][1].subs(reps)])
return str(rv[0]) in [str(ans[0]), str(-ans[0])] and \
str(rv[1]) == str(ans[1])
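# Editor's note (not part of the original tests): unrad returns a pair
# (poly_expr, cov) where cov is either [] or [dummy, dummy_equation];
# poly_expr can come back with an overall sign flip, which is exactly
# why check()/s_check() above accept both poly_expr and -poly_expr.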
assert check(unrad(sqrt(x)),
(x, []))
assert check(unrad(sqrt(x) + 1),
(x - 1, []))
assert check(unrad(sqrt(x) + root(x, 3) + 2),
(s**3 + s**2 + 2, [s, s**6 - x]))
assert check(unrad(sqrt(x)*root(x, 3) + 2),
(x**5 - 64, []))
assert check(unrad(sqrt(x) + (x + 1)**Rational(1, 3)),
(x**3 - (x + 1)**2, []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + sqrt(2*x)),
(-2*sqrt(2)*x - 2*x + 1, []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + 2),
(16*x - 9, []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + sqrt(1 - x)),
(5*x**2 - 4*x, []))
assert check(unrad(a*sqrt(x) + b*sqrt(x) + c*sqrt(y) + d*sqrt(y)),
((a*sqrt(x) + b*sqrt(x))**2 - (c*sqrt(y) + d*sqrt(y))**2, []))
assert check(unrad(sqrt(x) + sqrt(1 - x)),
(2*x - 1, []))
assert check(unrad(sqrt(x) + sqrt(1 - x) - 3),
(x**2 - x + 16, []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x)),
(5*x**2 - 2*x + 1, []))
assert unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x) - 3) in [
(25*x**4 + 376*x**3 + 1256*x**2 - 2272*x + 784, []),
(25*x**8 - 476*x**6 + 2534*x**4 - 1468*x**2 + 169, [])]
assert unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x) - sqrt(1 - 2*x)) == \
(41*x**4 + 40*x**3 + 232*x**2 - 160*x + 16, []) # orig root at 0.487
assert check(unrad(sqrt(x) + sqrt(x + 1)), (S(1), []))
eq = sqrt(x) + sqrt(x + 1) + sqrt(1 - sqrt(x))
assert check(unrad(eq),
(16*x**2 - 9*x, []))
assert set(solve(eq, check=False)) == set([S(0), S(9)/16])
assert solve(eq) == []
# but this one really does have those solutions
assert set(solve(sqrt(x) - sqrt(x + 1) + sqrt(1 - sqrt(x)))) == \
set([S.Zero, S(9)/16])
assert check(unrad(sqrt(x) + root(x + 1, 3) + 2*sqrt(y), y),
(S('2*sqrt(x)*(x + 1)**(1/3) + x - 4*y + (x + 1)**(2/3)'), []))
assert check(unrad(sqrt(x/(1 - x)) + (x + 1)**Rational(1, 3)),
(x**5 - x**4 - x**3 + 2*x**2 + x - 1, []))
assert check(unrad(sqrt(x/(1 - x)) + 2*sqrt(y), y),
(4*x*y + x - 4*y, []))
assert check(unrad(sqrt(x)*sqrt(1 - x) + 2, x),
(x**2 - x + 4, []))
# http://tutorial.math.lamar.edu/
# Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
assert solve(Eq(x, sqrt(x + 6))) == [3]
assert solve(Eq(x + sqrt(x - 4), 4)) == [4]
assert solve(Eq(1, x + sqrt(2*x - 3))) == []
assert set(solve(Eq(sqrt(5*x + 6) - 2, x))) == set([-S(1), S(2)])
assert set(solve(Eq(sqrt(2*x - 1) - sqrt(x - 4), 2))) == set([S(5), S(13)])
assert solve(Eq(sqrt(x + 7) + 2, sqrt(3 - x))) == [-6]
# http://www.purplemath.com/modules/solverad.htm
assert solve((2*x - 5)**Rational(1, 3) - 3) == [16]
assert set(solve(x + 1 - root(x**4 + 4*x**3 - x, 4))) == \
set([-S(1)/2, -S(1)/3])
assert set(solve(sqrt(2*x**2 - 7) - (3 - x))) == set([-S(8), S(2)])
assert solve(sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)) == [0]
assert solve(sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)) == [5]
assert solve(sqrt(x)*sqrt(x - 7) - 12) == [16]
assert solve(sqrt(x - 3) + sqrt(x) - 3) == [4]
assert solve(sqrt(9*x**2 + 4) - (3*x + 2)) == [0]
assert solve(sqrt(x) - 2 - 5) == [49]
assert solve(sqrt(x - 3) - sqrt(x) - 3) == []
assert solve(sqrt(x - 1) - x + 7) == [10]
assert solve(sqrt(x - 2) - 5) == [27]
assert solve(sqrt(17*x - sqrt(x**2 - 5)) - 7) == [3]
assert solve(sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))) == []
# don't posify the expression in unrad and do use _mexpand
z = sqrt(2*x + 1)/sqrt(x) - sqrt(2 + 1/x)
p = posify(z)[0]
assert solve(p) == []
assert solve(z) == []
assert solve(z + 6*I) == [-S(1)/11]
assert solve(p + 6*I) == []
# issue 8622
assert unrad((root(x + 1, 5) - root(x, 3))) == (
x**5 - x**3 - 3*x**2 - 3*x - 1, [])
# issue 8679
assert check(unrad(x + root(x, 3) + root(x, 3)**2 + sqrt(y), x),
(s**3 + s**2 + s + sqrt(y), [s, s**3 - x]))
# for coverage
assert check(unrad(sqrt(x) + root(x, 3) + y),
(s**3 + s**2 + y, [s, s**6 - x]))
assert solve(sqrt(x) + root(x, 3) - 2) == [1]
raises(NotImplementedError, lambda:
solve(sqrt(x) + root(x, 3) + root(x + 1, 5) - 2))
# fails through a different code path
raises(NotImplementedError, lambda: solve(-sqrt(2) + cosh(x)/x))
# cases where unrad removes only some of the radicals
assert solve(sqrt(x + root(x, 3)) + root(x - y, 5), y) == [
x + (x**(S(1)/3) + x)**(S(5)/2)]
assert check(unrad(sqrt(x) - root(x + 1, 3)*sqrt(x + 2) + 2),
(s**10 + 8*s**8 + 24*s**6 - 12*s**5 - 22*s**4 - 160*s**3 - 212*s**2 -
192*s - 56, [s, s**2 - x]))
e = root(x + 1, 3) + root(x, 3)
assert unrad(e) == (2*x + 1, [])
eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
assert check(unrad(eq),
(15625*x**4 + 173000*x**3 + 355600*x**2 - 817920*x + 331776, []))
assert check(unrad(root(x, 4) + root(x, 4)**3 - 1),
(s**3 + s - 1, [s, s**4 - x]))
assert check(unrad(root(x, 2) + root(x, 2)**3 - 1),
(x**3 + 2*x**2 + x - 1, []))
assert unrad(x**0.5) is None
assert check(unrad(t + root(x + y, 5) + root(x + y, 5)**3),
(s**3 + s + t, [s, s**5 - x - y]))
assert check(unrad(x + root(x + y, 5) + root(x + y, 5)**3, y),
(s**3 + s + x, [s, s**5 - x - y]))
assert check(unrad(x + root(x + y, 5) + root(x + y, 5)**3, x),
(s**5 + s**3 + s - y, [s, s**5 - x - y]))
assert check(unrad(root(x - 1, 3) + root(x + 1, 5) + root(2, 5)),
(s**5 + 5*2**(S(1)/5)*s**4 + s**3 + 10*2**(S(2)/5)*s**3 +
10*2**(S(3)/5)*s**2 + 5*2**(S(4)/5)*s + 4, [s, s**3 - x + 1]))
raises(NotImplementedError, lambda:
unrad((root(x, 2) + root(x, 3) + root(x, 4)).subs(x, x**5 - x + 1)))
# the simplify flag should be reset to False for unrad results;
# if it's not then this next test will take a long time
assert solve(root(x, 3) + root(x, 5) - 2) == [1]
eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
assert check(unrad(eq),
((5*x - 4)*(3125*x**3 + 37100*x**2 + 100800*x - 82944), []))
ans = S('''
[4/5, -1484/375 + 172564/(140625*(114*sqrt(12657)/78125 +
12459439/52734375)**(1/3)) +
4*(114*sqrt(12657)/78125 + 12459439/52734375)**(1/3)]''')
assert solve(eq) == ans
# duplicate radical handling
assert check(unrad(sqrt(x + root(x + 1, 3)) - root(x + 1, 3) - 2),
(s**3 - s**2 - 3*s - 5, [s, s**3 - x - 1]))
# cov post-processing
e = root(x**2 + 1, 3) - root(x**2 - 1, 5) - 2
assert check(unrad(e),
(s**5 - 10*s**4 + 39*s**3 - 80*s**2 + 80*s - 30,
[s, s**3 - x**2 - 1]))
e = sqrt(x + root(x + 1, 2)) - root(x + 1, 3) - 2
assert check(unrad(e),
(s**6 - 2*s**5 - 7*s**4 - 3*s**3 + 26*s**2 + 40*s + 25,
[s, s**3 - x - 1]))
assert check(unrad(e, _reverse=True),
(s**6 - 14*s**5 + 73*s**4 - 187*s**3 + 276*s**2 - 228*s + 89,
[s, s**2 - x - sqrt(x + 1)]))
# this one needs r0, r1 reversal to work
assert check(unrad(sqrt(x + sqrt(root(x, 3) - 1)) - root(x, 6) - 2),
(s**12 - 2*s**8 - 8*s**7 - 8*s**6 + s**4 + 8*s**3 + 23*s**2 +
32*s + 17, [s, s**6 - x]))
# is this needed?
#assert unrad(root(cosh(x), 3)/x*root(x + 1, 5) - 1) == (
# x**15 - x**3*cosh(x)**5 - 3*x**2*cosh(x)**5 - 3*x*cosh(x)**5 - cosh(x)**5, [])
raises(NotImplementedError, lambda:
unrad(sqrt(cosh(x)/x) + root(x + 1, 3)*sqrt(x) - 1))
assert unrad(S('(x+y)**(2*y/3) + (x+y)**(1/3) + 1')) is None
assert check(unrad(S('(x+y)**(2*y/3) + (x+y)**(1/3) + 1'), x),
(s**(2*y) + s + 1, [s, s**3 - x - y]))
# This tests two things: that if full unrad is attempted and fails
# the solution should still be found; also it tests the use of the
# keyword `composite`.
assert len(solve(sqrt(y)*x + x**3 - 1, x)) == 3
assert len(solve(-512*y**3 + 1344*(x + 2)**(S(1)/3)*y**2 -
1176*(x + 2)**(S(2)/3)*y - 169*x + 686, y, _unrad=False)) == 3
# watch out for when the cov doesn't involve the symbol of interest
eq = S('-x + (7*y/8 - (27*x/2 + 27*sqrt(x**2)/2)**(1/3)/3)**3 - 1')
assert solve(eq, y) == [
4*2**(S(2)/3)*(27*x + 27*sqrt(x**2))**(S(1)/3)/21 - (-S(1)/2 -
sqrt(3)*I/2)*(-6912*x/343 + sqrt((-13824*x/343 - S(13824)/343)**2)/2 -
S(6912)/343)**(S(1)/3)/3, 4*2**(S(2)/3)*(27*x + 27*sqrt(x**2))**(S(1)/3)/21 -
(-S(1)/2 + sqrt(3)*I/2)*(-6912*x/343 + sqrt((-13824*x/343 -
S(13824)/343)**2)/2 - S(6912)/343)**(S(1)/3)/3, 4*2**(S(2)/3)*(27*x +
27*sqrt(x**2))**(S(1)/3)/21 - (-6912*x/343 + sqrt((-13824*x/343 -
S(13824)/343)**2)/2 - S(6912)/343)**(S(1)/3)/3]
eq = root(x + 1, 3) - (root(x, 3) + root(x, 5))
assert check(unrad(eq),
(3*s**13 + 3*s**11 + s**9 - 1, [s, s**15 - x]))
assert check(unrad(eq - 2),
(3*s**13 + 3*s**11 + 6*s**10 + s**9 + 12*s**8 + 6*s**6 + 12*s**5 +
12*s**3 + 7, [s, s**15 - x]))
assert check(unrad(root(x, 3) - root(x + 1, 4)/2 + root(x + 2, 3)),
(4096*s**13 + 960*s**12 + 48*s**11 - s**10 - 1728*s**4,
[s, s**4 - x - 1])) # orig expr has two real roots: -1, -.389
assert check(unrad(root(x, 3) + root(x + 1, 4) - root(x + 2, 3)/2),
(343*s**13 + 2904*s**12 + 1344*s**11 + 512*s**10 - 1323*s**9 -
3024*s**8 - 1728*s**7 + 1701*s**5 + 216*s**4 - 729*s, [s, s**4 - x -
1])) # orig expr has one real root: -0.048
assert check(unrad(root(x, 3)/2 - root(x + 1, 4) + root(x + 2, 3)),
(729*s**13 - 216*s**12 + 1728*s**11 - 512*s**10 + 1701*s**9 -
3024*s**8 + 1344*s**7 + 1323*s**5 - 2904*s**4 + 343*s, [s, s**4 - x -
1])) # orig expr has 2 real roots: -0.91, -0.15
assert check(unrad(root(x, 3)/2 - root(x + 1, 4) + root(x + 2, 3) - 2),
(729*s**13 + 1242*s**12 + 18496*s**10 + 129701*s**9 + 388602*s**8 +
453312*s**7 - 612864*s**6 - 3337173*s**5 - 6332418*s**4 - 7134912*s**3
- 5064768*s**2 - 2111913*s - 398034, [s, s**4 - x - 1]))
# orig expr has 1 real root: 19.53
ans = solve(sqrt(x) + sqrt(x + 1) -
sqrt(1 - x) - sqrt(2 + x))
assert len(ans) == 1 and NS(ans[0])[:4] == '0.73'
# the fence optimization problem
# https://github.com/sympy/sympy/issues/4793#issuecomment-36994519
F = Symbol('F')
eq = F - (2*x + 2*y + sqrt(x**2 + y**2))
ans = 2*F/7 - sqrt(2)*F/14
X = solve(eq, x, check=False)
for xi in reversed(X):  # reverse since ans is currently the 2nd one
Y = solve((x*y).subs(x, xi).diff(y), y, simplify=False, check=False)
if any((a - ans).expand().is_zero for a in Y):
break
else:
assert False, 'no answer was found'
assert solve(sqrt(x + 1) + root(x, 3) - 2) == S('''
[(-11/(9*(47/54 + sqrt(93)/6)**(1/3)) + 1/3 + (47/54 +
sqrt(93)/6)**(1/3))**3]''')
assert solve(sqrt(sqrt(x + 1)) + x**Rational(1, 3) - 2) == S('''
[(-sqrt(-2*(-1/16 + sqrt(6913)/16)**(1/3) + 6/(-1/16 +
sqrt(6913)/16)**(1/3) + 17/2 + 121/(4*sqrt(-6/(-1/16 +
sqrt(6913)/16)**(1/3) + 2*(-1/16 + sqrt(6913)/16)**(1/3) + 17/4)))/2 +
sqrt(-6/(-1/16 + sqrt(6913)/16)**(1/3) + 2*(-1/16 +
sqrt(6913)/16)**(1/3) + 17/4)/2 + 9/4)**3]''')
assert solve(sqrt(x) + root(sqrt(x) + 1, 3) - 2) == S('''
[(-(81/2 + 3*sqrt(741)/2)**(1/3)/3 + (81/2 + 3*sqrt(741)/2)**(-1/3) +
2)**2]''')
eq = S('''
-x + (1/2 - sqrt(3)*I/2)*(3*x**3/2 - x*(3*x**2 - 34)/2 + sqrt((-3*x**3
+ x*(3*x**2 - 34) + 90)**2/4 - 39304/27) - 45)**(1/3) + 34/(3*(1/2 -
sqrt(3)*I/2)*(3*x**3/2 - x*(3*x**2 - 34)/2 + sqrt((-3*x**3 + x*(3*x**2
- 34) + 90)**2/4 - 39304/27) - 45)**(1/3))''')
assert check(unrad(eq),
(-s*(-s**6 + sqrt(3)*s**6*I - 153*2**(S(2)/3)*3**(S(1)/3)*s**4 +
51*12**(S(1)/3)*s**4 - 102*2**(S(2)/3)*3**(S(5)/6)*s**4*I - 1620*s**3 +
1620*sqrt(3)*s**3*I + 13872*18**(S(1)/3)*s**2 - 471648 +
471648*sqrt(3)*I), [s, s**3 - 306*x - sqrt(3)*sqrt(31212*x**2 -
165240*x + 61484) + 810]))
assert solve(eq) == [] # not other code errors
@slow
def test_unrad_slow():
# this has roots with multiplicity > 1; there should nevertheless be
# no repeats in the roots obtained
eq = (sqrt(1 + sqrt(1 - 4*x**2)) - x*((1 + sqrt(1 + 2*sqrt(1 - 4*x**2)))))
assert solve(eq) == [S.Half]
@XFAIL
def test_unrad_fail():
# this only works if we check real_root(eq.subs(x, S(1)/3))
# but checksol doesn't work like that
assert solve(root(x**3 - 3*x**2, 3) + 1 - x) == [S(1)/3]
assert solve(root(x + 1, 3) + root(x**2 - 2, 5) + 1) == [
-1, -1 + CRootOf(x**5 + x**4 + 5*x**3 + 8*x**2 + 10*x + 5, 0)**3]
def test_checksol():
x, y, r, t = symbols('x, y, r, t')
eq = r - x**2 - y**2
dict_var_soln = {y: - sqrt(r) / sqrt(tan(t)**2 + 1),
x: -sqrt(r)*tan(t)/sqrt(tan(t)**2 + 1)}
assert checksol(eq, dict_var_soln) == True
def test__invert():
assert _invert(x - 2) == (2, x)
assert _invert(2) == (2, 0)
assert _invert(exp(1/x) - 3, x) == (1/log(3), x)
assert _invert(exp(1/x + a/x) - 3, x) == ((a + 1)/log(3), x)
assert _invert(a, x) == (a, 0)
def test_issue_4463():
assert solve(-a*x + 2*x*log(x), x) == [exp(a/2)]
assert solve(a/x + exp(x/2), x) == [2*LambertW(-a/2)]
assert solve(x**x) == []
assert solve(x**x - 2) == [exp(LambertW(log(2)))]
assert solve(((x - 3)*(x - 2))**((x - 3)*(x - 4))) == [2]
assert solve(
(a/x + exp(x/2)).diff(x), x) == [4*LambertW(sqrt(2)*sqrt(a)/4)]
def test_issue_5114():
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r = symbols('a:r')
# there is no 'a' in the equation set but this is how the
# problem was originally posed
syms = a, b, c, f, h, k, n
eqs = [b + r/d - c/d,
c*(1/d + 1/e + 1/g) - f/g - r/d,
f*(1/g + 1/i + 1/j) - c/g - h/i,
h*(1/i + 1/l + 1/m) - f/i - k/m,
k*(1/m + 1/o + 1/p) - h/m - n/p,
n*(1/p + 1/q) - k/p]
assert len(solve(eqs, syms, manual=True, check=False, simplify=False)) == 1
def test_issue_5849():
I1, I2, I3, I4, I5, I6 = symbols('I1:7')
dI1, dI4, dQ2, dQ4, Q2, Q4 = symbols('dI1,dI4,dQ2,dQ4,Q2,Q4')
e = (
I1 - I2 - I3,
I3 - I4 - I5,
I4 + I5 - I6,
-I1 + I2 + I6,
-2*I1 - 2*I3 - 2*I5 - 3*I6 - dI1/2 + 12,
-I4 + dQ4,
-I2 + dQ2,
2*I3 + 2*I5 + 3*I6 - Q2,
I4 - 2*I5 + 2*Q4 + dI4
)
ans = [{
dQ4: I3 - I5,
dI1: -4*I2 - 8*I3 - 4*I5 - 6*I6 + 24,
I4: I3 - I5,
dQ2: I2,
Q2: 2*I3 + 2*I5 + 3*I6,
I1: I2 + I3,
Q4: -I3/2 + 3*I5/2 - dI4/2}]
v = I1, I4, Q2, Q4, dI1, dI4, dQ2, dQ4
assert solve(e, *v, manual=True, check=False) == ans
assert solve(e, *v, manual=True) == []
# the matrix solver (tested below) doesn't like this because it produces
# a zero row in the matrix. Is this related to issue 4551?
assert [ei.subs(
ans[0]) for ei in e] == [0, 0, I3 - I6, -I3 + I6, 0, 0, 0, 0, 0]
def test_issue_5849_matrix():
'''Same as test_issue_5849 but solved with the matrix solver.'''
I1, I2, I3, I4, I5, I6 = symbols('I1:7')
dI1, dI4, dQ2, dQ4, Q2, Q4 = symbols('dI1,dI4,dQ2,dQ4,Q2,Q4')
e = (
I1 - I2 - I3,
I3 - I4 - I5,
I4 + I5 - I6,
-I1 + I2 + I6,
-2*I1 - 2*I3 - 2*I5 - 3*I6 - dI1/2 + 12,
-I4 + dQ4,
-I2 + dQ2,
2*I3 + 2*I5 + 3*I6 - Q2,
I4 - 2*I5 + 2*Q4 + dI4
)
assert solve(e, I1, I4, Q2, Q4, dI1, dI4, dQ2, dQ4) == {
dI4: -I3 + 3*I5 - 2*Q4,
dI1: -4*I2 - 8*I3 - 4*I5 - 6*I6 + 24,
dQ2: I2,
I1: I2 + I3,
Q2: 2*I3 + 2*I5 + 3*I6,
dQ4: I3 - I5,
I4: I3 - I5}
def test_issue_5901():
f, g, h = map(Function, 'fgh')
a = Symbol('a')
D = Derivative(f(x), x)
G = Derivative(g(a), a)
assert solve(f(x) + f(x).diff(x), f(x)) == \
[-D]
assert solve(f(x) - 3, f(x)) == \
[3]
assert solve(f(x) - 3*f(x).diff(x), f(x)) == \
[3*D]
assert solve([f(x) - 3*f(x).diff(x)], f(x)) == \
{f(x): 3*D}
assert solve([f(x) - 3*f(x).diff(x), f(x)**2 - y + 4], f(x), y) == \
[{f(x): 3*D, y: 9*D**2 + 4}]
assert solve(-f(a)**2*g(a)**2 + f(a)**2*h(a)**2 + g(a).diff(a),
h(a), g(a), set=True) == \
([g(a)], set([
(-sqrt(h(a)**2*f(a)**2 + G)/f(a),),
(sqrt(h(a)**2*f(a)**2 + G)/f(a),)]))
args = [f(x).diff(x, 2)*(f(x) + g(x)) - g(x)**2 + 2, f(x), g(x)]
assert set(solve(*args)) == \
set([(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))])
eqs = [f(x)**2 + g(x) - 2*f(x).diff(x), g(x)**2 - 4]
assert solve(eqs, f(x), g(x), set=True) == \
([f(x), g(x)], set([
(-sqrt(2*D - 2), S(2)),
(sqrt(2*D - 2), S(2)),
(-sqrt(2*D + 2), -S(2)),
(sqrt(2*D + 2), -S(2))]))
# the underlying problem was in solve_linear that was not masking off
# anything but a Mul or Add; it now raises an error if it gets anything
# but a symbol and solve handles the substitutions necessary so solve_linear
# won't make this error
raises(
ValueError, lambda: solve_linear(f(x) + f(x).diff(x), symbols=[f(x)]))
assert solve_linear(f(x) + f(x).diff(x), symbols=[x]) == \
(f(x) + Derivative(f(x), x), 1)
assert solve_linear(f(x) + Integral(x, (x, y)), symbols=[x]) == \
(f(x) + Integral(x, (x, y)), 1)
assert solve_linear(f(x) + Integral(x, (x, y)) + x, symbols=[x]) == \
(x + f(x) + Integral(x, (x, y)), 1)
assert solve_linear(f(y) + Integral(x, (x, y)) + x, symbols=[x]) == \
(x, -f(y) - Integral(x, (x, y)))
assert solve_linear(x - f(x)/a + (f(x) - 1)/a, symbols=[x]) == \
(x, 1/a)
assert solve_linear(x + Derivative(2*x, x)) == \
(x, -2)
assert solve_linear(x + Integral(x, y), symbols=[x]) == \
(x, 0)
assert solve_linear(x + Integral(x, y) - 2, symbols=[x]) == \
(x, 2/(y + 1))
assert set(solve(x + exp(x)**2, exp(x))) == \
set([-sqrt(-x), sqrt(-x)])
assert solve(x + exp(x), x, implicit=True) == \
[-exp(x)]
assert solve(cos(x) - sin(x), x, implicit=True) == []
assert solve(x - sin(x), x, implicit=True) == \
[sin(x)]
assert solve(x**2 + x - 3, x, implicit=True) == \
[-x**2 + 3]
assert solve(x**2 + x - 3, x**2, implicit=True) == \
[-x + 3]
def test_issue_5912():
assert set(solve(x**2 - x - 0.1, rational=True)) == \
set([S(1)/2 + sqrt(35)/10, -sqrt(35)/10 + S(1)/2])
ans = solve(x**2 - x - 0.1, rational=False)
assert len(ans) == 2 and all(a.is_Number for a in ans)
ans = solve(x**2 - x - 0.1)
assert len(ans) == 2 and all(a.is_Number for a in ans)
def test_float_handling():
def test(e1, e2):
return len(e1.atoms(Float)) == len(e2.atoms(Float))
assert solve(x - 0.5, rational=True)[0].is_Rational
assert solve(x - 0.5, rational=False)[0].is_Float
assert solve(x - S.Half, rational=False)[0].is_Rational
assert solve(x - 0.5, rational=None)[0].is_Float
assert solve(x - S.Half, rational=None)[0].is_Rational
assert test(nfloat(1 + 2*x), 1.0 + 2.0*x)
for contain in [list, tuple, set]:
ans = nfloat(contain([1 + 2*x]))
assert type(ans) is contain and test(list(ans)[0], 1.0 + 2.0*x)
k, v = list(nfloat({2*x: [1 + 2*x]}).items())[0]
assert test(k, 2*x) and test(v[0], 1.0 + 2.0*x)
assert test(nfloat(cos(2*x)), cos(2.0*x))
assert test(nfloat(3*x**2), 3.0*x**2)
assert test(nfloat(3*x**2, exponent=True), 3.0*x**2.0)
assert test(nfloat(exp(2*x)), exp(2.0*x))
assert test(nfloat(x/3), x/3.0)
assert test(nfloat(x**4 + 2*x + cos(S(1)/3) + 1),
x**4 + 2.0*x + 1.94495694631474)
# don't call nfloat if there is no solution
tot = 100 + c + z + t
assert solve(((.7 + c)/tot - .6, (.2 + z)/tot - .3, t/tot - .1)) == []
def test_check_assumptions():
x = symbols('x', positive=True)
assert solve(x**2 - 1) == [1]
assert check_assumptions(1, x) == True
def test_issue_6056():
assert solve(tanh(x + 3)*tanh(x - 3) - 1) == []
assert set([simplify(w) for w in solve(tanh(x - 1)*tanh(x + 1) + 1)]) == set([
-log(2)/2 + log(1 - I),
-log(2)/2 + log(-1 - I),
-log(2)/2 + log(1 + I),
-log(2)/2 + log(-1 + I),])
assert set([simplify(w) for w in solve((tanh(x + 3)*tanh(x - 3) + 1)**2)]) == set([
-log(2)/2 + log(1 - I),
-log(2)/2 + log(-1 - I),
-log(2)/2 + log(1 + I),
-log(2)/2 + log(-1 + I),])
def test_issue_6060():
x = Symbol('x')
absxm3 = Piecewise(
(x - 3, S(0) <= x - 3),
(3 - x, S(0) > x - 3)
)
y = Symbol('y')
assert solve(absxm3 - y, x) == [
Piecewise((-y + 3, y > 0), (S.NaN, True)),
Piecewise((y + 3, 0 <= y), (S.NaN, True))
]
y = Symbol('y', positive=True)
assert solve(absxm3 - y, x) == [-y + 3, y + 3]
def test_issue_5673():
eq = -x + exp(exp(LambertW(log(x)))*LambertW(log(x)))
assert checksol(eq, x, 2) is True
assert checksol(eq, x, 2, numerical=False) is None
def test_exclude():
R, C, Ri, Vout, V1, Vminus, Vplus, s = \
symbols('R, C, Ri, Vout, V1, Vminus, Vplus, s')
Rf = symbols('Rf', positive=True) # to eliminate Rf = 0 soln
eqs = [C*V1*s + Vplus*(-2*C*s - 1/R),
Vminus*(-1/Ri - 1/Rf) + Vout/Rf,
C*Vplus*s + V1*(-C*s - 1/R) + Vout/R,
-Vminus + Vplus]
assert solve(eqs, exclude=s*C*R) == [
{
Rf: Ri*(C*R*s + 1)**2/(C*R*s),
Vminus: Vplus,
V1: 2*Vplus + Vplus/(C*R*s),
Vout: C*R*Vplus*s + 3*Vplus + Vplus/(C*R*s)},
{
Vplus: 0,
Vminus: 0,
V1: 0,
Vout: 0},
]
# TODO: Investigate why solution [0] is currently preferred over [1].
assert solve(eqs, exclude=[Vplus, s, C]) in [[{
Vminus: Vplus,
V1: Vout/2 + Vplus/2 + sqrt((Vout - 5*Vplus)*(Vout - Vplus))/2,
R: (Vout - 3*Vplus - sqrt(Vout**2 - 6*Vout*Vplus + 5*Vplus**2))/(2*C*Vplus*s),
Rf: Ri*(Vout - Vplus)/Vplus,
}, {
Vminus: Vplus,
V1: Vout/2 + Vplus/2 - sqrt((Vout - 5*Vplus)*(Vout - Vplus))/2,
R: (Vout - 3*Vplus + sqrt(Vout**2 - 6*Vout*Vplus + 5*Vplus**2))/(2*C*Vplus*s),
Rf: Ri*(Vout - Vplus)/Vplus,
}], [{
Vminus: Vplus,
Vout: (V1**2 - V1*Vplus - Vplus**2)/(V1 - 2*Vplus),
Rf: Ri*(V1 - Vplus)**2/(Vplus*(V1 - 2*Vplus)),
R: Vplus/(C*s*(V1 - 2*Vplus)),
}]]
def test_high_order_roots():
s = x**5 + 4*x**3 + 3*x**2 + S(7)/4
assert set(solve(s)) == set(Poly(s*4, domain='ZZ').all_roots())
def test_minsolve_linear_system():
def count(dic):
return len([x for x in dic.values() if x == 0])
assert count(solve([x + y + z, y + z + a + t], particular=True, quick=True)) \
== 3
assert count(solve([x + y + z, y + z + a + t], particular=True, quick=False)) \
== 3
assert count(solve([x + y + z, y + z + a], particular=True, quick=True)) == 1
assert count(solve([x + y + z, y + z + a], particular=True, quick=False)) == 2
def test_real_roots():
# cf. issue 6650
x = Symbol('x', real=True)
assert len(solve(x**5 + x**3 + 1)) == 1
def test_issue_6528():
eqs = [
327600995*x**2 - 37869137*x + 1809975124*y**2 - 9998905626,
895613949*x**2 - 273830224*x*y + 530506983*y**2 - 10000000000]
# two expressions encountered are > 1400 ops long so if this hangs
# it is likely because simplification is being done
assert len(solve(eqs, y, x, check=False)) == 4
def test_overdetermined():
x = symbols('x', real=True)
eqs = [Abs(4*x - 7) - 5, Abs(3 - 8*x) - 1]
assert solve(eqs, x) == [(S.Half,)]
assert solve(eqs, x, manual=True) == [(S.Half,)]
assert solve(eqs, x, manual=True, check=False) == [(S.Half,), (S(3),)]
def test_issue_6605():
x = symbols('x')
assert solve(4**(x/2) - 2**(x/3)) == [0, 3*I*pi/log(2)]
# while the first one passed, this one failed
x = symbols('x', real=True)
assert solve(5**(x/2) - 2**(x/3)) == [0]
b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
assert solve(5**(x/2) - 2**(3/x)) == [-b, b]
def test__ispow():
assert _ispow(x**2)
assert not _ispow(x)
assert not _ispow(True)
def test_issue_6644():
eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
sol = solve(eq, q, simplify=False, check=False)
assert len(sol) == 5
def test_issue_6752():
assert solve([a**2 + a, a - b], [a, b]) == [(-1, -1), (0, 0)]
assert solve([a**2 + a*c, a - b], [a, b]) == [(0, 0), (-c, -c)]
def test_issue_6792():
assert solve(x*(x - 1)**2*(x + 1)*(x**6 - x + 1)) == [
-1, 0, 1, CRootOf(x**6 - x + 1, 0), CRootOf(x**6 - x + 1, 1),
CRootOf(x**6 - x + 1, 2), CRootOf(x**6 - x + 1, 3),
CRootOf(x**6 - x + 1, 4), CRootOf(x**6 - x + 1, 5)]
def test_issues_6819_6820_6821_6248_8692():
# issue 6821
x, y = symbols('x y', real=True)
assert solve(abs(x + 3) - 2*abs(x - 3)) == [1, 9]
assert solve([abs(x) - 2, arg(x) - pi], x) == [(-2,), (2,)]
assert set(solve(abs(x - 7) - 8)) == set([-S(1), S(15)])
# issue 8692
assert solve(Eq(Abs(x + 1) + Abs(x**2 - 7), 9), x) == [
-S(1)/2 + sqrt(61)/2, -sqrt(69)/2 + S(1)/2]
# issue 7145
assert solve(2*abs(x) - abs(x - 1)) == [-1, Rational(1, 3)]
x = symbols('x')
assert solve([re(x) - 1, im(x) - 2], x) == [
{re(x): 1, x: 1 + 2*I, im(x): 2}]
# check for 'dict' handling of solution
eq = sqrt(re(x)**2 + im(x)**2) - 3
assert solve(eq) == solve(eq, x)
i = symbols('i', imaginary=True)
assert solve(abs(i) - 3) == [-3*I, 3*I]
raises(NotImplementedError, lambda: solve(abs(x) - 3))
w = symbols('w', integer=True)
assert solve(2*x**w - 4*y**w, w) == solve((x/y)**w - 2, w)
x, y = symbols('x y', real=True)
assert solve(x + y*I + 3) == {y: 0, x: -3}
# issue 2642
assert solve(x*(1 + I)) == [0]
x, y = symbols('x y', imaginary=True)
assert solve(x + y*I + 3 + 2*I) == {x: -2*I, y: 3*I}
x = symbols('x', real=True)
assert solve(x + y + 3 + 2*I) == {x: -3, y: -2*I}
# issue 6248
f = Function('f')
assert solve(f(x + 1) - f(2*x - 1)) == [2]
assert solve(log(x + 1) - log(2*x - 1)) == [2]
x = symbols('x')
assert solve(2**x + 4**x) == [I*pi/log(2)]
def test_issue_6989():
f = Function('f')
assert solve(Eq(-f(x), Piecewise((1, x > 0), (0, True))), f(x)) == \
[Piecewise((-1, x > 0), (0, True))]
def test_lambert_multivariate():
from sympy.abc import a, x, y
from sympy.solvers.bivariate import _filtered_gens, _lambert, _solve_lambert
assert _filtered_gens(Poly(x + 1/x + exp(x) + y), x) == set([x, exp(x)])
assert _lambert(x, x) == []
assert solve((x**2 - 2*x + 1).subs(x, log(x) + 3*x)) == [LambertW(3*S.Exp1)/3]
assert solve((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1)) == \
[LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3]
assert solve((x**2 - 2*x - 2).subs(x, log(x) + 3*x)) == \
[LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3]
assert solve(x*log(x) + 3*x + 1, x) == [exp(-3 + LambertW(-exp(3)))]
eq = (x*exp(x) - 3).subs(x, x*exp(x))
assert solve(eq) == [LambertW(3*exp(-LambertW(3)))]
# coverage test
raises(NotImplementedError, lambda: solve(x - sin(x)*log(y - x), x))
# if sign is unknown then only this one solution is obtained
assert solve(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == [
-((log(a**5) + LambertW(S(1)/3))/(3*log(a)))]
p = symbols('p', positive=True)
_13 = S(1)/3
_56 = S(5)/6
_53 = S(5)/3
assert solve(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == [
log((-3**_13 - 3**_56*I)*LambertW(_13)**_13/(2*p**_53))/log(p),
log((-3**_13 + 3**_56*I)*LambertW(_13)**_13/(2*p**_53))/log(p),
log((3*LambertW(_13)/p**5)**(1/(3*log(p))))]
# check collection
assert solve(3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5), x) == [
-((log(a**5) + LambertW(1/(b + 3)))/(3*log(a)))]
eq = 4*2**(2*p + 3) - 2*p - 3
assert _solve_lambert(eq, p, _filtered_gens(Poly(eq), p)) == [
-S(3)/2 - LambertW(-4*log(2))/(2*log(2))]
# issue 4271
assert solve((a/x + exp(x/2)).diff(x, 2), x) == [
6*LambertW(root(-1, 3)*root(a, 3)/3)]
assert solve((log(x) + x).subs(x, x**2 + 1)) == [
-I*sqrt(-LambertW(1) + 1), sqrt(-1 + LambertW(1))]
# these only give one of the solutions (see XFAIL below)
assert solve(x**3 - 3**x, x) == [-3/log(3)*LambertW(-log(3)/3),
-3*LambertW(-log(3)/3, -1)/log(3)]
# replacing 3 with 2 in the above solution gives 2
assert solve(x**2 - 2**x, x) == [2, -2*LambertW(-log(2)/2, -1)/log(2)]
assert solve(-x**2 + 2**x, x) == [2, -2*LambertW(-log(2)/2, -1)/log(2)]
assert solve(3**cos(x) - cos(x)**3) == [
acos(-3*LambertW(-log(3)/3)/log(3)),
acos(-3*LambertW(-log(3)/3, -1)/log(3))]
@XFAIL
def test_other_lambert():
from sympy.abc import x
assert solve(3*sin(x) - x*sin(3), x) == [3]
assert set(solve(3*log(x) - x*log(3))) == set(
[3, -3*LambertW(-log(3)/3)/log(3)])
a = S(6)/5
assert set(solve(x**a - a**x)) == set(
[a, -a*LambertW(-log(a)/a)/log(a)])
assert set(solve(3**cos(x) - cos(x)**3)) == set(
[acos(3), acos(-3*LambertW(-log(3)/3)/log(3))])
assert set(solve(x**2 - 2**x)) == set(
[2, -2/log(2)*LambertW(log(2)/2)])
def test_rewrite_trig():
assert solve(sin(x) + tan(x)) == [0, -pi, pi, 2*pi]
assert solve(sin(x) + sec(x)) == [
-2*atan(-S.Half + sqrt(2)*sqrt(1 - sqrt(3)*I)/2 + sqrt(3)*I/2),
2*atan(S.Half - sqrt(2)*sqrt(1 + sqrt(3)*I)/2 + sqrt(3)*I/2), 2*atan(S.Half
+ sqrt(2)*sqrt(1 + sqrt(3)*I)/2 + sqrt(3)*I/2), 2*atan(S.Half -
sqrt(3)*I/2 + sqrt(2)*sqrt(1 - sqrt(3)*I)/2)]
assert solve(sinh(x) + tanh(x)) == [0, I*pi]
# issue 6157
assert solve(2*sin(x) - cos(x), x) == [-2*atan(2 + sqrt(5)),
-2*atan(-sqrt(5) + 2)]
@XFAIL
def test_rewrite_trigh():
# if this import passes then the test below should also pass
from sympy import sech
assert solve(sinh(x) + sech(x)) == [
2*atanh(-S.Half + sqrt(5)/2 - sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-S.Half + sqrt(5)/2 + sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-sqrt(5)/2 - S.Half + sqrt(2 + 2*sqrt(5))/2),
2*atanh(-sqrt(2 + 2*sqrt(5))/2 - sqrt(5)/2 - S.Half)]
def test_uselogcombine():
eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
assert solve(eq, x, force=True) == [-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z)))]
assert solve(log(x + 3) + log(1 + 3/x) - 3) in [
[-3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
-sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2],
[-3 + sqrt(-36 + (-exp(3) + 6)**2)/2 + exp(3)/2,
-3 - sqrt(-36 + (-exp(3) + 6)**2)/2 + exp(3)/2],
]
assert solve(log(exp(2*x) + 1) + log(-tanh(x) + 1) - log(2)) == []
def test_atan2():
assert solve(atan2(x, 2) - pi/3, x) == [2*sqrt(3)]
def test_errorinverses():
assert solve(erf(x) - y, x) == [erfinv(y)]
assert solve(erfinv(x) - y, x) == [erf(y)]
assert solve(erfc(x) - y, x) == [erfcinv(y)]
assert solve(erfcinv(x) - y, x) == [erfc(y)]
def test_issue_2725():
R = Symbol('R')
eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
sol = solve(eq, R, set=True)[1]
assert sol == set([(S(5)/3 + (-S(1)/2 - sqrt(3)*I/2)*(S(251)/27 +
sqrt(111)*I/9)**(S(1)/3) + 40/(9*((-S(1)/2 - sqrt(3)*I/2)*(S(251)/27 +
sqrt(111)*I/9)**(S(1)/3))),), (S(5)/3 + 40/(9*(S(251)/27 +
sqrt(111)*I/9)**(S(1)/3)) + (S(251)/27 + sqrt(111)*I/9)**(S(1)/3),)])
def test_issue_5114_6611():
# See that it doesn't hang; this solves in about 2 seconds.
# Also check that the solution is relatively small.
# Note: the system in issue 6611 solves in about 5 seconds and has
# an op-count of 138336 (with simplify=False).
b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r = symbols('b:r')
eqs = Matrix([
[b - c/d + r/d], [c*(1/g + 1/e + 1/d) - f/g - r/d],
[-c/g + f*(1/j + 1/i + 1/g) - h/i], [-f/i + h*(1/m + 1/l + 1/i) - k/m],
[-h/m + k*(1/p + 1/o + 1/m) - n/p], [-k/p + n*(1/q + 1/p)]])
v = Matrix([f, h, k, n, b, c])
ans = solve(list(eqs), list(v), simplify=False)
# If time is taken to simplify then the 2617 below becomes
# 1168 and the time is about 50 seconds instead of 2.
assert sum([s.count_ops() for s in ans.values()]) <= 2617
def test_det_quick():
m = Matrix(3, 3, symbols('a:9'))
assert m.det() == det_quick(m) # calls det_perm
m[0, 0] = 1
assert m.det() == det_quick(m) # calls det_minor
m = Matrix(3, 3, list(range(9)))
assert m.det() == det_quick(m) # defaults to .det()
# make sure they work with Sparse
s = SparseMatrix(2, 2, (1, 2, 1, 4))
assert det_perm(s) == det_minor(s) == s.det()
def test_piecewise():
# if no symbol is given the piecewise detection must still work
assert solve(Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3) == [-1, 5]
def test_real_imag_splitting():
a, b = symbols('a b', real=True)
assert solve(sqrt(a**2 + b**2) - 3, a) == \
[-sqrt(-b**2 + 9), sqrt(-b**2 + 9)]
a, b = symbols('a b', imaginary=True)
assert solve(sqrt(a**2 + b**2) - 3, a) == []
def test_issue_7110():
y = -2*x**3 + 4*x**2 - 2*x + 5
assert any(ask(Q.real(i)) for i in solve(y))
def test_units():
assert solve(1/x - 1/(2*cm)) == [2*cm]
def test_issue_7547():
A, B, V = symbols('A,B,V')
eq1 = Eq(630.26*(V - 39.0)*V*(V + 39) - A + B, 0)
eq2 = Eq(B, 1.36*10**8*(V - 39))
eq3 = Eq(A, 5.75*10**5*V*(V + 39.0))
sol = Matrix(nsolve(Tuple(eq1, eq2, eq3), [A, B, V], (0, 0, 0)))
assert str(sol) == str(Matrix(
[['4442890172.68209'],
['4289299466.1432'],
['70.5389666628177']]))
def test_issue_7895():
r = symbols('r', real=True)
assert solve(sqrt(r) - 2) == [4]
def test_issue_2777():
# the equations represent two circles
x, y = symbols('x y', real=True)
e1, e2 = sqrt(x**2 + y**2) - 10, sqrt(y**2 + (-x + 10)**2) - 3
a, b = 191/S(20), 3*sqrt(391)/20
ans = [(a, -b), (a, b)]
assert solve((e1, e2), (x, y)) == ans
assert solve((e1, e2/(x - a)), (x, y)) == []
# make the 2nd circle's radius be -3
e2 += 6
assert solve((e1, e2), (x, y)) == []
assert solve((e1, e2), (x, y), check=False) == ans
def test_issue_7322():
number = 5.62527e-35
assert solve(x - number, x)[0] == number
def test_nsolve():
raises(ValueError, lambda: nsolve(x, (-1, 1), method='bisect'))
raises(TypeError, lambda: nsolve((x - y + 3, x + y, z - y), (x, y, z), (-50, 50)))
raises(TypeError, lambda: nsolve((x + y, x - y), (0, 1)))
def test_issue_8587():
f = Piecewise((2*x**2, And(S(0) < x, x < 1)), (2, True))
assert solve(f - 1) == [1/sqrt(2)]
def test_high_order_multivariate():
assert len(solve(a*x**3 - x + 1, x)) == 3
assert len(solve(a*x**4 - x + 1, x)) == 4
assert solve(a*x**5 - x + 1, x) == [] # incomplete solution allowed
raises(NotImplementedError, lambda:
solve(a*x**5 - x + 1, x, incomplete=False))
# result checking must always consider the denominator and CRootOf
# must be checked, too
d = x**5 - x + 1
assert solve(d*(1 + 1/d)) == [CRootOf(d + 1, i) for i in range(5)]
d = x - 1
assert solve(d*(2 + 1/d)) == [S.Half]
def test_base_0_exp_0():
assert solve(0**x - 1) == [0]
assert solve(0**(x - 2) - 1) == [2]
assert solve(S('x*(1/x**0 - x)', evaluate=False)) == \
[0, 1]
def test__simple_dens():
assert _simple_dens(1/x**0, [x]) == set()
assert _simple_dens(1/x**y, [x]) == set([x**y])
assert _simple_dens(1/root(x, 3), [x]) == set([x])
def test_issue_8755():
# This tests two things: that if full unrad is attempted and fails
# the solution should still be found; also it tests the use of
# keyword `composite`.
assert len(solve(sqrt(y)*x + x**3 - 1, x)) == 3
assert len(solve(-512*y**3 + 1344*(x + 2)**(S(1)/3)*y**2 -
1176*(x + 2)**(S(2)/3)*y - 169*x + 686, y, _unrad=False)) == 3
@slow
def test_issue_8828():
x1 = 0
y1 = -620
r1 = 920
x2 = 126
y2 = 276
x3 = 51
y3 = 205
r3 = 104
v = x, y, z
f1 = (x - x1)**2 + (y - y1)**2 - (r1 - z)**2
f2 = (x2 - x)**2 + (y2 - y)**2 - z**2
f3 = (x - x3)**2 + (y - y3)**2 - (r3 - z)**2
F = f1, f2, f3
g1 = sqrt((x - x1)**2 + (y - y1)**2) + z - r1
g2 = f2
g3 = sqrt((x - x3)**2 + (y - y3)**2) + z - r3
G = g1, g2, g3
A = solve(F, v)
B = solve(G, v)
C = solve(G, v, manual=True)
p, q, r = [set([tuple(i.evalf(2) for i in j) for j in R]) for R in [A, B, C]]
assert p == q == r
def test_issue_2840_8155():
assert solve(sin(3*x) + sin(6*x)) == [
0, -pi, pi, 2*pi, -2*I*log(-(-1)**(S(1)/9)), -2*I*log(-(-1)**(S(2)/9)),
-2*I*log((-1)**(S(7)/9)), -2*I*log((-1)**(S(8)/9)), -2*I*log(-S(1)/2 -
sqrt(3)*I/2), -2*I*log(-S(1)/2 + sqrt(3)*I/2), -2*I*log(S(1)/2 -
sqrt(3)*I/2), -2*I*log(S(1)/2 + sqrt(3)*I/2), -2*I*log(-sqrt(3)/2 - I/2),
-2*I*log(-sqrt(3)/2 + I/2), -2*I*log(sqrt(3)/2 - I/2),
-2*I*log(sqrt(3)/2 + I/2), -2*I*log(-sin(pi/18) - I*cos(pi/18)),
-2*I*log(-sin(pi/18) + I*cos(pi/18)), -2*I*log(sin(pi/18) -
I*cos(pi/18)), -2*I*log(sin(pi/18) + I*cos(pi/18)),
-2*I*log(exp(-2*I*pi/9)), -2*I*log(exp(-I*pi/9)),
-2*I*log(exp(I*pi/9)), -2*I*log(exp(2*I*pi/9))]
assert solve(2*sin(x) - 2*sin(2*x)) == [
0, -pi, pi, -2*I*log(-sqrt(3)/2 - I/2), -2*I*log(-sqrt(3)/2 + I/2),
-2*I*log(sqrt(3)/2 - I/2), -2*I*log(sqrt(3)/2 + I/2)]
def test_issue_9567():
assert solve(1 + 1/(x - 1)) == [0]
| bsd-3-clause |
ebmdatalab/openprescribing | openprescribing/dmd/forms.py | 1 | 2175 | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, ButtonHolder, Submit
from crispy_forms.bootstrap import InlineCheckboxes
obj_types_choices = [
("vtm", "VTMs"),
("vmp", "VMPs"),
("amp", "AMPs"),
("vmpp", "VMPPs"),
("ampp", "AMPPs"),
]
include_choices = [
("unavailable", "Unavailable items"),
("invalid", "Invalid items"),
("no_bnf_code", "Items with no BNF code"),
]
class SearchForm(forms.Form):
q = forms.CharField(
label="Query, SNOMED code, GTIN, or BNF code/prefix", min_length=3
)
obj_types = forms.MultipleChoiceField(
label="Search...",
choices=obj_types_choices,
required=False,
initial=[tpl[0] for tpl in obj_types_choices],
)
include = forms.MultipleChoiceField(
label="Include...",
choices=include_choices,
required=False,
help_text="Unavailable items are not available to be prescribed and/or have been discontinued.",
)
# This is only used in tests
max_results_per_obj_type = forms.IntegerField(
required=False, widget=forms.HiddenInput()
)
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "GET"
self.helper.layout = Layout(
Field("q"),
InlineCheckboxes("obj_types"),
InlineCheckboxes("include"),
ButtonHolder(Submit("submit", "Search")),
)
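# Editor's sketch (hypothetical usage, not in the original file): binding and
# validating the search form. "paracetamol" is an assumed example query and a
# configured Django environment is assumed; the helper is inert unless called.
def _search_form_demo():
    form = SearchForm({"q": "paracetamol", "obj_types": ["vmp"], "include": []})
    if form.is_valid():
        return form.cleaned_data["q"]  # the validated query string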
class AdvancedSearchForm(forms.Form):
search = forms.CharField(required=True, widget=forms.HiddenInput())
include = forms.MultipleChoiceField(
label="Include...",
choices=include_choices,
required=False,
help_text="Unavailable items are not available to be prescribed and/or have been discontinued.",
)
def __init__(self, *args, **kwargs):
super(AdvancedSearchForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "GET"
self.helper.layout = Layout(Field("search"), InlineCheckboxes("include"))
| mit |
maas/maas | src/maasserver/fields.py | 1 | 29287 | # Copyright 2012-2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Custom model and form fields."""
__all__ = [
"CIDRField",
"EditableBinaryField",
"Field",
"HostListFormField",
"IPListFormField",
"IPv4CIDRField",
"MAC",
"MACAddressField",
"MACAddressFormField",
"MODEL_NAME_VALIDATOR",
"NodeChoiceField",
"register_mac_type",
"VerboseRegexValidator",
"VersionedTextFileField",
"validate_mac",
]
from copy import deepcopy
from json import dumps, loads
import re
from django import forms
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import RegexValidator, URLValidator
from django.db import connections
from django.db.models import BinaryField, CharField
from django.db.models import Field as _BrokenField
from django.db.models import GenericIPAddressField, IntegerField, Q, URLField
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from netaddr import AddrFormatError, IPAddress, IPNetwork
import psycopg2.extensions
from maasserver.models.versionedtextfile import VersionedTextFile
from maasserver.utils.dns import validate_domain_name, validate_hostname
from maasserver.utils.orm import get_one, validate_in_transaction
from provisioningserver.utils import typed
# Validator for the name attribute of model entities.
MODEL_NAME_VALIDATOR = RegexValidator(r"^\w[ \w-]*$")
class Field(_BrokenField):
"""Django's `Field` has a mutable default argument, hence is broken.
This fixes it.
"""
def __init__(self, *args, validators=None, **kwargs):
kwargs["validators"] = [] if validators is None else validators
super().__init__(*args, **kwargs)
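# Editor's sketch (illustrative, not in the original module): the pitfall the
# class above guards against -- a mutable default is created once and shared
# by every call, so fields sharing one validators list would leak validators
# into each other. The helper below is hypothetical and inert unless called.
def _mutable_default_pitfall_demo(bucket=[]):  # one list, shared across calls
    bucket.append(None)
    return len(bucket)  # 1, then 2, then 3, ... on successive calls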
MAC_RE = re.compile(
r"^\s*("
r"([0-9a-fA-F]{1,2}:){5}[0-9a-fA-F]{1,2}|"
r"([0-9a-fA-F]{1,2}-){5}[0-9a-fA-F]{1,2}|"
r"([0-9a-fA-F]{3,4}.){2}[0-9a-fA-F]{3,4}"
r")\s*$"
)
MAC_ERROR_MSG = "'%(value)s' is not a valid MAC address."
class VerboseRegexValidator(RegexValidator):
"""A verbose `RegexValidator`.
This `RegexValidator` includes the checked value in the rendered error
message when the validation fails.
"""
# Set a bogus code to circumvent Django's attempt to re-interpret a
# validator's error message using the messages of the field it is
# attached to.
code = "bogus-code"
def __call__(self, value):
"""Validates that the input matches the regular expression."""
if not self.regex.search(force_text(value)):
raise ValidationError(
self.message % {"value": value}, code=self.code
)
mac_validator = VerboseRegexValidator(regex=MAC_RE, message=MAC_ERROR_MSG)
def validate_mac(value):
"""Django validator for a MAC."""
if isinstance(value, MAC):
value = value.get_raw()
mac_validator(value)
class StrippedCharField(forms.CharField):
"""A CharField that will strip surrounding whitespace before validation."""
def clean(self, value):
value = self.to_python(value).strip()
return super().clean(value)
class UnstrippedCharField(forms.CharField):
"""A version of forms.CharField that never strips the whitespace.
Django 1.9 has introduced a strip argument that controls stripping of
whitespace *and* which defaults to True, thus breaking compatibility with
1.8 and earlier.
"""
def __init__(self, *args, **kwargs):
# Instead of relying on a version check, we check for CharField
# constructor having a strip kwarg instead.
parent_init = super().__init__
if "strip" in parent_init.__code__.co_varnames:
parent_init(*args, strip=False, **kwargs)
else:
# In Django versions that do not support strip, False was the
# default.
parent_init(*args, **kwargs)
class VerboseRegexField(forms.CharField):
def __init__(self, regex, message, *args, **kwargs):
"""A field that validates its value with a regular expression.
:param regex: Either a string or a compiled regular expression object.
:param message: Error message to use when the validation fails.
"""
super().__init__(*args, **kwargs)
self.validators.append(
VerboseRegexValidator(regex=regex, message=message)
)
class MACAddressFormField(VerboseRegexField):
"""Form field type: MAC address."""
def __init__(self, *args, **kwargs):
super().__init__(regex=MAC_RE, message=MAC_ERROR_MSG, *args, **kwargs)
class MACAddressField(Field):
"""Model field type: MAC address."""
description = "MAC address"
default_validators = [validate_mac]
def db_type(self, *args, **kwargs):
return "macaddr"
def to_python(self, value):
return MAC(value)
def from_db_value(self, value, expression, connection, context):
return MAC(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
# Convert empty string to None.
if not value:
return None
return value
class MAC:
"""A MAC address represented as a database value.
PostgreSQL supports MAC addresses as a native type. They show up
client-side as this class. It is essentially a wrapper for a string.
This NEVER represents a null or empty MAC address.
"""
def __new__(cls, value):
"""Return `None` if `value` is `None` or the empty string."""
if value is None:
return None
elif isinstance(value, (bytes, str)):
return None if len(value) == 0 else super().__new__(cls)
else:
return super().__new__(cls)
def __init__(self, value):
"""Wrap a MAC address, or None, into a `MAC`.
:param value: A MAC address, in the form of a string or a `MAC`;
or None.
"""
# The wrapped attribute is stored as self._wrapped, following
# ISQLQuote's example.
if isinstance(value, MAC):
self._wrapped = value._wrapped
elif isinstance(value, bytes):
self._wrapped = value.decode("ascii")
elif isinstance(value, str):
self._wrapped = value
else:
raise TypeError("expected MAC or string, got: %r" % (value,))
def __conform__(self, protocol):
"""Tell psycopg2 that this type implements the adapter protocol."""
# The psycopg2 docs say to check that the protocol is ISQLQuote,
# but not what to do if it isn't.
assert protocol == psycopg2.extensions.ISQLQuote, (
"Unsupported psycopg2 adapter protocol: %s" % protocol
)
return self
def getquoted(self):
"""Render this object in SQL.
This is part of psycopg2's adapter protocol.
"""
return "'%s'::macaddr" % self._wrapped
def get_raw(self):
"""Return the wrapped value."""
return self._wrapped
@property
def raw(self):
"""The MAC address as a string."""
return self._wrapped
@classmethod
def parse(cls, value, cur):
"""Turn a value as received from the database into a MAC."""
return cls(value)
def __repr__(self):
"""Represent the MAC as a string."""
return "<MAC %s>" % self._wrapped
def __str__(self):
"""Represent the MAC as a Unicode string."""
return self._wrapped
def __bytes__(self):
return self._wrapped.encode("ascii")
def __eq__(self, other):
"""Two `MAC`s are equal if they wrap the same value.
A MAC is also equal to the value it wraps. This is non-commutative,
but it supports Django code that compares input values to various
kinds of "null" or "empty."
"""
if isinstance(other, MAC):
return self._wrapped == other._wrapped
else:
return self._wrapped == other
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self._wrapped)
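# Editor's sketch (illustrative helper, not in the original module): how MAC
# normalizes its input. The function is inert unless called.
def _mac_demo():
    assert MAC("") is None and MAC(None) is None  # empty inputs collapse to None
    m = MAC(b"00:11:22:33:44:55")                 # bytes are decoded to str
    assert str(m) == "00:11:22:33:44:55"
    assert m == "00:11:22:33:44:55"               # equal to the raw wrapped value
    return m.get_raw()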
def register_mac_type(cursor):
"""Register our `MAC` type with psycopg2 and Django."""
# This is standard, but not built-in, magic to register a type in
# psycopg2: execute a query that returns a field of the corresponding
# database type, then get its oid out of the cursor, use that to create
# a "typecaster" in psycopg (by calling new_type(), confusingly!), then
# register that type in psycopg.
cursor.execute("SELECT NULL::macaddr")
oid = cursor.description[0][1]
mac_caster = psycopg2.extensions.new_type((oid,), "macaddr", MAC.parse)
psycopg2.extensions.register_type(mac_caster)
# Now do the same for the type array-of-MACs. The "typecaster" created
# for MAC is passed in; it gets used for parsing an individual element
# of an array's text representation as received from the database.
cursor.execute("SELECT '{}'::macaddr[]")
oid = cursor.description[0][1]
psycopg2.extensions.register_type(
psycopg2.extensions.new_array_type((oid,), "macaddr", mac_caster)
)
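# Editor's sketch (assumed wiring, not in the original module): registration
# is done once per database connection, e.g. from a connection-created hook.
def _register_mac_type_demo(connection):
    with connection.cursor() as cursor:
        register_mac_type(cursor)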
class JSONObjectField(Field):
"""A field that will store any jsonizable python object."""
def to_python(self, value):
"""db -> python: json load."""
assert not isinstance(value, bytes)
if value is not None:
if isinstance(value, str):
try:
return loads(value)
except ValueError:
pass
return value
else:
return None
def from_db_value(self, value, expression, connection, context):
return self.to_python(value)
def get_db_prep_value(self, value, connection=None, prepared=False):
"""python -> db: json dump.
Keys are sorted when dumped to guarantee stable output. DB field can
guarantee uniqueness and be queried (the same dict makes the same
JSON).
"""
if value is not None:
return dumps(deepcopy(value), sort_keys=True)
else:
return None
def get_internal_type(self):
return "TextField"
def formfield(self, form_class=None, **kwargs):
"""Return a plain `forms.Field` here to avoid "helpful" conversions.
Django's base model field defaults to returning a `CharField`, which
means that anything that's not character data gets smooshed to text by
`CharField.to_python` in forms (via the woefully named `smart_text`).
This is not helpful.
"""
if form_class is None:
form_class = forms.Field
return super().formfield(form_class=form_class, **kwargs)
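# Editor's sketch (illustrative): the JSON round-trip performed by the field.
# Sorting keys on the way in means equal dicts always serialize to identical
# text, which is what makes DB-level uniqueness checks meaningful.
def _json_object_field_demo():
    field = JSONObjectField()
    stored = field.get_db_prep_value({"b": 2, "a": 1})  # '{"a": 1, "b": 2}'
    assert field.to_python(stored) == {"a": 1, "b": 2}
    return stored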
class XMLField(Field):
"""A field for storing xml natively.
This is not like the removed Django XMLField which just added basic python
level checking on top of a text column.
Really, inserts should be wrapped like `XMLPARSE(DOCUMENT value)`, but that's
hard to do from Django, so we rely on Postgres supporting casting from char.
"""
description = "XML document or fragment"
def db_type(self, connection):
return "xml"
class EditableBinaryField(BinaryField):
"""An editable binary field.
An editable version of Django's BinaryField.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.editable = True
def deconstruct(self):
# Override deconstruct not to fail on the removal of the 'editable'
# field: the Django migration module assumes the field has its default
# value (False).
return Field.deconstruct(self)
class LargeObjectFile:
"""Large object file.
Proxy the access from this object to psycopg2.
"""
def __init__(self, oid=0, field=None, instance=None, block_size=(1 << 16)):
self.oid = oid
self.field = field
self.instance = instance
self.block_size = block_size
self._lobject = None
def __getattr__(self, name):
if self._lobject is None:
raise IOError("LargeObjectFile is not opened.")
return getattr(self._lobject, name)
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
self.close()
def __iter__(self):
return self
@typed
def write(self, data: bytes):
"""Write `data` to the underlying large object.
This exists so that type annotations can be enforced.
"""
self._lobject.write(data)
def open(
self, mode="rwb", new_file=None, using="default", connection=None
):
"""Opens the internal large object instance."""
if "b" not in mode:
raise ValueError("Large objects must be opened in binary mode.")
if connection is None:
connection = connections[using]
validate_in_transaction(connection)
self._lobject = connection.connection.lobject(
self.oid, mode, 0, new_file
)
self.oid = self._lobject.oid
return self
def unlink(self):
"""Removes the large object."""
if self._lobject is None:
# Need to open the lobject so we get a reference to it in the
# database, to perform the unlink.
self.open()
self.close()
self._lobject.unlink()
self._lobject = None
self.oid = 0
def __next__(self):
r = self.read(self.block_size)
if len(r) == 0:
raise StopIteration
return r
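# Editor's sketch (hedged): a typical write cycle. PostgreSQL large objects
# are transactional, so this must run inside a transaction (open() enforces
# this via validate_in_transaction). Inert unless called.
def _large_object_demo():
    lob = LargeObjectFile()
    with lob.open("wb") as f:
        f.write(b"some binary payload")
    return lob.oid  # keep the oid to reopen the object later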
class LargeObjectDescriptor:
"""LargeObjectField descriptor."""
def __init__(self, field):
self.field = field
def __get__(self, instance, type=None):
if instance is None:
return self
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
value = self.field.to_python(value)
if value is not None:
if not isinstance(value, LargeObjectFile):
value = LargeObjectFile(value, self.field, instance)
instance.__dict__[self.field.name] = value
class LargeObjectField(IntegerField):
"""A field that stores large amounts of data into postgres large object
storage.
Internally the field on the model is an `oid` field, that returns a proxy
to the referenced large object.
"""
def __init__(self, *args, **kwargs):
self.block_size = kwargs.pop("block_size", 1 << 16)
super().__init__(*args, **kwargs)
@property
def validators(self):
# IntegerField would add validation that is incorrect for oid values;
# returning an empty list removes it.
return []
def db_type(self, connection):
"""Returns the database column data type for LargeObjectField."""
# oid is the column type postgres uses to reference a large object
return "oid"
def contribute_to_class(self, cls, name):
"""Set the descriptor for the large object."""
super().contribute_to_class(cls, name)
setattr(cls, self.name, LargeObjectDescriptor(self))
def get_db_prep_value(self, value, connection=None, prepared=False):
"""python -> db: `oid` value"""
if value is None:
return None
if isinstance(value, LargeObjectFile):
if value.oid > 0:
return value.oid
raise AssertionError(
"LargeObjectFile's oid must be greater than 0."
)
raise AssertionError(
"Invalid LargeObjectField value (expected LargeObjectFile): '%s'"
% repr(value)
)
def to_python(self, value):
"""db -> python: `LargeObjectFile`"""
if value is None:
return None
elif isinstance(value, LargeObjectFile):
return value
elif isinstance(value, int):
return LargeObjectFile(value, self, self.model, self.block_size)
raise AssertionError(
"Invalid LargeObjectField value (expected integer): '%s'"
% repr(value)
)
class CIDRField(Field):
description = "PostgreSQL CIDR field"
def parse_cidr(self, value):
try:
return str(IPNetwork(value).cidr)
except AddrFormatError as e:
raise ValidationError(str(e)) from e
def db_type(self, connection):
return "cidr"
def get_prep_value(self, value):
if value is None or value == "":
return None
return self.parse_cidr(value)
def from_db_value(self, value, expression, connection, context):
if value is None:
return value
return self.parse_cidr(value)
def to_python(self, value):
if value is None or value == "":
return None
if isinstance(value, IPNetwork):
return str(value)
if not value:
return value
return self.parse_cidr(value)
def formfield(self, **kwargs):
defaults = {"form_class": forms.CharField}
defaults.update(kwargs)
return super().formfield(**defaults)
class IPv4CIDRField(CIDRField):
"""IPv4-only CIDR"""
def get_prep_value(self, value):
if value is None or value == "":
return None
return self.to_python(value)
def to_python(self, value):
if value is None or value == "":
return None
else:
try:
cidr = IPNetwork(value)
except AddrFormatError:
raise ValidationError(
"Invalid network: %(cidr)s", params={"cidr": value}
)
if cidr.cidr.version != 4:
raise ValidationError(
"%(cidr)s: Only IPv4 networks supported.",
params={"cidr": value},
)
return str(cidr.cidr)
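# Editor's sketch (illustrative): both CIDR fields normalize to the network
# address, so a host-with-prefix form is stored in canonical CIDR notation.
def _cidr_demo():
    assert IPv4CIDRField().to_python("10.1.2.3/8") == "10.0.0.0/8"
    assert CIDRField().to_python("2001:db8::1/32") == "2001:db8::/32"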
class IPListFormField(forms.CharField):
"""Accepts a space/comma separated list of IP addresses.
This field normalizes the list to a space-separated list.
"""
separators = re.compile(r"[,\s]+")
def clean(self, value):
if value is None:
return None
else:
ips = re.split(self.separators, value)
ips = [ip.strip() for ip in ips if ip != ""]
for ip in ips:
try:
GenericIPAddressField().clean(ip, model_instance=None)
except ValidationError:
raise ValidationError(
"Invalid IP address: %s; provide a list of "
"space-separated IP addresses" % ip
)
return " ".join(ips)
class HostListFormField(forms.CharField):
"""Accepts a space/comma separated list of hostnames or IP addresses.
This field normalizes the list to a space-separated list.
"""
separators = re.compile(r"[,\s]+")
# Regular expressions to sniff out things that look like IP addresses;
# additional and more robust validation ought to be done to make sure.
pt_ipv4 = r"(?: \d{1,3} [.] \d{1,3} [.] \d{1,3} [.] \d{1,3} )"
pt_ipv6 = r"(?: (?: [\da-fA-F]+ :+)+ (?: [\da-fA-F]+ | %s )+ )" % pt_ipv4
pt_ip = re.compile(r"^ (?: %s | %s ) $" % (pt_ipv4, pt_ipv6), re.VERBOSE)
def clean(self, value):
if value is None:
return None
else:
values = map(str.strip, self.separators.split(value))
values = (value for value in values if len(value) != 0)
values = map(self._clean_addr_or_host, values)
return " ".join(values)
def _clean_addr_or_host(self, value):
looks_like_ip = self.pt_ip.match(value) is not None
if looks_like_ip:
return self._clean_addr(value)
elif ":" in value:
# This is probably an IPv6 address. It's definitely not a
# hostname.
return self._clean_addr(value)
else:
return self._clean_host(value)
def _clean_addr(self, addr):
try:
addr = IPAddress(addr)
except AddrFormatError as error:
message = str(error) # netaddr has good messages.
message = message[:1].upper() + message[1:] + "."
raise ValidationError(message)
else:
return str(addr)
def _clean_host(self, host):
try:
validate_hostname(host)
except ValidationError as error:
raise ValidationError("Invalid hostname: " + error.message)
else:
return host
class SubnetListFormField(forms.CharField):
"""Accepts a space/comma separated list of hostnames, Subnets or IPs.
This field normalizes the list to a space-separated list.
"""
separators = re.compile(r"[,\s]+")
# Regular expressions to sniff out things that look like IP addresses;
# additional and more robust validation ought to be done to make sure.
pt_ipv4 = r"(?: \d{1,3} [.] \d{1,3} [.] \d{1,3} [.] \d{1,3} )"
pt_ipv6 = r"(?: (|[0-9A-Fa-f]{1,4}) [:] (|[0-9A-Fa-f]{1,4}) [:] (.*))"
pt_ip = re.compile(r"^ (?: %s | %s ) $" % (pt_ipv4, pt_ipv6), re.VERBOSE)
pt_subnet = re.compile(
r"^ (?: %s | %s ) \/\d+$" % (pt_ipv4, pt_ipv6), re.VERBOSE
)
def clean(self, value):
if value is None:
return None
else:
values = map(str.strip, self.separators.split(value))
values = (value for value in values if len(value) != 0)
values = map(self._clean_addr_or_host, values)
return " ".join(values)
def _clean_addr_or_host(self, value):
looks_like_ip = self.pt_ip.match(value) is not None
looks_like_subnet = self.pt_subnet.match(value) is not None
if looks_like_subnet:
return self._clean_subnet(value)
elif looks_like_ip:
return self._clean_addr(value)
else:
return self._clean_host(value)
def _clean_addr(self, value):
try:
addr = IPAddress(value)
except ValueError:
return
except AddrFormatError:
raise ValidationError("Invalid IP address: %s." % value)
else:
return str(addr)
def _clean_subnet(self, value):
try:
cidr = IPNetwork(value)
except AddrFormatError:
raise ValidationError("Invalid network: %s." % value)
else:
return str(cidr)
def _clean_host(self, host):
try:
validate_hostname(host)
except ValidationError as error:
raise ValidationError("Invalid hostname: " + error.message)
else:
return host
class CaseInsensitiveChoiceField(forms.ChoiceField):
"""ChoiceField that allows the input to be case insensitive."""
def to_python(self, value):
if value not in self.empty_values:
value = value.lower()
return super().to_python(value)
class SpecifierOrModelChoiceField(forms.ModelChoiceField):
"""ModelChoiceField which is also able to accept input in the format
of a specifiers string.
"""
def to_python(self, value):
try:
return super().to_python(value)
except ValidationError as e:
if isinstance(value, str):
object_id = self.queryset.get_object_id(value)
if object_id is None:
obj = get_one(
self.queryset.filter_by_specifiers(value),
exception_class=ValidationError,
)
if obj is not None:
return obj
else:
try:
return self.queryset.get(id=object_id)
except ObjectDoesNotExist:
# Re-raising this as a ValidationError prevents the API
# from returning an internal server error rather than
# a bad request.
raise ValidationError("None found with id=%s." % value)
raise e
class DomainNameField(CharField):
"""Custom Django field that strips whitespace and trailing '.' characters
from DNS domain names before validating and saving to the database. Also,
validates that the domain name is valid according to RFCs 952 and 1123.
(Note that this field type should NOT be used for hostnames, since the set
of valid hostnames is smaller than the set of valid domain names.)
"""
def __init__(self, *args, **kwargs):
validators = kwargs.pop("validators", [])
validators.append(validate_domain_name)
kwargs["validators"] = validators
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["validators"]
return name, path, args, kwargs
# Here we are using (abusing?) the to_python() function to coerce and
# normalize this type. Django does not have a function intended purely
# to normalize before saving to the database, so to_python() is the next
# closest alternative. For more information, see:
# https://docs.djangoproject.com/en/1.6/ref/forms/validation/
# https://code.djangoproject.com/ticket/6362
def to_python(self, value):
value = super().to_python(value)
if value is None:
return None
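        # e.g. "  example.com.  " -> "example.com" (illustrative input).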
value = value.strip().rstrip(".")
return value
class NodeChoiceField(forms.ModelChoiceField):
def __init__(self, queryset, *args, **kwargs):
super().__init__(queryset=queryset.distinct(), *args, **kwargs)
def clean(self, value):
if not value:
return None
# Avoid circular imports
from maasserver.models.node import Node
if isinstance(value, Node):
if value not in self.queryset:
raise ValidationError(
"Select a valid choice. "
"%s is not one of the available choices." % value.system_id
)
return value
try:
return self.queryset.get(Q(system_id=value) | Q(hostname=value))
except Node.DoesNotExist:
raise ValidationError(
"Select a valid choice. "
"%s is not one of the available choices." % value
)
def to_python(self, value):
# Avoid circular imports
from maasserver.models.node import Node
try:
return self.queryset.get(Q(system_id=value) | Q(hostname=value))
except Node.DoesNotExist:
raise ValidationError(
"Select a valid choice. "
"%s is not one of the available choices." % value
)
class VersionedTextFileField(forms.ModelChoiceField):
def __init__(self, *args, **kwargs):
super().__init__(queryset=None, *args, **kwargs)
def clean(self, value):
if self.initial is None:
if value is None:
raise ValidationError("Must be given a value")
# Create a new VersionedTextFile if one doesn't exist
if isinstance(value, dict):
return VersionedTextFile.objects.create(**value)
else:
return VersionedTextFile.objects.create(data=value)
elif value is None:
return self.initial
else:
# Create and return a new VersionedTextFile linked to the previous
# VersionedTextFile
if isinstance(value, dict):
return self.initial.update(**value)
else:
return self.initial.update(value)
@deconstructible
class URLOrPPAValidator(URLValidator):
message = _("Enter a valid repository URL or PPA location.")
ppa_re = (
r"ppa:" + URLValidator.hostname_re + r"/" + URLValidator.hostname_re
)
def __call__(self, value):
match = re.search(URLOrPPAValidator.ppa_re, force_text(value))
# If we don't have a PPA location, let URLValidator do its job.
if not match:
super().__call__(value)
class URLOrPPAFormField(forms.URLField):
widget = forms.URLInput
default_error_messages = {
"invalid": _("Enter a valid repository URL or PPA location.")
}
default_validators = [URLOrPPAValidator()]
def to_python(self, value):
# Call grandparent method (CharField) to get string value.
value = super(forms.URLField, self).to_python(value)
# If it's a PPA locator, return it, else run URL pythonator.
match = re.search(URLOrPPAValidator.ppa_re, value)
return value if match else super().to_python(value)
class URLOrPPAField(URLField):
default_validators = [URLOrPPAValidator()]
description = _("URLOrPPAField")
# Copied from URLField, with modified form_class.
def formfield(self, **kwargs):
defaults = {"form_class": URLOrPPAFormField}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
| agpl-3.0 |
jckarter/swift | utils/swift_build_support/swift_build_support/build_graph.py | 13 | 6374 | # swift_build_support/build_graph.py ----------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# This is a simple implementation of an acyclic build graph. We require no
# cycles, so we just perform a reverse post order traversal to get a topological
# ordering. We check during the reverse post order traversal that we do not
# visit any node multiple times.
#
# Nodes are assumed to be a product's class.
#
# ----------------------------------------------------------------------------
def _get_po_ordered_nodes(root, invertedDepMap):
# Then setup our worklist/visited node set.
worklist = [root]
visitedNodes = set([])
# TODO: Can we unify po_ordered_nodes and visitedNodes in some way?
po_ordered_nodes = []
# Until we no longer have nodes to visit...
while not len(worklist) == 0:
# First grab the last element of the worklist. If we have already
# visited this node, just pop it and skip it.
#
# DISCUSSION: Consider the following build graph:
#
# A -> [C, B]
# B -> [C]
#
# In this case, we will most likely get the following worklist
# before actually processing anything:
#
# A, C, B, C
#
# In this case, we want to ignore the initial C pushed onto the
# worklist by visiting A since we will have visited C already due to
# the edge from B -> C.
node = worklist[-1]
if node in visitedNodes:
worklist.pop()
continue
# Then grab the dependents of our node.
deps = invertedDepMap.get(node, set([]))
assert(isinstance(deps, set))
# Then visit those and see if we have not visited any of them. Push
# any such nodes onto the worklist and continue. If we have already
# visited all of our dependents, then we can actually process this
# node.
foundDep = False
for d in deps:
if d not in visitedNodes:
foundDep = True
worklist.append(d)
if foundDep:
continue
# Now process the node by popping it off the worklist, adding it to
# the visited nodes set, and append it to the po_ordered_nodes in
# its final position.
worklist.pop()
visitedNodes.add(node)
po_ordered_nodes.append(node)
return po_ordered_nodes
class BuildDAG(object):
def __init__(self):
self.root = None
# A map from a node to a list of nodes that depend on the given node.
#
# NOTE: This is an inverted dependency map implying that the root will
# be a "final element" of the graph.
self.invertedDepMap = {}
def add_edge(self, pred, succ):
self.invertedDepMap.setdefault(pred, set([succ])) \
.add(succ)
def set_root(self, root):
# Assert that we always only have one root.
assert(self.root is None)
self.root = root
def produce_schedule(self):
# Grab the root and make sure it is not None
root = self.root
assert(root is not None)
# Then perform a post order traversal from root using our inverted
# dependency map to compute a list of our nodes in post order.
#
# NOTE: The index of each node in this list is the post order number of
# the node.
po_ordered_nodes = _get_po_ordered_nodes(root, self.invertedDepMap)
# Ok, we have our post order list. We want to provide our user a reverse
# post order, so we take our array and construct a dictionary of an
# enumeration of the list. This will give us a dictionary mapping our
# product names to their reverse post order number.
rpo_ordered_nodes = list(reversed(po_ordered_nodes))
node_to_rpot_map = dict((y, x) for x, y in enumerate(rpo_ordered_nodes))
# Now before we return our rpo_ordered_nodes and our node_to_rpot_map, lets
# verify that we didn't find any cycles. We can do this by traversing
# our dependency graph in reverse post order and making sure all
# dependencies of each node we visit has a later reverse post order
# number than the node we are checking.
for n, node in enumerate(rpo_ordered_nodes):
for dep in self.invertedDepMap.get(node, []):
if node_to_rpot_map[dep] < n:
print('n: {}. node: {}.'.format(n, node))
print('dep: {}.'.format(dep))
print('inverted dependency map: {}'.format(self.invertedDepMap))
print('rpo ordered nodes: {}'.format(rpo_ordered_nodes))
print('rpo node to rpo number map: {}'.format(node_to_rpot_map))
raise RuntimeError('Found cycle in build graph!')
return (rpo_ordered_nodes, node_to_rpot_map)
def produce_scheduled_build(input_product_classes):
"""For a given a subset input_input_product_classes of
all_input_product_classes, compute a topological ordering of the
input_input_product_classes + topological closures that respects the
dependency graph.
"""
dag = BuildDAG()
worklist = list(input_product_classes)
visited = set(input_product_classes)
# Construct the DAG.
while len(worklist) > 0:
entry = worklist.pop()
deps = entry.get_dependencies()
if len(deps) == 0:
dag.set_root(entry)
for d in deps:
dag.add_edge(d, entry)
if d not in visited:
worklist.append(d)
visited = visited.union(deps)
# Then produce the schedule.
schedule = dag.produce_schedule()
# Finally check that all of our input_product_classes are in the schedule.
if len(set(input_product_classes) - set(schedule[0])) != 0:
raise RuntimeError('Found disconnected graph?!')
return schedule
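# Minimal usage sketch (not part of the Swift build system). The product
# classes below are hypothetical stand-ins; real callers pass build-script
# product classes that expose get_dependencies(). Note that exactly one
# product may report an empty dependency list, since BuildDAG.set_root()
# asserts a single root.
if __name__ == '__main__':
    class CMark(object):
        @classmethod
        def get_dependencies(cls):
            return []
    class LLVM(object):
        @classmethod
        def get_dependencies(cls):
            return [CMark]
    class Swift(object):
        @classmethod
        def get_dependencies(cls):
            return [CMark, LLVM]
    ordering, rpo_numbers = produce_scheduled_build([Swift])
    # Dependencies come first: prints ['CMark', 'LLVM', 'Swift'].
    print([cls.__name__ for cls in ordering])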
| apache-2.0 |
Jgarcia-IAS/SAT | openerp/addons/account_payment/account_payment.py | 212 | 19161 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class payment_mode(osv.osv):
_name= 'payment.mode'
_description= 'Payment Mode'
_columns = {
'name': fields.char('Name', required=True, help='Mode of Payment'),
'bank_id': fields.many2one('res.partner.bank', "Bank account",
required=True,help='Bank Account for the Payment Mode'),
'journal': fields.many2one('account.journal', 'Journal', required=True,
domain=[('type', 'in', ('bank','cash'))], help='Bank or Cash Journal for the Payment Mode'),
'company_id': fields.many2one('res.company', 'Company',required=True),
'partner_id':fields.related('company_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True,),
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id
}
def suitable_bank_types(self, cr, uid, payment_code=None, context=None):
"""Return the codes of the bank type that are suitable
for the given payment type code"""
if not payment_code:
return []
cr.execute(""" SELECT pb.state
FROM res_partner_bank pb
JOIN payment_mode pm ON (pm.bank_id = pb.id)
WHERE pm.id = %s """, [payment_code])
return [x[0] for x in cr.fetchall()]
def onchange_company_id (self, cr, uid, ids, company_id=False, context=None):
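        # OpenERP onchange protocol: handlers return {'value': {field: new}},
        # which the client uses to update the form in place.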
result = {}
if company_id:
partner_id = self.pool.get('res.company').browse(cr, uid, company_id, context=context).partner_id.id
result['partner_id'] = partner_id
return {'value': result}
class payment_order(osv.osv):
_name = 'payment.order'
_description = 'Payment Order'
_rec_name = 'reference'
_order = 'id desc'
#dead code
def get_wizard(self, type):
_logger.warning("No wizard found for the payment type '%s'.", type)
return None
def _total(self, cursor, user, ids, name, args, context=None):
if not ids:
return {}
res = {}
for order in self.browse(cursor, user, ids, context=context):
if order.line_ids:
res[order.id] = reduce(lambda x, y: x + y.amount, order.line_ids, 0.0)
else:
res[order.id] = 0.0
return res
_columns = {
'date_scheduled': fields.date('Scheduled Date', states={'done':[('readonly', True)]}, help='Select a date if you have chosen Preferred Date to be fixed.'),
'reference': fields.char('Reference', required=1, states={'done': [('readonly', True)]}, copy=False),
'mode': fields.many2one('payment.mode', 'Payment Mode', select=True, required=1, states={'done': [('readonly', True)]}, help='Select the Payment Mode to be applied.'),
'state': fields.selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('open', 'Confirmed'),
('done', 'Done')], 'Status', select=True, copy=False,
            help='When an order is placed the status is \'Draft\'.\n Once the bank is confirmed the status is set to \'Confirmed\'.\n Once the order is paid the status is \'Done\'.'),
'line_ids': fields.one2many('payment.line', 'order_id', 'Payment lines', states={'done': [('readonly', True)]}),
'total': fields.function(_total, string="Total", type='float'),
'user_id': fields.many2one('res.users', 'Responsible', required=True, states={'done': [('readonly', True)]}),
'date_prefered': fields.selection([
('now', 'Directly'),
('due', 'Due date'),
('fixed', 'Fixed date')
], "Preferred Date", change_default=True, required=True, states={'done': [('readonly', True)]}, help="Choose an option for the Payment Order:'Fixed' stands for a date specified by you.'Directly' stands for the direct execution.'Due date' stands for the scheduled date of execution."),
'date_created': fields.date('Creation Date', readonly=True),
'date_done': fields.date('Execution Date', readonly=True),
'company_id': fields.related('mode', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'user_id': lambda self,cr,uid,context: uid,
'state': 'draft',
'date_prefered': 'due',
'date_created': lambda *a: time.strftime('%Y-%m-%d'),
'reference': lambda self,cr,uid,context: self.pool.get('ir.sequence').get(cr, uid, 'payment.order'),
}
def set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def action_open(self, cr, uid, ids, *args):
ir_seq_obj = self.pool.get('ir.sequence')
for order in self.read(cr, uid, ids, ['reference']):
if not order['reference']:
reference = ir_seq_obj.get(cr, uid, 'payment.order')
self.write(cr, uid, order['id'], {'reference':reference})
return True
def set_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'date_done': time.strftime('%Y-%m-%d')})
self.signal_workflow(cr, uid, ids, 'done')
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
payment_line_obj = self.pool.get('payment.line')
payment_line_ids = []
if (vals.get('date_prefered', False) == 'fixed' and not vals.get('date_scheduled', False)) or vals.get('date_scheduled', False):
for order in self.browse(cr, uid, ids, context=context):
for line in order.line_ids:
payment_line_ids.append(line.id)
payment_line_obj.write(cr, uid, payment_line_ids, {'date': vals.get('date_scheduled', False)}, context=context)
elif vals.get('date_prefered', False) == 'due':
vals.update({'date_scheduled': False})
for order in self.browse(cr, uid, ids, context=context):
for line in order.line_ids:
payment_line_obj.write(cr, uid, [line.id], {'date': line.ml_maturity_date}, context=context)
elif vals.get('date_prefered', False) == 'now':
vals.update({'date_scheduled': False})
for order in self.browse(cr, uid, ids, context=context):
for line in order.line_ids:
payment_line_ids.append(line.id)
payment_line_obj.write(cr, uid, payment_line_ids, {'date': False}, context=context)
return super(payment_order, self).write(cr, uid, ids, vals, context=context)
class payment_line(osv.osv):
_name = 'payment.line'
_description = 'Payment Line'
def translate(self, orig):
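        # Map payment.line field aliases onto the account_move_line column
        # names used by the raw SQL in select_by_name() below.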
return {
"due_date": "date_maturity",
"reference": "ref"}.get(orig, orig)
def _info_owner(self, cr, uid, ids, name=None, args=None, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
owner = line.order_id.mode.bank_id.partner_id
result[line.id] = self._get_info_partner(cr, uid, owner, context=context)
return result
def _get_info_partner(self,cr, uid, partner_record, context=None):
if not partner_record:
return False
st = partner_record.street or ''
st1 = partner_record.street2 or ''
zip = partner_record.zip or ''
city = partner_record.city or ''
zip_city = zip + ' ' + city
cntry = partner_record.country_id and partner_record.country_id.name or ''
return partner_record.name + "\n" + st + " " + st1 + "\n" + zip_city + "\n" +cntry
def _info_partner(self, cr, uid, ids, name=None, args=None, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
result[line.id] = False
if not line.partner_id:
break
result[line.id] = self._get_info_partner(cr, uid, line.partner_id, context=context)
return result
#dead code
def select_by_name(self, cr, uid, ids, name, args, context=None):
if not ids: return {}
partner_obj = self.pool.get('res.partner')
cr.execute("""SELECT pl.id, ml.%s
FROM account_move_line ml
INNER JOIN payment_line pl
ON (ml.id = pl.move_line_id)
WHERE pl.id IN %%s"""% self.translate(name),
(tuple(ids),))
res = dict(cr.fetchall())
if name == 'partner_id':
partner_name = {}
for p_id, p_name in partner_obj.name_get(cr, uid,
filter(lambda x:x and x != 0,res.values()), context=context):
partner_name[p_id] = p_name
for id in ids:
if id in res and partner_name:
res[id] = (res[id],partner_name[res[id]])
else:
res[id] = (False,False)
else:
for id in ids:
res.setdefault(id, (False, ""))
return res
def _amount(self, cursor, user, ids, name, args, context=None):
if not ids:
return {}
currency_obj = self.pool.get('res.currency')
if context is None:
context = {}
res = {}
for line in self.browse(cursor, user, ids, context=context):
ctx = context.copy()
ctx['date'] = line.order_id.date_done or time.strftime('%Y-%m-%d')
res[line.id] = currency_obj.compute(cursor, user, line.currency.id,
line.company_currency.id,
line.amount_currency, context=ctx)
return res
def _get_currency(self, cr, uid, context=None):
user_obj = self.pool.get('res.users')
currency_obj = self.pool.get('res.currency')
user = user_obj.browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.currency_id.id
else:
return currency_obj.search(cr, uid, [('rate', '=', 1.0)])[0]
def _get_date(self, cr, uid, context=None):
if context is None:
context = {}
payment_order_obj = self.pool.get('payment.order')
date = False
if context.get('order_id') and context['order_id']:
order = payment_order_obj.browse(cr, uid, context['order_id'], context=context)
if order.date_prefered == 'fixed':
date = order.date_scheduled
else:
date = time.strftime('%Y-%m-%d')
return date
def _get_ml_inv_ref(self, cr, uid, ids, *a):
res = {}
for id in self.browse(cr, uid, ids):
res[id.id] = False
if id.move_line_id:
if id.move_line_id.invoice:
res[id.id] = id.move_line_id.invoice.id
return res
def _get_ml_maturity_date(self, cr, uid, ids, *a):
res = {}
for id in self.browse(cr, uid, ids):
if id.move_line_id:
res[id.id] = id.move_line_id.date_maturity
else:
res[id.id] = False
return res
def _get_ml_created_date(self, cr, uid, ids, *a):
res = {}
for id in self.browse(cr, uid, ids):
if id.move_line_id:
res[id.id] = id.move_line_id.date_created
else:
res[id.id] = False
return res
_columns = {
'name': fields.char('Your Reference', required=True),
        'communication': fields.char('Communication', required=True, help="Used as the message between the ordering customer and the current company. Depicts 'What do you want to say to the recipient about this order?'"),
'communication2': fields.char('Communication 2', help='The successor message of Communication.'),
'move_line_id': fields.many2one('account.move.line', 'Entry line', domain=[('reconcile_id', '=', False), ('account_id.type', '=', 'payable')], help='This Entry Line will be referred for the information of the ordering customer.'),
'amount_currency': fields.float('Amount in Partner Currency', digits=(16, 2),
required=True, help='Payment amount in the partner currency'),
'currency': fields.many2one('res.currency','Partner Currency', required=True),
'company_currency': fields.many2one('res.currency', 'Company Currency', readonly=True),
'bank_id': fields.many2one('res.partner.bank', 'Destination Bank Account'),
'order_id': fields.many2one('payment.order', 'Order', required=True,
ondelete='cascade', select=True),
'partner_id': fields.many2one('res.partner', string="Partner", required=True, help='The Ordering Customer'),
'amount': fields.function(_amount, string='Amount in Company Currency',
type='float',
help='Payment amount in the company currency'),
'ml_date_created': fields.function(_get_ml_created_date, string="Effective Date",
type='date', help="Invoice Effective Date"),
'ml_maturity_date': fields.function(_get_ml_maturity_date, type='date', string='Due Date'),
'ml_inv_ref': fields.function(_get_ml_inv_ref, type='many2one', relation='account.invoice', string='Invoice Ref.'),
'info_owner': fields.function(_info_owner, string="Owner Account", type="text", help='Address of the Main Partner'),
'info_partner': fields.function(_info_partner, string="Destination Account", type="text", help='Address of the Ordering Customer.'),
'date': fields.date('Payment Date', help="If no payment date is specified, the bank will treat this payment line directly"),
'create_date': fields.datetime('Created', readonly=True),
'state': fields.selection([('normal','Free'), ('structured','Structured')], 'Communication Type', required=True),
'bank_statement_line_id': fields.many2one('account.bank.statement.line', 'Bank statement line'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'name': lambda obj, cursor, user, context: obj.pool.get('ir.sequence'
).get(cursor, user, 'payment.line'),
'state': 'normal',
'currency': _get_currency,
'company_currency': _get_currency,
'date': _get_date,
}
_sql_constraints = [
('name_uniq', 'UNIQUE(name)', 'The payment line name must be unique!'),
]
def onchange_move_line(self, cr, uid, ids, move_line_id, payment_type, date_prefered, date_scheduled, currency=False, company_currency=False, context=None):
data = {}
move_line_obj = self.pool.get('account.move.line')
data['amount_currency'] = data['communication'] = data['partner_id'] = data['bank_id'] = data['amount'] = False
if move_line_id:
line = move_line_obj.browse(cr, uid, move_line_id, context=context)
data['amount_currency'] = line.amount_residual_currency
res = self.onchange_amount(cr, uid, ids, data['amount_currency'], currency,
company_currency, context)
if res:
data['amount'] = res['value']['amount']
data['partner_id'] = line.partner_id.id
temp = line.currency_id and line.currency_id.id or False
if not temp:
if line.invoice:
data['currency'] = line.invoice.currency_id.id
else:
data['currency'] = temp
# calling onchange of partner and updating data dictionary
temp_dict = self.onchange_partner(cr, uid, ids, line.partner_id.id, payment_type)
data.update(temp_dict['value'])
data['communication'] = line.ref
if date_prefered == 'now':
#no payment date => immediate payment
data['date'] = False
elif date_prefered == 'due':
data['date'] = line.date_maturity
elif date_prefered == 'fixed':
data['date'] = date_scheduled
return {'value': data}
def onchange_amount(self, cr, uid, ids, amount, currency, cmpny_currency, context=None):
if (not amount) or (not cmpny_currency):
return {'value': {'amount': False}}
res = {}
currency_obj = self.pool.get('res.currency')
company_amount = currency_obj.compute(cr, uid, currency, cmpny_currency, amount)
res['amount'] = company_amount
return {'value': res}
def onchange_partner(self, cr, uid, ids, partner_id, payment_type, context=None):
data = {}
partner_obj = self.pool.get('res.partner')
payment_mode_obj = self.pool.get('payment.mode')
data['info_partner'] = data['bank_id'] = False
if partner_id:
part_obj = partner_obj.browse(cr, uid, partner_id, context=context)
partner = part_obj.name or ''
data['info_partner'] = self._get_info_partner(cr, uid, part_obj, context=context)
if part_obj.bank_ids and payment_type:
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type, context=context)
for bank in part_obj.bank_ids:
if bank.state in bank_type:
data['bank_id'] = bank.id
break
return {'value': data}
def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
res = super(payment_line, self).fields_get(cr, uid, fields, context, write_access, attributes)
if 'communication2' in res:
res['communication2'].setdefault('states', {})
res['communication2']['states']['structured'] = [('readonly', True)]
res['communication2']['states']['normal'] = [('readonly', False)]
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
willthames/ansible-modules-core | cloud/amazon/ec2_key.py | 26 | 7232 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment: aws
author: Vincent Viallet
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
'''
import sys
try:
    import boto.ec2
except ImportError:
    # sys must be imported above; the module_utils star imports at the
    # bottom of the file only happen after this point.
    print "failed=True msg='boto required for this module'"
    sys.exit(1)
import random
import string
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
key_material=dict(required=False),
state = dict(default='present', choices=['present', 'absent']),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
ec2 = ec2_connect(module)
# find the key if present
key = ec2.get_key_pair(name)
# Ensure requested key is absent
if state == 'absent':
if key:
'''found a match, delete it'''
try:
key.delete()
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if not ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be removed")
except Exception, e:
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
else:
key = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested key is present
elif state == 'present':
if key:
# existing key found
if key_material:
# EC2's fingerprints are non-trivial to generate, so push this key
# to a temporary name and make ec2 calculate the fingerprint for us.
#
# http://blog.jbrowne.com/?p=23
# https://forums.aws.amazon.com/thread.jspa?messageID=352828
# find an unused name
test = 'empty'
while test:
randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
tmpkeyname = "ansible-" + ''.join(randomchars)
test = ec2.get_key_pair(tmpkeyname)
# create tmp key
tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
# get tmp key fingerprint
tmpfingerprint = tmpkey.fingerprint
# delete tmp key
tmpkey.delete()
if key.fingerprint != tmpfingerprint:
if not module.check_mode:
key.delete()
key = ec2.import_key_pair(name, key_material)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be re-created")
changed = True
# if the key doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
if key_material:
'''We are providing the key, need to import'''
key = ec2.import_key_pair(name, key_material)
else:
'''
No material provided, let AWS handle the key creation and
retrieve the private key
'''
key = ec2.create_key_pair(name)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be created")
changed = True
if key:
data = {
'name': key.name,
'fingerprint': key.fingerprint
}
if key.material:
data.update({'private_key': key.material})
module.exit_json(changed=changed, key=data)
else:
module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
davidgardenier/frbpoppy | tests/lognlogs/local.py | 1 | 1611 | """Check the log N log F slope of a local population."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation
from frbpoppy.population import unpickle
from tests.convenience import plot_aa_style, rel_path
MAKE = True
if MAKE:
population = CosmicPopulation.simple(1e5, generate=True)
survey = Survey('perfect')
surv_pop = SurveyPopulation(population, survey)
surv_pop.name = 'lognlogflocal'
surv_pop.save()
else:
surv_pop = unpickle('lognlogflocal')
# Get parameter
parms = surv_pop.frbs.fluence
min_p = min(parms)
max_p = max(parms)
# Bin up
min_f = np.log10(min(parms))
max_f = np.log10(max(parms))
log_bins = np.logspace(min_f, max_f, 50)
hist, edges = np.histogram(parms, bins=log_bins)
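# Reversed cumulative sum gives the complementary cumulative count N(>F):
# n_gt_s[i] is the number of bursts with fluence above the i-th bin edge.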
n_gt_s = np.cumsum(hist[::-1])[::-1]
# Calculate alpha
alpha, alpha_err, norm = surv_pop.frbs.calc_logn_logs(parameter='fluence',
min_p=min_p,
max_p=max_p)
print(alpha, alpha_err, norm)
xs = 10**((np.log10(edges[:-1]) + np.log10(edges[1:])) / 2)
xs = xs[xs >= min_p]
xs = xs[xs <= max_p]
ys = [norm*x**(alpha) for x in xs]
plot_aa_style()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.step(edges[:-1], n_gt_s, where='post')
plt.plot(xs, ys, linestyle='--',
label=rf'$\alpha$ = {alpha:.3} $\pm$ {round(abs(alpha_err), 2)}')
plt.xlabel('Fluence (Jy ms)')
plt.ylabel(r'N(${>}Fluence$)')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(rel_path('plots/logn_logf_local.pdf'))
| mit |
Baumelbi/IntroPython2016 | students/crobison/session04/file_lab.py | 3 | 1359 | # Charles Robison
# 2016.10.21
# File Lab
#!/usr/bin/env python
import os
cwd = os.getcwd()
# write a program which prints the full path to all files
# in the current directory, one per line
for item in os.listdir(cwd):
print(cwd + "/" + item)
# write a program which copies a file from a source, to a
# destination (without using shutil, or the OS copy command)
file = open('file_lab01.txt', 'r')
file_text = file.read()
file_new = open('file_lab02.txt', 'w')
file_new.write(file_text)
file.close()
file_new.close()
# advanced: make it work for any size file: i.e. don’t read
# the entire contents of the file into memory at once.
with open('file_lab01.txt', 'r') as r, open('file_lab02.txt', 'w') as w:
    for line in r:
        w.write(line)
# The with block closes both files; no explicit close() calls are needed.
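# A chunked variant (sketch): reading fixed-size blocks keeps memory use
# constant even for huge or binary files; 'file_lab03.txt' is just a
# placeholder destination name.
with open('file_lab01.txt', 'rb') as r, open('file_lab03.txt', 'wb') as w:
    while True:
        chunk = r.read(64 * 1024)  # 64 KiB per read
        if not chunk:
            break
        w.write(chunk)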
# You will find the list I generated in the first class of all
# the students in the class, and what programming languages they
# have used in the past.
# Write a little script that reads that file, and generates a list
# of all the languages that have been used.
languages = []
with open('students_test.txt', 'r') as infile:
    for line in infile:
        if ':' not in line:
            continue
        # split(':', 1) gives exactly one (student, languages) pair per line;
        # iterating over a bare split() unpacked each fragment character by
        # character, which is what raised the original
        # "ValueError: too many values to unpack".
        student, known = line.split(':', 1)
        for language in known.split(','):
            language = language.strip()
            if language:
                languages.append(language)
print(languages)
| unlicense |
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/btm_utils.py | 374 | 10011 | "Utility functions used by the btm_matcher module"
from . import pytree
from .pgen2 import grammar, token
from .pygram import pattern_symbols, python_symbols
syms = pattern_symbols
pysyms = python_symbols
tokens = grammar.opmap
token_labels = token
TYPE_ANY = -1
TYPE_ALTERNATIVES = -2
TYPE_GROUP = -3
class MinNode(object):
"""This class serves as an intermediate representation of the
pattern tree during the conversion to sets of leaf-to-root
subpatterns"""
def __init__(self, type=None, name=None):
self.type = type
self.name = name
self.children = []
self.leaf = False
self.parent = None
self.alternatives = []
self.group = []
def __repr__(self):
return str(self.type) + ' ' + str(self.name)
def leaf_to_root(self):
"""Internal method. Returns a characteristic path of the
pattern tree. This method must be run for all leaves until the
        linear subpatterns are merged into a single one."""
node = self
subp = []
while node:
if node.type == TYPE_ALTERNATIVES:
node.alternatives.append(subp)
if len(node.alternatives) == len(node.children):
#last alternative
subp = [tuple(node.alternatives)]
node.alternatives = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == TYPE_GROUP:
node.group.append(subp)
#probably should check the number of leaves
if len(node.group) == len(node.children):
subp = get_characteristic_subpattern(node.group)
node.group = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == token_labels.NAME and node.name:
#in case of type=name, use the name instead
subp.append(node.name)
else:
subp.append(node.type)
node = node.parent
return subp
def get_linear_subpattern(self):
"""Drives the leaf_to_root method. The reason that
leaf_to_root must be run multiple times is because we need to
reject 'group' matches; for example the alternative form
(a | b c) creates a group [b c] that needs to be matched. Since
matching multiple linear patterns overcomes the automaton's
capabilities, leaf_to_root merges each group into a single
choice based on 'characteristic'ity,
i.e. (a|b c) -> (a|b) if b more characteristic than c
Returns: The most 'characteristic'(as defined by
get_characteristic_subpattern) path for the compiled pattern
tree.
"""
for l in self.leaves():
subp = l.leaf_to_root()
if subp:
return subp
def leaves(self):
"Generator that returns the leaves of the tree"
for child in self.children:
for x in child.leaves():
yield x
if not self.children:
yield self
def reduce_tree(node, parent=None):
"""
Internal function. Reduces a compiled pattern tree to an
intermediate representation suitable for feeding the
automaton. This also trims off any optional pattern elements(like
[a], a*).
"""
new_node = None
#switch on the node type
if node.type == syms.Matcher:
#skip
node = node.children[0]
    if node.type == syms.Alternatives:
#2 cases
if len(node.children) <= 2:
#just a single 'Alternative', skip this node
new_node = reduce_tree(node.children[0], parent)
else:
#real alternatives
new_node = MinNode(type=TYPE_ALTERNATIVES)
#skip odd children('|' tokens)
for child in node.children:
if node.children.index(child)%2:
continue
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
elif node.type == syms.Alternative:
if len(node.children) > 1:
new_node = MinNode(type=TYPE_GROUP)
for child in node.children:
reduced = reduce_tree(child, new_node)
if reduced:
new_node.children.append(reduced)
if not new_node.children:
# delete the group if all of the children were reduced to None
new_node = None
else:
new_node = reduce_tree(node.children[0], parent)
elif node.type == syms.Unit:
if (isinstance(node.children[0], pytree.Leaf) and
node.children[0].value == '('):
#skip parentheses
return reduce_tree(node.children[1], parent)
if ((isinstance(node.children[0], pytree.Leaf) and
node.children[0].value == '[')
or
(len(node.children)>1 and
hasattr(node.children[1], "value") and
node.children[1].value == '[')):
#skip whole unit if its optional
return None
leaf = True
details_node = None
alternatives_node = None
has_repeater = False
repeater_node = None
has_variable_name = False
for child in node.children:
if child.type == syms.Details:
leaf = False
details_node = child
elif child.type == syms.Repeater:
has_repeater = True
repeater_node = child
elif child.type == syms.Alternatives:
alternatives_node = child
if hasattr(child, 'value') and child.value == '=': # variable name
has_variable_name = True
#skip variable name
if has_variable_name:
#skip variable name, '='
name_leaf = node.children[2]
if hasattr(name_leaf, 'value') and name_leaf.value == '(':
# skip parenthesis
name_leaf = node.children[3]
else:
name_leaf = node.children[0]
#set node type
if name_leaf.type == token_labels.NAME:
#(python) non-name or wildcard
if name_leaf.value == 'any':
new_node = MinNode(type=TYPE_ANY)
else:
if hasattr(token_labels, name_leaf.value):
new_node = MinNode(type=getattr(token_labels, name_leaf.value))
else:
new_node = MinNode(type=getattr(pysyms, name_leaf.value))
elif name_leaf.type == token_labels.STRING:
#(python) name or character; remove the apostrophes from
#the string value
name = name_leaf.value.strip("'")
if name in tokens:
new_node = MinNode(type=tokens[name])
else:
new_node = MinNode(type=token_labels.NAME, name=name)
elif name_leaf.type == syms.Alternatives:
new_node = reduce_tree(alternatives_node, parent)
#handle repeaters
if has_repeater:
if repeater_node.children[0].value == '*':
#reduce to None
new_node = None
elif repeater_node.children[0].value == '+':
            #reduce to a single occurrence i.e. do nothing
pass
else:
#TODO: handle {min, max} repeaters
raise NotImplementedError
pass
#add children
if details_node and new_node is not None:
for child in details_node.children[1:-1]:
#skip '<', '>' markers
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
if new_node:
new_node.parent = parent
return new_node
def get_characteristic_subpattern(subpatterns):
"""Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars
"""
if not isinstance(subpatterns, list):
return subpatterns
if len(subpatterns)==1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
common_names = ['in', 'for', 'if' , 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return max(subpatterns, key=len)
def rec_test(sequence, test_func):
"""Tests test_func on all items of sequence and items of included
sub-iterables"""
for x in sequence:
if isinstance(x, (list, tuple)):
for y in rec_test(x, test_func):
yield y
else:
yield test_func(x)
| gpl-2.0 |
40223121/w17 | static/Brython3.1.1-20150328-091302/Lib/_thread.py | 740 | 4879 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
# Brython-specific to avoid circular references between threading and _threading_local
class _local:
    pass
| gpl-3.0 |
VanirAOSP/external_chromium_org | tools/perf/metrics/statistics_unittest.py | 23 | 6400 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import random
from metrics import statistics
def Relax(samples, iterations=10):
"""Lloyd relaxation in 1D.
Keeps the position of the first and last sample.
"""
for _ in xrange(0, iterations):
voronoi_boundaries = []
for i in xrange(1, len(samples)):
voronoi_boundaries.append((samples[i] + samples[i-1]) * 0.5)
relaxed_samples = []
relaxed_samples.append(samples[0])
for i in xrange(1, len(samples)-1):
relaxed_samples.append(
(voronoi_boundaries[i-1] + voronoi_boundaries[i]) * 0.5)
relaxed_samples.append(samples[-1])
samples = relaxed_samples
return samples
class StatisticsUnitTest(unittest.TestCase):
def testNormalizeSamples(self):
samples = []
normalized_samples, scale = statistics.NormalizeSamples(samples)
self.assertEquals(normalized_samples, samples)
self.assertEquals(scale, 1.0)
samples = [0.0, 0.0]
normalized_samples, scale = statistics.NormalizeSamples(samples)
self.assertEquals(normalized_samples, samples)
self.assertEquals(scale, 1.0)
samples = [0.0, 1.0/3.0, 2.0/3.0, 1.0]
normalized_samples, scale = statistics.NormalizeSamples(samples)
self.assertEquals(normalized_samples, [1.0/8.0, 3.0/8.0, 5.0/8.0, 7.0/8.0])
self.assertEquals(scale, 0.75)
samples = [1.0/8.0, 3.0/8.0, 5.0/8.0, 7.0/8.0]
normalized_samples, scale = statistics.NormalizeSamples(samples)
self.assertEquals(normalized_samples, samples)
self.assertEquals(scale, 1.0)
def testDiscrepancyRandom(self):
"""Tests NormalizeSamples and Discrepancy with random samples.
Generates 10 sets of 10 random samples, computes the discrepancy,
relaxes the samples using Llloyd's algorithm in 1D, and computes the
discrepancy of the relaxed samples. Discrepancy of the relaxed samples
must be less than or equal to the discrepancy of the original samples.
"""
random.seed(1234567)
for _ in xrange(0, 10):
samples = []
num_samples = 10
clock = 0.0
samples.append(clock)
for _ in xrange(1, num_samples):
clock += random.random()
samples.append(clock)
samples = statistics.NormalizeSamples(samples)[0]
d = statistics.Discrepancy(samples)
relaxed_samples = Relax(samples)
d_relaxed = statistics.Discrepancy(relaxed_samples)
self.assertTrue(d_relaxed <= d)
def testDiscrepancyAnalytic(self):
"""Computes discrepancy for sample sets with known statistics."""
interval_multiplier = 100000
samples = []
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertEquals(d, 1.0)
samples = [0.5]
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertEquals(round(d), 1.0)
samples = [0.0, 1.0]
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertAlmostEquals(round(d, 2), 1.0)
samples = [0.5, 0.5, 0.5]
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertAlmostEquals(d, 1.0)
samples = [1.0/8.0, 3.0/8.0, 5.0/8.0, 7.0/8.0]
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertAlmostEquals(round(d, 2), 0.25)
samples = [0.0, 1.0/3.0, 2.0/3.0, 1.0]
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertAlmostEquals(round(d, 2), 0.5)
samples = statistics.NormalizeSamples(samples)[0]
d = statistics.Discrepancy(samples, interval_multiplier)
self.assertAlmostEquals(round(d, 2), 0.25)
time_stamps_a = [0, 1, 2, 3, 5, 6]
time_stamps_b = [0, 1, 2, 3, 5, 7]
time_stamps_c = [0, 2, 3, 4]
time_stamps_d = [0, 2, 3, 4, 5]
d_abs_a = statistics.FrameDiscrepancy(time_stamps_a, True,
interval_multiplier)
d_abs_b = statistics.FrameDiscrepancy(time_stamps_b, True,
interval_multiplier)
d_abs_c = statistics.FrameDiscrepancy(time_stamps_c, True,
interval_multiplier)
d_abs_d = statistics.FrameDiscrepancy(time_stamps_d, True,
interval_multiplier)
d_rel_a = statistics.FrameDiscrepancy(time_stamps_a, False,
interval_multiplier)
d_rel_b = statistics.FrameDiscrepancy(time_stamps_b, False,
interval_multiplier)
d_rel_c = statistics.FrameDiscrepancy(time_stamps_c, False,
interval_multiplier)
d_rel_d = statistics.FrameDiscrepancy(time_stamps_d, False,
interval_multiplier)
self.assertTrue(d_abs_a < d_abs_b)
self.assertTrue(d_rel_a < d_rel_b)
self.assertTrue(d_rel_d < d_rel_c)
self.assertEquals(round(d_abs_d, 2), round(d_abs_c, 2))
def testPercentile(self):
# The 50th percentile is the median value.
self.assertEquals(3, statistics.Percentile([4, 5, 1, 3, 2], 50))
self.assertEquals(2.5, statistics.Percentile([5, 1, 3, 2], 50))
# When the list of values is empty, 0 is returned.
self.assertEquals(0, statistics.Percentile([], 50))
# When the given percentage is very low, the lowest value is given.
self.assertEquals(1, statistics.Percentile([2, 1, 5, 4, 3], 5))
# When the given percentage is very high, the highest value is given.
self.assertEquals(5, statistics.Percentile([5, 2, 4, 1, 3], 95))
# Linear interpolation between closest ranks is used. Using the example
# from <http://en.wikipedia.org/wiki/Percentile>:
self.assertEquals(27.5, statistics.Percentile([15, 20, 35, 40, 50], 40))
def testArithmeticMean(self):
# The ArithmeticMean function computes the simple average.
self.assertAlmostEquals(40/3.0, statistics.ArithmeticMean([10, 10, 20], 3))
self.assertAlmostEquals(15.0, statistics.ArithmeticMean([10, 20], 2))
# Both lists of values or single values can be given for either argument.
self.assertAlmostEquals(40/3.0, statistics.ArithmeticMean(40, [1, 1, 1]))
# If the 'count' is zero, then zero is returned.
self.assertEquals(0, statistics.ArithmeticMean(4.0, 0))
self.assertEquals(0, statistics.ArithmeticMean(4.0, []))
| bsd-3-clause |
ocefpaf/cartopy | lib/cartopy/tests/crs/test_transverse_mercator.py | 1 | 4985 | # (C) British Crown Copyright 2013 - 2019, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the Transverse Mercator projection, including OSGB and OSNI.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import pytest
import cartopy.crs as ccrs
@pytest.mark.parametrize('approx', [True, False])
class TestTransverseMercator(object):
def setup_class(self):
self.point_a = (-3.474083, 50.727301)
self.point_b = (0.5, 50.5)
self.src_crs = ccrs.PlateCarree()
def test_default(self, approx):
proj = ccrs.TransverseMercator(approx=approx)
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res,
(-245269.53181, 5627508.74355),
decimal=5)
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (35474.63566645,
5596583.41949901))
def test_osgb_vals(self, approx):
proj = ccrs.TransverseMercator(central_longitude=-2,
central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=ccrs.Globe(datum='OSGB36',
ellipse='airy'),
approx=approx)
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (295971.28668, 93064.27666),
decimal=5)
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (577274.98380, 69740.49227),
decimal=5)
def test_nan(self, approx):
if not approx:
pytest.xfail('Proj does not return NaN correctly with etmerc.')
proj = ccrs.TransverseMercator(approx=approx)
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
class TestOSGB(object):
def setup_class(self):
self.point_a = (-3.474083, 50.727301)
self.point_b = (0.5, 50.5)
self.src_crs = ccrs.PlateCarree()
self.nan = float('nan')
@pytest.mark.parametrize('approx', [True, False])
def test_default(self, approx):
proj = ccrs.OSGB(approx=approx)
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (295971.28668, 93064.27666),
decimal=5)
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (577274.98380, 69740.49227),
decimal=5)
def test_nan(self):
proj = ccrs.OSGB(approx=True)
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
class TestOSNI(object):
def setup_class(self):
self.point_a = (-6.826286, 54.725116)
self.src_crs = ccrs.PlateCarree()
self.nan = float('nan')
@pytest.mark.parametrize('approx', [True, False])
def test_default(self, approx):
proj = ccrs.OSNI(approx=approx)
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(
res, (275614.26762651594, 386984.206429612),
decimal=0 if ccrs.PROJ4_VERSION < (5, 0, 0) else 6)
def test_nan(self):
proj = ccrs.OSNI(approx=True)
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
| lgpl-3.0 |
devGregA/code | build/lib.linux-x86_64-2.7/scrapy/commands/crawl.py | 8 | 2610 | import os
from scrapy.command import ScrapyCommand
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class Command(ScrapyCommand):
requires_project = True
def syntax(self):
return "[options] <spider>"
def short_desc(self):
return "Run a spider"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
help="set spider argument (may be repeated)")
parser.add_option("-o", "--output", metavar="FILE",
help="dump scraped items into FILE (use - for stdout)")
parser.add_option("-t", "--output-format", metavar="FORMAT",
help="format to use for dumping items with -o")
def process_options(self, args, opts):
ScrapyCommand.process_options(self, args, opts)
try:
opts.spargs = arglist_to_dict(opts.spargs)
except ValueError:
raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
if opts.output:
if opts.output == '-':
self.settings.set('FEED_URI', 'stdout:', priority='cmdline')
else:
self.settings.set('FEED_URI', opts.output, priority='cmdline')
valid_output_formats = (
list(self.settings.getdict('FEED_EXPORTERS').keys()) +
list(self.settings.getdict('FEED_EXPORTERS_BASE').keys())
)
if not opts.output_format:
opts.output_format = os.path.splitext(opts.output)[1].replace(".", "")
if opts.output_format not in valid_output_formats:
raise UsageError("Unrecognized output format '%s', set one"
" using the '-t' switch or as a file extension"
" from the supported list %s" % (opts.output_format,
tuple(valid_output_formats)))
self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')
def run(self, args, opts):
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
spname = args[0]
crawler = self.crawler_process.create_crawler()
spider = crawler.spiders.create(spname, **opts.spargs)
crawler.crawl(spider)
self.crawler_process.start()
| bsd-3-clause |
edsu/tweepy | tests/test_cursors.py | 44 | 1861 | from tweepy import Cursor
from .config import create_auth
from .config import TweepyTestCase, username, use_replay, tape
import six
if six.PY3:
import unittest
else:
import unittest2 as unittest
class TweepyCursorTests(TweepyTestCase):
@tape.use_cassette('testidcursoritems.json')
def testidcursoritems(self):
items = list(Cursor(self.api.user_timeline).items(25))
self.assertEqual(len(items), 25)
@tape.use_cassette('testidcursorpages.json')
def testidcursorpages(self):
pages = list(Cursor(self.api.user_timeline).pages(5))
self.assertEqual(len(pages), 5)
@tape.use_cassette('testcursorcursoritems.json')
def testcursorcursoritems(self):
items = list(Cursor(self.api.friends_ids).items(10))
self.assertEqual(len(items), 10)
items = list(Cursor(self.api.followers_ids, username).items(10))
self.assertEqual(len(items), 10)
@tape.use_cassette('testcursorcursorpages.json')
def testcursorcursorpages(self):
pages = list(Cursor(self.api.friends_ids).pages(1))
        self.assertEqual(len(pages), 1)
pages = list(Cursor(self.api.followers_ids, username).pages(1))
        self.assertEqual(len(pages), 1)
@tape.use_cassette('testcursorsetstartcursor.json')
def testcursorsetstartcursor(self):
c = Cursor(self.api.friends_ids, cursor=123456)
self.assertEqual(c.iterator.next_cursor, 123456)
self.assertFalse('cursor' in c.iterator.kargs)
@tape.use_cassette('testcursornext.json')
def testcursornext(self):
"""
Test cursor.next() behavior, id being passed correctly.
Regression test for issue #518
"""
cursor = Cursor(self.api.user_timeline, id='twitter').items(5)
status = cursor.next()
        self.assertEqual(status.user.screen_name, 'twitter')
| mit |
cytec/SickRage | lib/hachoir_parser/container/riff.py | 86 | 16938 | # -*- coding: UTF-8 -*-
"""
RIFF parser, able to parse:
* AVI video container
* WAV audio container
* CDA file
Documents:
- libavformat source code from ffmpeg library
http://ffmpeg.mplayerhq.hu/
- Video for Windows Programmer's Guide
http://www.opennet.ru/docs/formats/avi.txt
- What is an animated cursor?
http://www.gdgsoft.com/anituner/help/aniformat.htm
Authors:
* Aurélien Jacobs
* Mickaël KENIKSSI
* Victor Stinner
Changelog:
* 2007-03-30: support ACON (animated icons)
* 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser
* 2006-08-03: creation of CDA parser by Mickaël KENIKSSI
* 2005-06-21: creation of WAV parser by Victor Stinner
* 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs
Thanks to:
 * Wojtek Kaniewski (wojtekka AT logonet.com.pl) for his CDA file
format information
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
Bit, NullBits, NullBytes,
RawBytes, String, PaddingBytes,
SubFile)
from hachoir_core.tools import alignValue, humanDuration
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import filesizeHandler, textHandler
from hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from hachoir_parser.image.ico import IcoFile
from datetime import timedelta
def parseText(self):
yield String(self, "text", self["size"].value,
strip=" \0", truncate="\0",
charset="ISO-8859-1")
def parseRawFormat(self, size):
yield RawBytes(self, "raw_format", size)
def parseVideoFormat(self, size):
yield UInt32(self, "video_size", "Video format: Size")
yield UInt32(self, "width", "Video format: Width")
yield UInt32(self, "height", "Video format: Height")
yield UInt16(self, "panes", "Video format: Panes")
yield UInt16(self, "depth", "Video format: Depth")
yield UInt32(self, "tag1", "Video format: Tag1")
yield UInt32(self, "img_size", "Video format: Image size")
yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter")
yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter")
yield UInt32(self, "clr_used", "Video format: ClrUsed")
yield UInt32(self, "clr_important", "Video format: ClrImportant")
def parseAudioFormat(self, size):
yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name)
yield UInt16(self, "channel", "Audio format: Channels")
yield UInt32(self, "sample_rate", "Audio format: Sample rate")
yield UInt32(self, "bit_rate", "Audio format: Bit rate")
yield UInt16(self, "block_align", "Audio format: Block align")
if size >= 16:
yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample")
if size >= 18:
yield UInt16(self, "ext_size", "Audio format: Size of extra information")
if size >= 28: # and self["a_channel"].value > 2
yield UInt16(self, "reserved", "Audio format: ")
yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask")
yield UInt32(self, "subformat", "Audio format: Subformat id")
def parseAVIStreamFormat(self):
size = self["size"].value
strtype = self["../stream_hdr/stream_type"].value
TYPE_HANDLER = {
"vids": (parseVideoFormat, 40),
"auds": (parseAudioFormat, 16)
}
handler = parseRawFormat
if strtype in TYPE_HANDLER:
info = TYPE_HANDLER[strtype]
if info[1] <= size:
handler = info[0]
for field in handler(self, size):
yield field
def parseAVIStreamHeader(self):
if self["size"].value != 56:
raise ParserError("Invalid stream header size")
yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII")
field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII")
if self["stream_type"].value == "vids":
yield Enum(field, video_fourcc_name, lambda text: text.upper())
else:
yield field
yield UInt32(self, "flags", "Stream flags")
yield UInt16(self, "priority", "Stream priority")
yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0")
yield UInt32(self, "init_frames", "InitialFrames")
yield UInt32(self, "scale", "Time scale")
yield UInt32(self, "rate", "Divide by scale to give frame rate")
yield UInt32(self, "start", "Stream start time (unit: rate/scale)")
yield UInt32(self, "length", "Stream length (unit: rate/scale)")
yield UInt32(self, "buf_size", "Suggested buffer size")
yield UInt32(self, "quality", "Stream quality")
yield UInt32(self, "sample_size", "Size of samples")
yield UInt16(self, "left", "Destination rectangle (left)")
yield UInt16(self, "top", "Destination rectangle (top)")
yield UInt16(self, "right", "Destination rectangle (right)")
yield UInt16(self, "bottom", "Destination rectangle (bottom)")
class RedBook(FieldSet):
"""
RedBook offset parser, used in CD audio (.cda) file
"""
def createFields(self):
yield UInt8(self, "frame")
yield UInt8(self, "second")
yield UInt8(self, "minute")
yield PaddingBytes(self, "notused", 1)
def formatSerialNumber(field):
"""
    Format a disc serial number.
Eg. 0x00085C48 => "0008-5C48"
"""
sn = field.value
return "%04X-%04X" % (sn >> 16, sn & 0xFFFF)
def parseCDDA(self):
"""
HSG address format: number of 1/75 second
HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset)
HSG length = (minute*60 + second)*75 + frame (from RB length)
"""
yield UInt16(self, "cda_version", "CD file version (currently 1)")
yield UInt16(self, "track_no", "Number of track")
yield textHandler(UInt32(self, "disc_serial", "Disc serial number"),
formatSerialNumber)
yield UInt32(self, "hsg_offset", "Track offset (HSG format)")
yield UInt32(self, "hsg_length", "Track length (HSG format)")
yield RedBook(self, "rb_offset", "Track offset (Red-book format)")
yield RedBook(self, "rb_length", "Track length (Red-book format)")
def parseWAVFormat(self):
size = self["size"].value
if size not in (16, 18):
self.warning("Format with size of %s bytes is not supported!" % size)
yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name)
yield UInt16(self, "nb_channel", "Number of audio channel")
yield UInt32(self, "sample_per_sec", "Sample per second")
yield UInt32(self, "byte_per_sec", "Average byte per second")
yield UInt16(self, "block_align", "Block align")
yield UInt16(self, "bit_per_sample", "Bits per sample")
def parseWAVFact(self):
yield UInt32(self, "nb_sample", "Number of samples in audio stream")
def parseAviHeader(self):
yield UInt32(self, "microsec_per_frame", "Microsecond per frame")
yield UInt32(self, "max_byte_per_sec", "Maximum byte per second")
yield NullBytes(self, "reserved", 4)
# Flags
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "has_index")
yield Bit(self, "must_use_index")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "is_interleaved")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "trust_cktype")
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "was_capture_file")
yield Bit(self, "is_copyrighted")
yield NullBits(self, "reserved[]", 14)
yield UInt32(self, "total_frame", "Total number of frames in the video")
yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)")
yield UInt32(self, "nb_stream", "Number of streams")
yield UInt32(self, "sug_buf_size", "Suggested buffer size")
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "scale")
yield UInt32(self, "rate")
yield UInt32(self, "start")
yield UInt32(self, "length")
def parseODML(self):
yield UInt32(self, "total_frame", "Real number of frame of OpenDML video")
padding = self["size"].value - 4
if 0 < padding:
yield NullBytes(self, "padding[]", padding)
class AVIIndexEntry(FieldSet):
size = 16*8
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield UInt32(self, "flags")
yield UInt32(self, "start", "Offset from start of movie data")
yield UInt32(self, "length")
def parseIndex(self):
while not self.eof:
yield AVIIndexEntry(self, "index[]")
class Chunk(FieldSet):
TAG_INFO = {
        # This dictionary is edited by RiffFile.validate()
"LIST": ("list[]", None, "Sub-field list"),
"JUNK": ("junk[]", None, "Junk (padding)"),
# Metadata
"INAM": ("title", parseText, "Document title"),
"IART": ("artist", parseText, "Artist"),
"ICMT": ("comment", parseText, "Comment"),
"ICOP": ("copyright", parseText, "Copyright"),
"IENG": ("author", parseText, "Author"),
"ICRD": ("creation_date", parseText, "Creation date"),
"ISFT": ("producer", parseText, "Producer"),
"IDIT": ("datetime", parseText, "Date time"),
        # TODO: see below
# "strn": Stream description
# TWOCC code, movie/field[]/tag.value[2:4]:
# "db": "Uncompressed video frame",
# "dc": "Compressed video frame",
# "wb": "Audio data",
# "pc": "Palette change"
}
subtag_info = {
"INFO": ("info", "File informations"),
"hdrl": ("headers", "Headers"),
"strl": ("stream[]", "Stream header list"),
"movi": ("movie", "Movie stream"),
"odml": ("odml", "ODML"),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
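        # Total chunk size: 8-byte header (4-byte tag + 4-byte size) plus the
        # payload padded to a 2-byte boundary, converted to bits.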
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["tag"].value
if tag in self.TAG_INFO:
self.tag_info = self.TAG_INFO[tag]
if tag == "LIST":
subtag = self["subtag"].value
if subtag in self.subtag_info:
info = self.subtag_info[subtag]
self.tag_info = (info[0], None, info[1])
self._name = self.tag_info[0]
self._description = self.tag_info[2]
else:
self.tag_info = ("field[]", None, None)
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield filesizeHandler(UInt32(self, "size", "Size"))
if not self["size"].value:
return
if self["tag"].value == "LIST":
yield String(self, "subtag", 4, "Sub-tag", charset="ASCII")
handler = self.tag_info[1]
while 8 < (self.size - self.current_size)/8:
field = self.__class__(self, "field[]")
yield field
if (field.size/8) % 2 != 0:
yield UInt8(self, "padding[]", "Padding")
else:
handler = self.tag_info[1]
if handler:
for field in handler(self):
yield field
else:
yield RawBytes(self, "raw_content", self["size"].value)
padding = self.seekBit(self._size)
if padding:
yield padding
def createDescription(self):
tag = self["tag"].display
return u"Chunk (tag %s)" % tag
class ChunkAVI(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
"strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"),
"strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"),
"avih": ("avi_hdr", parseAviHeader, "AVI header"),
"idx1": ("index", parseIndex, "Stream index"),
"dmlh": ("odml_hdr", parseODML, "ODML header"),
})
class ChunkCDDA(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("cdda", parseCDDA, "CD audio informations"),
})
class ChunkWAVE(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("format", parseWAVFormat, "Audio format"),
'fact': ("nb_sample", parseWAVFact, "Number of samples"),
'data': ("audio_data", None, "Audio stream data"),
})
def parseAnimationHeader(self):
yield UInt32(self, "hdr_size", "Size of header (36 bytes)")
if self["hdr_size"].value != 36:
self.warning("Animation header with unknown size (%s)" % self["size"].value)
yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor")
yield UInt32(self, "nb_step", "Number of Blits before the animation cycles")
yield UInt32(self, "cx")
yield UInt32(self, "cy")
yield UInt32(self, "bit_count")
yield UInt32(self, "planes")
yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present")
yield Bit(self, "is_icon")
yield NullBits(self, "padding", 31)
def parseAnimationSequence(self):
while not self.eof:
yield UInt32(self, "icon[]")
def formatJiffie(field):
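    # A jiffie here is 1/60th of a second, so e.g. (illustrative) a field
    # value of 120 formats as a 2-second duration.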
sec = float(field.value) / 60
return humanDuration(timedelta(seconds=sec))
def parseAnimationRate(self):
while not self.eof:
yield textHandler(UInt32(self, "rate[]"), formatJiffie)
def parseIcon(self):
yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile)
class ChunkACON(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'anih': ("anim_hdr", parseAnimationHeader, "Animation header"),
'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"),
'rate': ("anim_rate", parseAnimationRate, "Animation sequence"),
'icon': ("icon[]", parseIcon, "Icon"),
})
class RiffFile(Parser):
PARSER_TAGS = {
"id": "riff",
"category": "container",
"file_ext": ("avi", "cda", "wav", "ani"),
"min_size": 16*8,
"mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"),
# FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )"
"magic": (
("AVI LIST", 8*8),
("WAVEfmt ", 8*8),
("CDDAfmt ", 8*8),
("ACONanih", 8*8),
),
"description": "Microsoft RIFF container"
}
VALID_TYPES = {
"WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"),
"CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"),
"AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"),
"ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"),
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "RIFF":
return "Wrong signature"
if self["type"].value not in self.VALID_TYPES:
return "Unknown RIFF content type"
return True
def createFields(self):
yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize", "File size"))
yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII")
# Choose chunk type depending on file type
try:
chunk_cls = self.VALID_TYPES[self["type"].value][0]
except KeyError:
chunk_cls = Chunk
# Parse all chunks up to filesize
while self.current_size < self["filesize"].value*8+8:
yield chunk_cls(self, "chunk[]")
if not self.eof:
yield RawBytes(self, "padding[]", (self.size-self.current_size)/8)
def createMimeType(self):
try:
return self.VALID_TYPES[self["type"].value][1]
except KeyError:
return None
def createDescription(self):
tag = self["type"].value
if tag == "AVI ":
desc = u"Microsoft AVI video"
if "headers/avi_hdr" in self:
header = self["headers/avi_hdr"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
microsec = header["microsec_per_frame"].value
if microsec:
desc += ", %.1f fps" % (1000000.0 / microsec)
if "total_frame" in header and header["total_frame"].value:
                        delta = timedelta(microseconds=float(header["total_frame"].value) * microsec)
desc += ", " + humanDuration(delta)
return desc
else:
try:
return self.VALID_TYPES[tag][2]
except KeyError:
return u"Microsoft RIFF container"
def createContentSize(self):
size = (self["filesize"].value + 8) * 8
return min(size, self.stream.size)
def createFilenameSuffix(self):
try:
return self.VALID_TYPES[self["type"].value][3]
except KeyError:
return ".riff"
| gpl-3.0 |
imply/chuu | build/android/pylib/device_stats_monitor.py | 31 | 4305 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for iotop/top style profiling for android."""
import collections
import json
import os
import subprocess
import sys
import urllib
import constants
import io_stats_parser
class DeviceStatsMonitor(object):
"""Class for collecting device stats such as IO/CPU usage.
Args:
    adb: Instance of AndroidCommands.
hz: Frequency at which to sample device stats.
"""
DEVICE_PATH = constants.TEST_EXECUTABLE_DIR + '/device_stats_monitor'
PROFILE_PATH = (constants.DEVICE_PERF_OUTPUT_DIR +
'/device_stats_monitor.profile')
RESULT_VIEWER_PATH = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'device_stats_monitor.html'))
def __init__(self, adb, hz, build_type):
self._adb = adb
host_path = os.path.abspath(os.path.join(
constants.DIR_SOURCE_ROOT, 'out', build_type, 'device_stats_monitor'))
self._adb.PushIfNeeded(host_path, DeviceStatsMonitor.DEVICE_PATH)
self._hz = hz
def Start(self):
"""Starts device stats monitor on the device."""
self._adb.SetProtectedFileContents(DeviceStatsMonitor.PROFILE_PATH, '')
self._process = subprocess.Popen(
['adb', 'shell', '%s --hz=%d %s' % (
DeviceStatsMonitor.DEVICE_PATH, self._hz,
DeviceStatsMonitor.PROFILE_PATH)])
def StopAndCollect(self, output_path):
"""Stops monitoring and saves results.
Args:
output_path: Path to save results.
Returns:
String of URL to load results in browser.
"""
assert self._process
self._adb.KillAll(DeviceStatsMonitor.DEVICE_PATH)
self._process.wait()
profile = self._adb.GetFileContents(DeviceStatsMonitor.PROFILE_PATH)
results = collections.defaultdict(list)
last_io_stats = None
last_cpu_stats = None
for line in profile:
if ' mmcblk0 ' in line:
stats = io_stats_parser.ParseIoStatsLine(line)
if last_io_stats:
results['sectors_read'].append(stats.num_sectors_read -
last_io_stats.num_sectors_read)
results['sectors_written'].append(stats.num_sectors_written -
last_io_stats.num_sectors_written)
last_io_stats = stats
elif line.startswith('cpu '):
stats = self._ParseCpuStatsLine(line)
if last_cpu_stats:
results['user'].append(stats.user - last_cpu_stats.user)
results['nice'].append(stats.nice - last_cpu_stats.nice)
results['system'].append(stats.system - last_cpu_stats.system)
results['idle'].append(stats.idle - last_cpu_stats.idle)
results['iowait'].append(stats.iowait - last_cpu_stats.iowait)
results['irq'].append(stats.irq - last_cpu_stats.irq)
          results['softirq'].append(stats.softirq - last_cpu_stats.softirq)
last_cpu_stats = stats
units = {
'sectors_read': 'sectors',
'sectors_written': 'sectors',
'user': 'jiffies',
'nice': 'jiffies',
'system': 'jiffies',
'idle': 'jiffies',
'iowait': 'jiffies',
'irq': 'jiffies',
'softirq': 'jiffies',
}
with open(output_path, 'w') as f:
f.write('display(%d, %s, %s);' % (self._hz, json.dumps(results), units))
return 'file://%s?results=file://%s' % (
DeviceStatsMonitor.RESULT_VIEWER_PATH, urllib.quote(output_path))
@staticmethod
def _ParseCpuStatsLine(line):
"""Parses a line of cpu stats into a CpuStats named tuple."""
# Field definitions: http://www.linuxhowtos.org/System/procstat.htm
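    # An aggregate cpu line looks like (illustrative values):
    #   cpu  84282 317 15212 1049115 6049 10 334 0
    # i.e. a device name followed by user/nice/system/idle/iowait/irq/softirq.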
cpu_stats = collections.namedtuple('CpuStats',
['device',
'user',
'nice',
'system',
'idle',
'iowait',
'irq',
'softirq',
])
fields = line.split()
return cpu_stats._make([fields[0]] + [int(f) for f in fields[1:8]])
| bsd-3-clause |
anryko/ansible | lib/ansible/plugins/action/nxos_file_copy.py | 9 | 22728 | #
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import hashlib
import os
import re
import time
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_bytes, to_native
from ansible.module_utils.common import validation
from ansible.module_utils.connection import Connection
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible.module_utils.compat.paramiko import paramiko
from ansible.module_utils import six
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
HAS_PEXPECT = False
display = Display()
class ActionModule(ActionBase):
def process_playbook_values(self):
''' Get playbook values and perform input validation '''
argument_spec = dict(
vrf=dict(type='str', default='management'),
connect_ssh_port=dict(type='int', default=22),
file_system=dict(type='str', default='bootflash:'),
file_pull=dict(type='bool', default=False),
file_pull_timeout=dict(type='int', default=300),
file_pull_compact=dict(type='bool', default=False),
file_pull_kstack=dict(type='bool', default=False),
local_file=dict(type='path'),
local_file_directory=dict(type='path'),
remote_file=dict(type='path'),
remote_scp_server=dict(type='str'),
remote_scp_server_user=dict(type='str'),
remote_scp_server_password=dict(no_log=True),
)
playvals = {}
# Process key value pairs from playbook task
for key in argument_spec.keys():
playvals[key] = self._task.args.get(key, argument_spec[key].get('default'))
if playvals[key] is None:
continue
option_type = argument_spec[key].get('type', 'str')
try:
if option_type == 'str':
playvals[key] = validation.check_type_str(playvals[key])
elif option_type == 'int':
playvals[key] = validation.check_type_int(playvals[key])
elif option_type == 'bool':
playvals[key] = validation.check_type_bool(playvals[key])
elif option_type == 'path':
playvals[key] = validation.check_type_path(playvals[key])
else:
raise AnsibleError('Unrecognized type <{0}> for playbook parameter <{1}>'.format(option_type, key))
except (TypeError, ValueError) as e:
raise AnsibleError("argument %s is of type %s and we were unable to convert to %s: %s"
% (key, type(playvals[key]), option_type, to_native(e)))
# Validate playbook dependencies
if playvals['file_pull']:
if playvals.get('remote_file') is None:
raise AnsibleError('Playbook parameter <remote_file> required when <file_pull> is True')
if playvals.get('remote_scp_server') is None:
raise AnsibleError('Playbook parameter <remote_scp_server> required when <file_pull> is True')
if playvals['remote_scp_server'] or \
playvals['remote_scp_server_user']:
if None in (playvals['remote_scp_server'],
playvals['remote_scp_server_user']):
params = '<remote_scp_server>, <remote_scp_server_user>'
raise AnsibleError('Playbook parameters {0} must be set together'.format(params))
return playvals
def check_library_dependencies(self, file_pull):
if file_pull:
if not HAS_PEXPECT:
msg = 'library pexpect is required when file_pull is True but does not appear to be '
msg += 'installed. It can be installed using `pip install pexpect`'
raise AnsibleError(msg)
else:
if paramiko is None:
msg = 'library paramiko is required when file_pull is False but does not appear to be '
msg += 'installed. It can be installed using `pip install paramiko`'
raise AnsibleError(msg)
if not HAS_SCP:
msg = 'library scp is required when file_pull is False but does not appear to be '
msg += 'installed. It can be installed using `pip install scp`'
raise AnsibleError(msg)
def md5sum_check(self, dst, file_system):
command = 'show file {0}{1} md5sum'.format(file_system, dst)
remote_filehash = self.conn.exec_command(command)
remote_filehash = to_bytes(remote_filehash, errors='surrogate_or_strict')
local_file = self.playvals['local_file']
try:
with open(local_file, 'rb') as f:
filecontent = f.read()
except (OSError, IOError) as exc:
raise AnsibleError('Error reading the file: {0}'.format(to_text(exc)))
filecontent = to_bytes(filecontent, errors='surrogate_or_strict')
local_filehash = hashlib.md5(filecontent).hexdigest()
if local_filehash == remote_filehash:
return True
else:
return False
def remote_file_exists(self, remote_file, file_system):
command = 'dir {0}/{1}'.format(file_system, remote_file)
body = self.conn.exec_command(command)
if 'No such file' in body:
return False
else:
return self.md5sum_check(remote_file, file_system)
def verify_remote_file_exists(self, dst, file_system):
command = 'dir {0}/{1}'.format(file_system, dst)
body = self.conn.exec_command(command)
if 'No such file' in body:
return 0
return body.split()[0].strip()
def local_file_exists(self, file):
return os.path.isfile(file)
def get_flash_size(self, file_system):
command = 'dir {0}'.format(file_system)
body = self.conn.exec_command(command)
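        # The 'dir' listing ends with a summary line such as (illustrative)
        # "1578336256 bytes free", from which the free space is extracted below.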
match = re.search(r'(\d+) bytes free', body)
if match:
bytes_free = match.group(1)
return int(bytes_free)
match = re.search(r'No such file or directory', body)
if match:
raise AnsibleError('Invalid nxos filesystem {0}'.format(file_system))
else:
raise AnsibleError('Unable to determine size of filesystem {0}'.format(file_system))
def enough_space(self, file, file_system):
flash_size = self.get_flash_size(file_system)
file_size = os.path.getsize(file)
if file_size > flash_size:
return False
return True
def transfer_file_to_device(self, remote_file):
timeout = self.socket_timeout
local_file = self.playvals['local_file']
file_system = self.playvals['file_system']
if not self.enough_space(local_file, file_system):
raise AnsibleError('Could not transfer file. Not enough space on device.')
# frp = full_remote_path, flp = full_local_path
frp = '{0}{1}'.format(file_system, remote_file)
flp = os.path.join(os.path.abspath(local_file))
try:
self.conn.copy_file(source=flp, destination=frp, proto='scp', timeout=timeout)
except Exception as exc:
self.results['failed'] = True
self.results['msg'] = ('Exception received : %s' % exc)
def file_push(self):
local_file = self.playvals['local_file']
remote_file = self.playvals['remote_file'] or os.path.basename(local_file)
file_system = self.playvals['file_system']
if not self.local_file_exists(local_file):
raise AnsibleError('Local file {0} not found'.format(local_file))
remote_exists = self.remote_file_exists(remote_file, file_system)
if not remote_exists:
self.results['changed'] = True
file_exists = False
else:
self.results['transfer_status'] = 'No Transfer: File already copied to remote device.'
file_exists = True
if not self.play_context.check_mode and not file_exists:
self.transfer_file_to_device(remote_file)
self.results['transfer_status'] = 'Sent: File copied to remote device.'
self.results['local_file'] = local_file
self.results['remote_file'] = remote_file
def copy_file_from_remote(self, local, local_file_directory, file_system):
self.results['failed'] = False
nxos_hostname = self.play_context.remote_addr
nxos_username = self.play_context.remote_user
nxos_password = self.play_context.password
port = self.playvals['connect_ssh_port']
# Build copy command components that will be used to initiate copy from the nxos device.
cmdroot = 'copy scp://'
ruser = self.playvals['remote_scp_server_user'] + '@'
rserver = self.playvals['remote_scp_server']
rfile = self.playvals['remote_file'] + ' '
vrf = ' vrf ' + self.playvals['vrf']
local_dir_root = '/'
if self.playvals['file_pull_compact']:
compact = ' compact '
else:
compact = ''
if self.playvals['file_pull_kstack']:
kstack = ' use-kstack '
else:
kstack = ''
def process_outcomes(session, timeout=None):
if timeout is None:
timeout = 10
outcome = {}
outcome['user_response_required'] = False
outcome['password_prompt_detected'] = False
outcome['existing_file_with_same_name'] = False
outcome['final_prompt_detected'] = False
outcome['copy_complete'] = False
outcome['expect_timeout'] = False
outcome['error'] = False
outcome['error_data'] = None
# Possible outcomes key:
# 0) - Are you sure you want to continue connecting (yes/no)
            # 1) - Password: or @server's password:
# 2) - Warning: There is already a file existing with this name. Do you want to overwrite (y/n)?[n]
# 3) - Timeout conditions
# 4) - No space on nxos device file_system
# 5) - Username/Password or file permission issues
# 6) - File does not exist on remote scp server
# 7) - invalid nxos command
# 8) - compact option not supported
# 9) - compaction attempt failed
# 10) - other failures like attempting to compact non image file
# 11) - failure to resolve hostname
# 12) - Too many authentication failures
# 13) - Copy to / from this server not permitted
# 14) - Copy completed without issues
# 15) - nxos_router_prompt#
# 16) - pexpect timeout
possible_outcomes = [r'sure you want to continue connecting \(yes/no\)\? ',
'(?i)Password: ',
'file existing with this name',
'timed out',
'(?i)No space.*#',
'(?i)Permission denied.*#',
'(?i)No such file.*#',
'.*Invalid command.*#',
'Compaction is not supported on this platform.*#',
'Compact of.*failed.*#',
'(?i)Failed.*#',
'(?i)Could not resolve hostname',
'(?i)Too many authentication failures',
r'(?i)Copying to\/from this server name is not permitted',
'(?i)Copy complete',
r'#\s',
pexpect.TIMEOUT]
index = session.expect(possible_outcomes, timeout=timeout)
# Each index maps to items in possible_outcomes
if index == 0:
outcome['user_response_required'] = True
return outcome
elif index == 1:
outcome['password_prompt_detected'] = True
return outcome
elif index == 2:
outcome['existing_file_with_same_name'] = True
return outcome
elif index in [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]:
before = session.before.strip().replace(' \x08', '')
after = session.after.strip().replace(' \x08', '')
outcome['error'] = True
outcome['error_data'] = 'COMMAND {0} ERROR {1}'.format(before, after)
return outcome
elif index == 14:
outcome['copy_complete'] = True
return outcome
elif index == 15:
outcome['final_prompt_detected'] = True
return outcome
elif index == 16:
# The before property will contain all text up to the expected string pattern.
# The after string will contain the text that was matched by the expected pattern.
outcome['expect_timeout'] = True
outcome['error_data'] = 'Expect Timeout error occurred: BEFORE {0} AFTER {1}'.format(session.before, session.after)
return outcome
else:
outcome['error'] = True
outcome['error_data'] = 'Unrecognized error occurred: BEFORE {0} AFTER {1}'.format(session.before, session.after)
return outcome
# Spawn pexpect connection to NX-OS device.
nxos_session = pexpect.spawn('ssh ' + nxos_username + '@' + nxos_hostname + ' -p' + str(port))
# There might be multiple user_response_required prompts or intermittent timeouts
# spawning the expect session so loop up to 24 times during the spawn process.
max_attempts = 24
for connect_attempt in range(max_attempts):
outcome = process_outcomes(nxos_session)
if outcome['user_response_required']:
nxos_session.sendline('yes')
continue
if outcome['password_prompt_detected']:
time.sleep(3)
nxos_session.sendline(nxos_password)
continue
if outcome['final_prompt_detected']:
break
if outcome['error'] or outcome['expect_timeout']:
# Error encountered, try to spawn expect session n more times up to max_attempts - 1
                if connect_attempt < max_attempts - 1:
outcome['error'] = False
outcome['expect_timeout'] = False
nxos_session.close()
nxos_session = pexpect.spawn('ssh ' + nxos_username + '@' + nxos_hostname + ' -p' + str(port))
continue
self.results['failed'] = True
outcome['error_data'] = re.sub(nxos_password, '', outcome['error_data'])
self.results['error_data'] = 'Failed to spawn expect session! ' + outcome['error_data']
nxos_session.close()
return
else:
# The before property will contain all text up to the expected string pattern.
# The after string will contain the text that was matched by the expected pattern.
            msg = 'After {0} attempts, failed to spawn pexpect session to {1}. '
            msg += 'BEFORE: {2}, AFTER: {3}'
            error_msg = msg.format(connect_attempt, nxos_hostname, nxos_session.before, nxos_session.after)
            error_msg = re.sub(nxos_password, '', error_msg)
nxos_session.close()
raise AnsibleError(error_msg)
# Create local file directory under NX-OS filesystem if
# local_file_directory playbook parameter is set.
if local_file_directory:
dir_array = local_file_directory.split('/')
for each in dir_array:
if each:
mkdir_cmd = 'mkdir ' + local_dir_root + each
nxos_session.sendline(mkdir_cmd)
outcome = process_outcomes(nxos_session)
if outcome['error'] or outcome['expect_timeout']:
self.results['mkdir_cmd'] = mkdir_cmd
self.results['failed'] = True
outcome['error_data'] = re.sub(nxos_password, '', outcome['error_data'])
self.results['error_data'] = outcome['error_data']
return
local_dir_root += each + '/'
# Initiate file copy
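        # The assembled command looks like (illustrative values):
        #   copy scp://user@192.0.2.10/tmp/image.bin bootflash:/image.bin vrf management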
copy_cmd = (cmdroot + ruser + rserver + rfile + file_system + local_dir_root + local + compact + vrf + kstack)
self.results['copy_cmd'] = copy_cmd
nxos_session.sendline(copy_cmd)
for copy_attempt in range(6):
outcome = process_outcomes(nxos_session, self.playvals['file_pull_timeout'])
if outcome['user_response_required']:
nxos_session.sendline('yes')
continue
if outcome['password_prompt_detected']:
if self.playvals.get('remote_scp_server_password'):
nxos_session.sendline(self.playvals['remote_scp_server_password'])
else:
err_msg = 'Remote scp server {0} requires a password.'.format(rserver)
err_msg += ' Set the <remote_scp_server_password> playbook parameter or configure nxos device for passwordless scp'
raise AnsibleError(err_msg)
continue
if outcome['existing_file_with_same_name']:
nxos_session.sendline('y')
continue
if outcome['copy_complete']:
self.results['transfer_status'] = 'Received: File copied/pulled to nxos device from remote scp server.'
break
if outcome['error'] or outcome['expect_timeout']:
self.results['failed'] = True
outcome['error_data'] = re.sub(nxos_password, '', outcome['error_data'])
if self.playvals.get('remote_scp_server_password'):
outcome['error_data'] = re.sub(self.playvals['remote_scp_server_password'], '', outcome['error_data'])
self.results['error_data'] = outcome['error_data']
nxos_session.close()
return
else:
# The before property will contain all text up to the expected string pattern.
# The after string will contain the text that was matched by the expected pattern.
            msg = 'After {0} attempts, failed to copy file to {1}. '
            msg += 'BEFORE: {2}, AFTER: {3}, CMD: {4}'
            error_msg = msg.format(copy_attempt, nxos_hostname, nxos_session.before, nxos_session.after, copy_cmd)
            error_msg = re.sub(nxos_password, '', error_msg)
            if self.playvals.get('remote_scp_server_password'):
                error_msg = re.sub(self.playvals['remote_scp_server_password'], '', error_msg)
nxos_session.close()
raise AnsibleError(error_msg)
nxos_session.close()
def file_pull(self):
local_file = self.playvals['local_file']
remote_file = self.playvals['remote_file']
file_system = self.playvals['file_system']
# Note: This is the local file directory on the remote nxos device.
local_file_dir = self.playvals['local_file_directory']
local_file = local_file or self.playvals['remote_file'].split('/')[-1]
if not self.play_context.check_mode:
self.copy_file_from_remote(local_file, local_file_dir, file_system)
if not self.results['failed']:
self.results['changed'] = True
self.results['remote_file'] = remote_file
if local_file_dir:
dir = local_file_dir
else:
dir = ''
self.results['local_file'] = file_system + dir + '/' + local_file
self.results['remote_scp_server'] = self.playvals['remote_scp_server']
# This is the main run method for the action plugin to copy files
def run(self, tmp=None, task_vars=None):
socket_path = None
self.play_context = copy.deepcopy(self._play_context)
self.results = super(ActionModule, self).run(task_vars=task_vars)
if self.play_context.connection.split('.')[-1] != 'network_cli':
# Plugin is supported only with network_cli
self.results['failed'] = True
self.results['msg'] = 'Connection type must be fully qualified name for network_cli connection type, got %s' % self.play_context.connection
return self.results
# Get playbook values
self.playvals = self.process_playbook_values()
file_pull = self.playvals['file_pull']
self.check_library_dependencies(file_pull)
if socket_path is None:
socket_path = self._connection.socket_path
self.conn = Connection(socket_path)
# Call get_capabilities() to start the connection to the device.
self.conn.get_capabilities()
self.socket_timeout = self.conn.get_option('persistent_command_timeout')
# This action plugin support two modes of operation.
# - file_pull is False - Push files from the ansible controller to nxos switch.
# - file_pull is True - Initiate copy from the device to pull files to the nxos switch.
self.results['transfer_status'] = 'No Transfer'
self.results['file_system'] = self.playvals['file_system']
if file_pull:
self.file_pull()
else:
self.file_push()
return self.results
| gpl-3.0 |
lorin/umdinst | umdinst/setup/verifyperms.py | 1 | 3064 | #!/usr/bin/env python
import os
from stat import *
def parent(path):
"""Return the parent of the specified directory"""
# Remove the trailing slash, if any
if path.endswith('/'):
return os.path.dirname(path[:-1])
else:
return os.path.dirname(path)
def create_and_set_permissions(dirname,perms):
"""Create a directory and set permissions"""
try:
os.stat(dirname) # Will raise OSError if the file doesn't exist
if not os.path.isdir(dirname):
raise ValueError, "%s was specified as a directory, but it is not a directory" % mode
except OSError:
print "Directory %s does not exist, creating" % dirname
#os.mkdir(dirname)
os.makedirs(dirname)
# Set permissions on the directory directory
os.chmod(dirname,perms)
def check_parent_permissions(dirname,perms):
"""Check the permissions of the directory its parents to
make sure the appropriate permissions are there."""
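    # Illustrative example: if perms is S_IRGRP | S_IXGRP (0o050) and a parent
    # directory is mode 0o700, then 0o700 & 0o050 == 0 < 0o050, so that
    # directory fails the check.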
pathname = dirname
while(pathname != ''):
st = os.stat(pathname)
dirperms = S_IMODE(st[ST_MODE])
if (dirperms & perms) < perms:
#raise ValueError, pathname + " does not have the right permissions."
            print '''Error: %s does not have the right permissions.
The installation mode specified requires the installation directory
to be readable and executable by other users. Change the permission setting,
or change the installation directory with the '--prefix' option.''' % pathname
import sys
sys.exit(-1)
pathname = parent(pathname)
def check(installdir):
# Sanity check, make sure that the directories up to this point have
# the appropriate permissions
#
permissions_are_ok = True
# User and group read execute
grp_rx_perms = S_IRWXU | S_IRGRP | S_IXGRP
# All user read execute
all_rx_perms = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH
    # The parent directories may not actually all have user write access.
    # Some systems don't allow all users to access all parent directories,
    # so we check for group access instead.
check_perms = grp_rx_perms & ~S_IWUSR
try:
check_parent_permissions(parent(installdir),check_perms)
except OSError, e:
# I'm going to be naive here and assume that errno's are the same across
# all systems. Hopefully it's a POSIX thing.
if e.errno==2: # No such file or directory
raise ValueError, "There is a problem with the value for 'installdir' in config.py: " + installdir
else:
permissions_are_ok = False
#create_and_set_permissions(installdir,all_rx_perms)
#check_parent_permissions(parent(logfiledir),check_perms)
#allperms = S_IRWXU | S_IRWXG | S_IRWXO
#create_and_set_permissions(logfiledir,allperms)
if not permissions_are_ok:
print "Warning: Cannot confirm that permissions are set correctly."
print "Please check to make sure that subjects have read access to umdinst/setup and umdinst/bin, and write accesss to umdinst/.data"
| bsd-3-clause |
hydraplatform/hydra-base | unittests/test_templates.py | 1 | 37663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright 2013 to 2017 University of Manchester
#
# HydraPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HydraPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with HydraPlatform. If not, see <http://www.gnu.org/licenses/>
#
import server
from lxml import etree
from hydra_base import config
from hydra_base.lib.objects import JSONObject
from hydra_base import *
import logging
from hydra_base.exceptions import HydraError
import util
import datetime
log = logging.getLogger(__name__)
class TemplatesTest(server.SoapServerTest):
"""
Test for templates
"""
def set_template(self, template):
if template is None:
self.template = self.test_add_template()
else:
self.template = template
def get_template(self):
if hasattr(self, 'template'):
try:
get_template(self.template.id)
return self.template
except:
self.template = self.test_add_template()
else:
self.template = self.test_add_template()
return self.template
def test_add_xml(self):
        with open('template.xml', 'r') as template_file:
            file_contents = template_file.read()
new_tmpl = JSONObject(import_template_xml(file_contents))
assert new_tmpl is not None, "Adding template from XML was not successful!"
assert len(new_tmpl.templatetypes) == 2
for ta in new_tmpl.templatetypes[0].typeattrs:
assert ta.data_type == 'scalar'
assert new_tmpl.templatetypes[0].typeattrs[-1].properties is not None
assert eval(new_tmpl.templatetypes[0].typeattrs[-1].properties)['template_property'] == "Test property from template"
return new_tmpl
def test_get_xml(self):
xml_tmpl = self.test_add_xml()
db_template = get_template_as_xml(xml_tmpl.id)
assert db_template is not None
template_xsd_path = config.get('templates', 'template_xsd_path')
xmlschema_doc = etree.parse(template_xsd_path)
xmlschema = etree.XMLSchema(xmlschema_doc)
xml_tree = etree.fromstring(db_template)
xmlschema.assertValid(xml_tree)
def test_get_dict(self):
#Upload the xml file initally to avoid having to manage 2 template files
xml_tmpl = self.test_add_xml()
template_dict = get_template_as_dict(xml_tmpl.id)
#Error that there's already a template with this name.
self.assertRaises(HydraError, import_template_dict, template_dict, allow_update=False)
typename = template_dict['template']['templatetypes'][0]['name']
template_dict['template']['templatetypes'][0].name = typename + "_updated"
#Finds a template with this name and updates it to mirror this dict.
#This includes deleting types if they're not in this dict.
#Changing the name of a type has this effect, as a new template does not have
#any reference to existing types in Hydra.
updated_template = JSONObject(import_template_dict(template_dict))
assert updated_template['templatetypes'][-1]['name'] == typename + "_updated"
#Now put it back to the original name so other tests will work
template_dict['template']['templatetypes'][0].name = typename
updated_template = JSONObject(import_template_dict(template_dict))
assert updated_template['templatetypes'][-1]['name'] == typename
def test_add_template(self):
link_attr_1 = self.create_attr("link_attr_1", dimension='Pressure')
link_attr_2 = self.create_attr("link_attr_2", dimension='Speed')
node_attr_1 = self.create_attr("node_attr_1", dimension='Volume')
node_attr_2 = self.create_attr("node_attr_2", dimension='Speed')
net_attr_1 = self.create_attr("net_attr_2", dimension='Speed')
template = JSONObject()
template.name = 'Test template @ %s'%datetime.datetime.now()
layout = {}
layout['groups'] = '<groups>...</groups>'
template.layout = layout
template.templatetypes = []
#**********************
#type 1 #
#**********************
type1 = JSONObject()
type1.name = "Node type"
type1.alias = "Node type alias"
type1.resource_type = 'NODE'
type1.typeattrs = []
tattr_1 = JSONObject()
tattr_1.attr_id = node_attr_1.id
tattr_1.description = "Type attribute 1 description"
tattr_1.properties = {'test_property': "test property add type"}
tattr_1.data_restriction = {'LESSTHAN': 10, 'NUMPLACES': 1}
type1.typeattrs.append(tattr_1)
tattr_2 = JSONObject()
tattr_2.attr_id = node_attr_2.id
tattr_2.description = "Type attribute 2 description"
tattr_2.data_restriction = {'INCREASING': None}
type1.typeattrs.append(tattr_2)
template.templatetypes.append(type1)
#**********************
#type 2 #
#**********************
type2 = JSONObject()
type2.name = "Link type"
type2.alias = "Link type alias"
type2.resource_type = 'LINK'
type2.typeattrs = []
tattr_1 = JSONObject()
tattr_1.attr_id = link_attr_1.id
type2.typeattrs.append(tattr_1)
tattr_2 = JSONObject()
tattr_2.attr_id = link_attr_2.id
type2.typeattrs.append(tattr_2)
template.templatetypes.append(type2)
#**********************
#type 3 #
#**********************
type3 = JSONObject()
type3.name = "Network Type"
type3.alias = "Network Type alias"
type3.resource_type = 'NETWORK'
type3.typeattrs = []
tattr_3 = JSONObject()
tattr_3.attr_id = net_attr_1.id
tattr_3.data_restriction = {}
type3.typeattrs.append(tattr_3)
template.templatetypes.append(type3)
new_template_i = add_template(template)
#TODO: HACK to load the attr
for tt in new_template_i.templatetypes:
for ta in tt.typeattrs:
ta.attr
new_template_j = JSONObject(new_template_i)
assert new_template_j.name == template.name, "Names are not the same!"
assert str(new_template_j.layout) == str(template.layout), "Layouts are not the same!"
assert new_template_j.id is not None, "New Template has no ID!"
assert new_template_j.id > 0, "New Template has incorrect ID!"
assert len(new_template_j.templatetypes) == 3, "Resource types did not add correctly"
for t in new_template_j.templatetypes[0].typeattrs:
            assert t.attr_id in (node_attr_1.id, node_attr_2.id), "Node types were not added correctly!"
for t in new_template_j.templatetypes[1].typeattrs:
            assert t.attr_id in (link_attr_1.id, link_attr_2.id), "Link types were not added correctly!"
return new_template_j
def test_update_template(self):
attr_1 = self.create_attr("link_attr_1", dimension='Pressure')
attr_2 = self.create_attr("link_attr_2", dimension='Speed')
attr_3 = self.create_attr("node_attr_1", dimension='Volume')
template = JSONObject()
template.name = 'Test Template @ %s'%datetime.datetime.now()
template.templatetypes = []
type_1 = JSONObject()
type_1.name = "Node type 2"
type_1.alias = "Node type 2 alias"
type_1.resource_type = 'NODE'
type_1.typeattrs = []
type_2 = JSONObject()
type_2.name = "Link type 2"
type_2.alias = "Link type 2 alias"
type_2.resource_type = 'LINK'
type_2.typeattrs = []
tattr_1 = JSONObject()
tattr_1.attr_id = attr_1.id
tattr_1.unit = 'bar'
tattr_1.description = "typeattr description 1"
tattr_1.properties = {"test_property": "property value"}
type_1.typeattrs.append(tattr_1)
tattr_2 = JSONObject()
tattr_2.attr_id = attr_2.id
tattr_2.unit = 'mph'
tattr_2.description = "typeattr description 2"
type_2.typeattrs.append(tattr_2)
template.templatetypes.append(type_1)
template.templatetypes.append(type_2)
new_template_i = add_template(template)
new_template_j = JSONObject(new_template_i)
assert new_template_j.name == template.name, "Names are not the same!"
assert new_template_j.id is not None, "New Template has no ID!"
assert new_template_j.id > 0, "New Template has incorrect ID!"
assert len(new_template_j.templatetypes) == 2, "Resource types did not add correctly"
assert len(new_template_j.templatetypes[0].typeattrs) == 1, "Resource type attrs did not add correctly"
assert new_template_j.templatetypes[0].typeattrs[0].unit == 'bar'
#update the name of one of the types
new_template_j.templatetypes[0].name = "Test type 3"
updated_type_id = new_template_j.templatetypes[0].id
#add an template attr to one of the types
tattr_3 = JSONObject()
tattr_3.attr_id = attr_3.id
tattr_3.description = "updated typeattr description 1"
tattr_3.properties = {"test_property_of_added_type": "property value"}
new_template_j.templatetypes[0].typeattrs.append(tattr_3)
updated_template_i = update_template(new_template_j)
updated_template_j = JSONObject(updated_template_i)
assert updated_template_j.name == template.name, "Names are not the same!"
updated_type = None
for tmpltype in new_template_j.templatetypes:
if tmpltype.id == updated_type_id:
updated_type = tmpltype
break
assert updated_type.name == "Test type 3", "Resource types did not update correctly"
assert len(updated_type.typeattrs) == 2, "Resource type template attr did not update correctly"
#Test that when setting a unit on a type attr, it matches the dimension of its attr
#In this case, setting m^3(Volume) fails as the attr has a dimension of 'Pressure'
updated_template_j.templatetypes[0].typeattrs[0].unit = 'm^3'
self.assertRaises(HydraError, update_template, updated_template_j)
def test_delete_template(self):
network = self.create_network_with_data()
new_template = self.test_add_template()
retrieved_template_i = get_template(new_template.id)
assert retrieved_template_i is not None
retrieved_template_j = JSONObject(retrieved_template_i)
apply_template_to_network(retrieved_template_j.id, network.id)
updated_network = get_network(network.id, user_id=self.user_id)
assert len(updated_network.types) == 2
expected_net_type = None
for t in new_template.templatetypes:
if t.resource_type == 'NETWORK':
expected_net_type = t.id
network_type = updated_network.types[1].type_id
assert expected_net_type == network_type
delete_template(new_template.id)
self.assertRaises(HydraError, get_template, new_template.id)
network_deleted_templatetypes = get_network(network.id, user_id=self.user_id)
assert len(network_deleted_templatetypes.types) == 1
def test_add_type(self):
template = self.get_template()
attr_1 = self.create_attr("link_attr_1", dimension='Pressure')
attr_2 = self.create_attr("link_attr_2", dimension='Speed')
attr_3 = self.create_attr("node_attr_1", dimension='Volume')
templatetype = JSONObject()
templatetype.name = "Test type name @ %s"%(datetime.datetime.now())
templatetype.alias = "%s alias" % templatetype.name
templatetype.resource_type = 'LINK'
templatetype.template_id = template.id
templatetype.layout = {"color": "red", "shapefile": "blah.shp"}
templatetype.typeattrs = []
tattr_1 = JSONObject()
tattr_1.attr_id = attr_1.id
tattr_1.description = "added type description 1"
tattr_1.properties = {"add_type_test_property": "property value"}
templatetype.typeattrs.append(tattr_1)
tattr_2 = JSONObject()
tattr_2.attr_id = attr_2.id
tattr_1.description = "added type description 2"
templatetype.typeattrs.append(tattr_2)
tattr_3 = JSONObject()
tattr_3.attr_id = attr_3.id
templatetype.typeattrs.append(tattr_3)
new_type_i = add_templatetype(templatetype)
new_type_j = JSONObject(new_type_i)
assert new_type_j.name == templatetype.name, "Names are not the same!"
assert new_type_j.alias == templatetype.alias, "Aliases are not the same!"
assert json.loads(new_type_j.layout) == templatetype.layout, "Layouts are not the same!"
assert new_type_j.id is not None, "New type has no ID!"
assert new_type_j.id > 0, "New type has incorrect ID!"
assert len(new_type_j.typeattrs) == 3, "Resource type attrs did not add correctly"
return new_type_j
def test_update_type(self):
template = self.get_template()
attr_1 = self.create_attr("link_attr_1", dimension='Pressure')
attr_2 = self.create_attr("link_attr_2", dimension='Speed')
attr_3 = self.create_attr("node_attr_1", dimension='Volume')
templatetype = JSONObject()
templatetype.name = "Test type name @ %s" % (datetime.datetime.now())
templatetype.alias = templatetype.name + " alias"
templatetype.template_id = self.get_template().id
templatetype.resource_type = 'NODE'
templatetype.template_id = template.id
tattr_1 = JSONObject()
tattr_1.attr_id = attr_1.id
tattr_2 = JSONObject()
tattr_2.attr_id = attr_2.id
templatetype.typeattrs = [tattr_1, tattr_2]
new_type_i = add_templatetype(templatetype)
new_type_j = JSONObject(new_type_i)
assert new_type_j.name == templatetype.name, "Names are not the same!"
assert new_type_j.alias == templatetype.alias, "Aliases are not the same!"
        assert new_type_j.id is not None, "New type has no ID!"
assert new_type_j.id > 0, "New type has incorrect ID!"
assert len(new_type_j.typeattrs) == 2, "Resource type attrs did not add correctly"
new_type_j.name = "Updated type name @ %s"%(datetime.datetime.now())
new_type_j.alias = templatetype.name + " alias"
new_type_j.resource_type = 'NODE'
tattr_3 = JSONObject()
tattr_3.attr_id = attr_3.id
tattr_3.description = "Descripton of added typeattr"
tattr_3.properties = {"update_type_test_property": "property value"}
new_type_j.typeattrs.append(tattr_3)
new_type_j.typeattrs[0].description = "Updated typeattr description"
updated_type_i = update_templatetype(new_type_j)
updated_type_j = JSONObject(updated_type_i)
assert new_type_j.name == updated_type_j.name, "Names are not the same!"
assert new_type_j.alias == updated_type_j.alias, "Aliases are not the same!"
assert new_type_j.id == updated_type_j.id, "type ids to not match!"
assert new_type_j.id > 0, "New type has incorrect ID!"
assert new_type_j.typeattrs[0].description == "Updated typeattr description"
assert new_type_j.typeattrs[-1].properties['update_type_test_property'] == "property value"
assert len(updated_type_j.typeattrs) == 3, "Template type attrs did not update correctly"
def test_delete_type(self):
new_template = self.test_add_template()
retrieved_template = get_template(new_template.id)
assert retrieved_template is not None
templatetype = new_template.templatetypes[0]
delete_templatetype(templatetype.id)
updated_template = JSONObject(get_template(new_template.id))
for tmpltype in updated_template.templatetypes:
assert tmpltype.id != templatetype.id
def test_get_type(self):
new_type = self.get_template().templatetypes[0]
new_type = get_templatetype(new_type.id)
assert new_type is not None, "Resource type attrs not retrived by ID!"
def test_get_type_by_name(self):
new_type = self.get_template().templatetypes[0]
new_type = get_templatetype_by_name(new_type.template_id, new_type.name)
assert new_type is not None, "Resource type attrs not retrived by name!"
def test_add_typeattr(self):
attr_1 = self.create_attr("link_attr_1", dimension='Pressure')
attr_2 = self.create_attr("link_attr_2", dimension='Speed')
attr_3 = self.create_attr("node_attr_1", dimension='Volume')
templatetype = JSONObject()
templatetype.name = "Test type name @ %s"%(datetime.datetime.now())
templatetype.alias = templatetype.name + " alias"
templatetype.template_id = self.get_template().id
templatetype.resource_type = 'NODE'
tattr_1 = JSONObject()
tattr_1.attr_id = attr_1.id
tattr_2 = JSONObject()
tattr_2.attr_id = attr_2.id
tattr_2.description = "Description of typeattr from test_add_typeattr"
tattr_2.properties = {"test_property":"property value"}
templatetype.typeattrs = [tattr_1, tattr_2]
new_type = JSONObject(add_templatetype(templatetype))
tattr_3 = JSONObject()
tattr_3.attr_id = attr_3.id
tattr_3.type_id = new_type.id
tattr_3.description = "Description of additional typeattr from test_add_typeattr"
tattr_3.properties = {"add_typeattr_test_property": "property value"}
log.info("Adding Test Type attribute")
add_typeattr(tattr_3)
updated_type = JSONObject(get_templatetype(new_type.id,user_id=self.user_id))
assert len(updated_type.typeattrs) == 3, "Type attr did not add correctly"
assert eval(updated_type.typeattrs[-1].properties)['add_typeattr_test_property'] == "property value"
def test_delete_typeattr(self):
template = self.test_add_template()
attr_1 = self.create_attr("link_attr_1", dimension='Pressure')
attr_2 = self.create_attr("link_attr_2", dimension='Speed')
templatetype = JSONObject()
templatetype.name = "Test type name @ %s"%(datetime.datetime.now())
templatetype.alias = templatetype.name + " alias"
templatetype.resource_type = 'NODE'
templatetype.template_id = template.id
tattr_1 = JSONObject()
tattr_1.attr_id = attr_1.id
tattr_2 = JSONObject()
tattr_2.attr_id = attr_2.id
templatetype.typeattrs = [tattr_1, tattr_2]
new_type = JSONObject(add_templatetype(templatetype))
tattr_2.type_id = new_type.id
delete_typeattr(tattr_2)
updated_type = JSONObject(get_templatetype(new_type.id))
log.info(len(updated_type.typeattrs))
        assert len(updated_type.typeattrs) == 1, "Resource type attr did not delete correctly"
def test_get_templates(self):
self.get_template()
templates = [JSONObject(t) for t in get_templates()]
for t in templates:
for typ in t.templatetypes:
assert typ.resource_type is not None
assert len(templates) > 0, "Templates were not retrieved!"
def test_get_template(self):
template = self.get_template()
new_template = JSONObject(get_template(template.id))
assert new_template.name == template.name, "Names are not the same! Retrieval by ID did not work!"
def test_get_template_by_name_good(self):
template = self.get_template()
new_template = JSONObject(get_template_by_name(template.name))
assert new_template.name == template.name, "Names are not the same! Retrieval by name did not work!"
def test_get_template_by_name_bad(self):
new_template = get_template_by_name("Not a template!")
assert new_template is None
def test_add_resource_type(self):
template = self.get_template()
types = template.templatetypes
type_name = types[0].name
type_id = types[0].id
project = self.create_project('test')
network = JSONObject()
nnodes = 3
nlinks = 2
x = [0, 0, 1]
y = [0, 1, 0]
network.nodes = []
network.links = []
for i in range(nnodes):
node = JSONObject()
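            # ids <= 0 appear to mark client-side objects the server has not
            # stored yet; real ids are assigned when the network is added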
node.id = i * -1
node.name = 'Node ' + str(i)
node.description = 'Test node ' + str(i)
node.x = x[i]
node.y = y[i]
type_summary = JSONObject()
type_summary.template_id = template.id
type_summary.template_name = template.name
type_summary.id = type_id
type_summary.name = type_name
node.types = [type_summary]
network.nodes.append(node)
for i in range(nlinks):
link = JSONObject()
link.id = i * -1
link.name = 'Link ' + str(i)
link.description = 'Test link ' + str(i)
link.node_1_id = network.nodes[i].id
link.node_2_id = network.nodes[i + 1].id
network.links.append(link)
network.project_id = project.id
network.name = 'Test @ %s'%(datetime.datetime.now())
network.description = 'A network for SOAP unit tests.'
net_summary = add_network(network, user_id=self.user_id)
new_net = get_network(net_summary.id, user_id=self.user_id)
for node in new_net.nodes:
            assert node.types is not None and node.types[0].type_name == "Node type", "type was not added correctly!"
def test_find_matching_resource_types(self):
network = self.create_network_with_data()
node_to_check = network.nodes[0]
matching_types_i = get_matching_resource_types('NODE', node_to_check.id)
matching_types_j = [JSONObject(i) for i in matching_types_i]
assert len(matching_types_j) > 0, "No types returned!"
matching_type_ids = []
for tmpltype in matching_types_j:
matching_type_ids.append(tmpltype.id)
assert node_to_check.types[0].id in matching_type_ids, "TemplateType ID not found in matching types!"
def test_assign_type_to_resource(self):
network = self.create_network_with_data()
template = self.get_template()
templatetype = template.templatetypes[0]
node_to_assign = network.nodes[0]
result = JSONObject(assign_type_to_resource(templatetype.id, 'NODE', node_to_assign.id))
node = get_node(node_to_assign.id)
assert node.types is not None, \
'Assigning type did not work properly.'
assert str(result.id) in [str(x.type_id) for x in node.types]
def test_remove_type_from_resource(self):
network = self.create_network_with_data()
template = self.get_template()
templatetype = template.templatetypes[0]
node_to_assign = network.nodes[0]
result1_i = assign_type_to_resource(templatetype.id, 'NODE', node_to_assign.id)
result1_j = JSONObject(result1_i)
node_j = JSONObject(get_node(node_to_assign.id))
assert node_j.types is not None, \
'Assigning type did not work properly.'
assert str(result1_j.id) in [str(x.type_id) for x in node_j.types]
remove_result = remove_type_from_resource(templatetype.id,
'NODE',
node_to_assign.id)
print remove_result
assert remove_result == 'OK'
updated_node_j = JSONObject(get_node(node_to_assign.id))
assert updated_node_j.types is None or str(result1_j.id) not in [str(x.type_id) for x in updated_node_j.types]
def test_create_template_from_network(self):
network = self.create_network_with_data()
net_template = get_network_as_xml_template(network.id)
assert net_template is not None
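        # validate the exported XML against the template schema from config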
template_xsd_path = config.get('templates', 'template_xsd_path')
xmlschema_doc = etree.parse(template_xsd_path)
xmlschema = etree.XMLSchema(xmlschema_doc)
xml_tree = etree.fromstring(net_template)
xmlschema.assertValid(xml_tree)
def test_apply_template_to_network(self):
net_to_update = self.create_network_with_data()
template = self.get_template()
#Test the links as it's easier
empty_links = []
for l in net_to_update.links:
if l.types is None:
empty_links.append(l.id)
#Add the resource attributes to the links, so we can guarantee
#that these links will match those in the template.
for t in template.templatetypes:
if t.resource_type == 'LINK':
link_type = t
break
link_ra_1 = JSONObject(dict(
attr_id=link_type.typeattrs[0].attr_id
))
link_ra_2 = JSONObject(dict(
attr_id=link_type.typeattrs[1].attr_id
))
for link in net_to_update.links:
if link.types is None:
link.attributes.append(link_ra_1)
link.attributes.append(link_ra_2)
network = update_network(net_to_update, user_id=self.user_id)
for n in network.nodes:
assert len(n.types) == 1
assert n.types[0].name == 'Default Node'
apply_template_to_network(template.id, network.id)
network = get_network(network.id, user_id=self.user_id)
assert len(network.types) == 2
assert network.types[1].type_name == 'Network Type'
for l in network.links:
if l.id in empty_links:
assert l.types is not None
assert len(l.types) == 1
assert l.types[0].type_name == 'Link type'
        #The assignment of the template hasn't affected the nodes
#as they do not have the appropriate attributes.
for n in network.nodes:
assert len(n.types) == 1
assert n.types[0].template_name == 'Default Template'
def test_apply_template_to_network_twice(self):
net_to_update = self.create_network_with_data()
template = self.get_template()
#Test the links as it's easier
empty_links = []
for l in net_to_update.links:
if l.types is None:
empty_links.append(l.id)
#Add the resource attributes to the links, so we can guarantee
#that these links will match those in the template.
for t in template.templatetypes:
if t.resource_type == 'LINK':
link_type = t
break
link_ra_1 = JSONObject(dict(
attr_id=link_type.typeattrs[0].attr_id
))
link_ra_2 = JSONObject(dict(
attr_id=link_type.typeattrs[1].attr_id
))
for link in net_to_update.links:
if link.types is None:
link.attributes.append(link_ra_1)
link.attributes.append(link_ra_2)
network = update_network(net_to_update, user_id=self.user_id)
for n in network.nodes:
assert len(n.types) == 1
assert n.types[0].name == 'Default Node'
apply_template_to_network(template.id, network.id)
apply_template_to_network(template.id, network.id)
network = get_network(network.id, user_id=self.user_id)
assert len(network.types) == 2
assert network.types[1].name == 'Network Type'
for l in network.links:
if l.id in empty_links:
assert l.types is not None
                assert len(l.types) == 1
assert l.types[0].name == 'Link type'
for n in network.nodes:
assert len(n.types) == 1
assert n.types[0].template_name == 'Default Template'
def test_remove_template_from_network(self):
network = self.create_network_with_data()
template_id = network.types[0].template_id
#Test the links as it's easier
empty_links = []
for l in network.links:
if l.types is None or len(l.types) == 0:
empty_links.append(l.id)
for n in network.nodes:
assert len(n.types) == 1
assert n.types[0].type_name == "Default Node"
apply_template_to_network(template_id, network.id)
remove_template_from_network(network.id, template_id, 'N')
network_2 = get_network(network.id, user_id=self.user_id)
assert len(network_2.types) == 0
for l in network_2.links:
if l.id in empty_links:
assert len(l.types) == 0
for n in network_2.nodes:
assert len(n.types) == 0
def test_remove_template_and_attributes_from_network(self):
network = self.create_network_with_data()
template = self.get_template()
#Test the links as it's easier
empty_links = []
for l in network.links:
if l.types is None:
empty_links.append(l.id)
for n in network.nodes:
assert len(n.types) == 1
assert n.types[0].type_name == 'Default Node'
network_1 = get_network(network.id, user_id=self.user_id)
assert len(network_1.types) == 1
apply_template_to_network(template.id, network.id)
network_3 = get_network(network.id, user_id=self.user_id)
assert len(network_3.types) == 2
remove_template_from_network(network.id, template.id, 'Y')
network_2 = get_network(network.id, user_id=self.user_id)
assert len(network_2.types) == 1
link_attrs = []
for tt in template.templatetypes:
if tt.resource_type != 'LINK':
continue
for ta in tt.typeattrs:
attr_id = ta.attr_id
if attr_id not in link_attrs:
link_attrs.append(attr_id)
link_attrs.append(ta.attr_id)
for l in network_2.links:
if l.id in empty_links:
assert l.types is None
if l.attributes is not None:
for a in l.attributes:
assert a.attr_id not in link_attrs
for tt in template.templatetypes:
if tt.resource_type != 'NODE':
continue
for ta in tt.typeattrs:
attr_id = ta.attr_id
if attr_id not in link_attrs:
link_attrs.append(attr_id)
link_attrs.append(ta.attr_id)
for n in network_2.nodes:
assert len(n.types) == 1
if n.attributes is not None:
for a in n.attributes:
assert a.attr_id not in link_attrs
def test_validate_attr(self):
network = self.create_network_with_data()
scenario = network.scenarios[0]
rs_ids = [rs.resource_attr_id for rs in scenario.resourcescenarios]
template_id = network.nodes[0].types[0].template_id
for n in network.nodes:
node_type = get_templatetype(n.types[0].id)
for ra in n.attributes:
for attr in node_type.typeattrs:
if ra.attr_id == attr.attr_id and ra.id in rs_ids and attr.data_restriction is not None:
# logging.info("Validating RA %s in scenario %s", ra.id, scenario.id)
error = validate_attr(ra.id, scenario.id, template_id)
assert error.ref_id == n.id
def test_validate_attrs(self):
network = self.create_network_with_data()
scenario = network.scenarios[0]
ra_ids = []
for rs in scenario.resourcescenarios:
ra_ids.append(rs.resource_attr_id)
template_id = network.types[0].template_id
errors = validate_attrs(ra_ids, scenario.id, template_id)
assert len(errors) > 0
def test_validate_scenario(self):
network = self.create_network_with_data()
scenario = network.scenarios[0]
template_id = network.nodes[0].types[0].template_id
errors = validate_scenario(scenario.id,template_id)
assert len(errors) > 0
def test_validate_network(self):
network = self.create_network_with_data(use_existing_template=False)
util.update_template(network.types[0].template_id)
scenario = network.scenarios[0]
template = network.nodes[0].types[0]
#Validate the network without data: should pass as the network is built
#based on the template in these unit tests
errors1 = validate_network(network['id'], template['template_id'])
        #The network should have an error, saying that the template has net_attr_d,
#but the network does not
assert len(errors1) == 1
assert errors1[0].find('net_attr_d') > 0
#Validate the network with data. Should fail as one of the attributes (node_attr_3)
#is specified as being a 'Cost' in the template but 'Speed' is the dimension
#of the dataset used. In addition, node_attr_1 specified a unit of 'm^3'
#whereas the timeseries in the data is in 'cm^3', so each will fail on unit
#mismatch also.
errors2 = validate_network(network['id'], template['template_id'],
scenario['id'])
assert len(errors2) > 0
        #every node generates two errors (unit + dimension mismatch), plus the
        #network error from above
        assert len(errors2) == len(network['nodes']) * 2 + 1
for err in errors2[1:]:
try:
assert err.startswith("Unit mismatch")
except AssertionError:
assert err.startswith("Dimension mismatch")
def test_type_compatibility(self):
"""
Check function that thests whether two types are compatible -- the
overlapping attributes are specified with the same unit.
Change the unit associated with an attribute for types in two idencical
templates, and test. There should be 1 error returned.
THen test comparison of identical types. No errors should return.
"""
template_1 = self.test_add_template()
template_2 = self.test_add_template()
diff_type_1_id = None
same_type_1_id = None
for typ in template_1.templatetypes:
if typ.typeattrs:
for ta in typ.typeattrs:
if ta.attr.name == 'node_attr_1':
diff_type_1_id = typ.id
ta.unit = "m^3"
elif ta.attr.name == 'link_attr_1':
same_type_1_id = typ.id
updated_template_1 = JSONObject(update_template(template_1))
diff_type_2_id = None
same_type_2_id = None
for typ in template_2.templatetypes:
if typ.typeattrs:
for ta in typ.typeattrs:
if ta.attr.name == 'node_attr_1':
diff_type_2_id = typ.id
ta.unit = "cm^3"
elif ta.attr.name == 'link_attr_1':
same_type_2_id = typ.id
#Before updating template 2, check compatibility of types, where T1 has
        #a unit, but T2 does not.
errors_diff = check_type_compatibility(diff_type_1_id, diff_type_2_id)
assert len(errors_diff) == 1
errors_same = check_type_compatibility(same_type_1_id, same_type_2_id)
assert len(errors_same) == 0
#Now update T2 so that the types have conflicting units.
updated_template_2 = JSONObject(update_template(template_2))
errors_diff = check_type_compatibility(diff_type_1_id, diff_type_2_id)
assert len(errors_diff) == 1
errors_same = check_type_compatibility(same_type_1_id, same_type_2_id)
assert len(errors_same) == 0
print updated_template_1
for typ in updated_template_1.templatetypes:
if typ.typeattrs:
for ta in typ.typeattrs:
if ta.attr.name == 'node_attr_1':
ta.unit = None
#Update template 1 now so that it has no unit, but template 2 does.
        updated_template_1_a = update_template(updated_template_1)
errors_diff = check_type_compatibility(diff_type_1_id, diff_type_2_id)
assert len(errors_diff) == 1
errors_same = check_type_compatibility(same_type_1_id, same_type_2_id)
assert len(errors_same) == 0
if __name__ == '__main__':
server.run()
| lgpl-3.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/transformer/implementations/implementation_closed/fairseq/data/monolingual_dataset.py | 4 | 2728 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import numpy as np
import torch
from . import data_utils, FairseqDataset
def collate(samples, pad_idx, eos_idx):
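    # Pad variable-length token sequences into fixed-size batch tensors and
    # return the dict format fairseq's training loop expects.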
if len(samples) == 0:
return {}
def merge(key):
return data_utils.collate_tokens(
[s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
)
return {
'id': torch.LongTensor([s['id'] for s in samples]),
'ntokens': sum(len(s['target']) for s in samples),
'net_input': {
'src_tokens': merge('source'),
},
'target': merge('target'),
}
class MonolingualDataset(FairseqDataset):
"""A wrapper around torch.utils.data.Dataset for monolingual data."""
def __init__(self, dataset, sizes, vocab, shuffle):
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = vocab
self.shuffle = shuffle
def __getitem__(self, index):
source, target = self.dataset[index]
return {'id': index, 'source': source, 'target': target}
def __len__(self):
return len(self.dataset)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch."""
return collate(samples, self.vocab.pad(), self.vocab.eos())
def get_dummy_batch(self, num_tokens, max_positions, tgt_len=128):
assert isinstance(max_positions, float) or isinstance(max_positions, int)
tgt_len = min(tgt_len, max_positions)
bsz = num_tokens // tgt_len
target = self.vocab.dummy_sentence(tgt_len + 1)
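        # standard LM shift: the model reads tokens [0..n-1] and predicts [1..n]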
source, target = target[:-1], target[1:]
return self.collater([
{'id': i, 'source': source, 'target': target}
for i in range(bsz)
])
def num_tokens(self, index):
"""Return an example's length (number of tokens), used for batching."""
source, target = self.dataset[index]
return len(source)
def ordered_indices(self, seed=None):
"""Ordered indices for batching."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
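        # np.lexsort treats the last key as primary, so the size key appended
        # below dominates and the order built above only breaks ties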
order.append(np.flip(self.sizes, 0))
return np.lexsort(order)
def valid_size(self, index, max_positions):
"""Check if an example's size is valid according to max_positions."""
assert isinstance(max_positions, float) or isinstance(max_positions, int)
return self.sizes[index] <= max_positions
| apache-2.0 |
AliLozano/django-guardian | docs/conf.py | 16 | 7163 | # -*- coding: utf-8 -*-
#
# django-guardian documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 18 23:18:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
os.environ['DJANGO_SETTINGS_MODULE'] = 'guardian.testsettings'
ANONYMOUS_USER_ID = -1 # Required by guardian
guardian = __import__('guardian')
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'exts']
try:
import rst2pdf
if rst2pdf.version >= '0.16':
extensions.append('rst2pdf.pdfbuilder')
except ImportError:
print "[NOTE] In order to build PDF you need rst2pdf with version >=0.16"
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-guardian'
copyright = u'2010, Lukasz Balcerzak'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = guardian.get_version()
# The full version, including alpha/beta/rc tags.
release = guardian.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
# Theme URL: https://github.com/coordt/ADCtheme/
html_theme = 'ADCtheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'guardiandoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'guardian.tex', u'guardian Documentation',
u'Lukasz Balcerzak', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
pdf_documents = [
('index', u'django-guardian', u'Documentation for django-guardian',
u'Lukasz Balcerzak'),
]
pdf_stylesheets = ['sphinx','kerning','a4']
pdf_break_level = 2
pdf_inline_footnotes = True
#pdf_extensions = ['vectorpdf', 'dotted_toc']
| bsd-2-clause |
wooga/airflow | tests/providers/presto/hooks/test_presto.py | 5 | 4331 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from unittest.mock import patch
from prestodb.transaction import IsolationLevel
from airflow.models import Connection
from airflow.providers.presto.hooks.presto import PrestoHook
class TestPrestoHookConn(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='hive',
)
class UnitTestPrestoHook(PrestoHook):
conn_name_attr = 'presto_conn_id'
self.db_hook = UnitTestPrestoHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@patch('airflow.providers.presto.hooks.presto.prestodb.auth.BasicAuthentication')
@patch('airflow.providers.presto.hooks.presto.prestodb.dbapi.connect')
def test_get_conn(self, mock_connect, mock_basic_auth):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(catalog='hive', host='host', port=None, http_scheme='http',
schema='hive', source='airflow', user='login', isolation_level=0,
auth=mock_basic_auth('login', 'password'))
class TestPrestoHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestPrestoHook(PrestoHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
def get_isolation_level(self):
return IsolationLevel.READ_COMMITTED
self.db_hook = UnitTestPrestoHook()
@patch('airflow.hooks.dbapi_hook.DbApiHook.insert_rows')
def test_insert_rows(self, mock_insert_rows):
table = "table"
rows = [("hello",),
("world",)]
target_fields = None
commit_every = 10
self.db_hook.insert_rows(table, rows, target_fields, commit_every)
mock_insert_rows.assert_called_once_with(table, rows, None, 10)
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook.get_first(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook.get_records(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])
self.cur.execute.assert_called_once_with(statement, None)
| apache-2.0 |
davemehringer/keplerpp | apps/plotter_mayavi.py | 1 | 10321 | # plotter_mayavi.py
# Copyright (C) 2016 David Mehringer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from mayavi.mlab import *
import numpy as np
import os
import random
import sys
import threading
import time
def do_colors():
global colors
colors = (np.random.random((nbodies, 4))*255).astype(np.uint16)
colors[:,-1] = 255 # No transparency
for c in colors:
red = c[0]
green = c[1]
blue = c[2]
sum = red + green + blue
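        # 384 = 3 * 128: reject colors whose channels average below half
        # brightness so every body stays readable on the black background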
if sum < 384:
while True:
red = (np.random.random((1))*255).astype(np.uint16)[0]
green = (np.random.random((1))*255).astype(np.uint16)[0]
blue = (np.random.random((1))*255).astype(np.uint16)[0]
sum = red + green + blue
if sum >= 384:
c[:] = [red, green, blue, 255]
break
norm_colors.append(tuple(c[0:3]/255.0))
def do_labels():
if texts == []:
show_labels()
elif name_toggle or tag_toggle:
modify_labels()
else:
hide_labels()
def do_tags():
global tag_map
global tag_list
n = len(tag_chars)
tag_set_as_list = list(tag_chars)
# if n bodies > n tags, not all bodies will have tags
for i in xrange(nbodies):
if i < n:
z = tag_set_as_list[i % n]
tag = str(z)
tag_map[tag] = i
tag_list.append(tag)
else:
tag_list.append("")
def hide_labels():
global texts
gcf().scene.disable_render = True
for t in texts:
t.remove()
gcf().scene.disable_render = False
texts = []
def hide_paths():
global paths
if not paths:
return
gcf().scene.disable_render = True
for p in paths:
p.remove()
gcf().scene.disable_render = False
paths = []
def init():
global lines
global pts
global colors
global texts
global names
global positions
# first read gets body names
read()
# second read gets initial positions
read()
do_colors()
plot_positions(True, True)
do_tags()
def key_intercept(vtk_obj, event):
global name_toggle
global path_toggle
global scale_factor
global tag_toggle
global cur_key_event
global tag_map
keypress = vtk_obj.GetKeyCode()
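    # bindings: '>'/'<' double or halve the glyph scale, 'o' toggles orbit
    # paths, 'n' toggles name labels, 't' toggles tag labels, and any tag
    # character recenters the view on the matching body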
if keypress == '>':
scale_factor *= 2
plot_positions(False, True)
hide_paths()
elif keypress == '<':
scale_factor /= 2
plot_positions(False, True)
hide_paths()
elif keypress == 'o':
path_toggle = not path_toggle
if path_toggle:
plot_paths()
else:
hide_paths()
elif keypress == 'n' or keypress == 't':
if keypress == 'n':
name_toggle = not name_toggle
elif keypress == 't':
tag_toggle = not tag_toggle
do_labels()
elif keypress in tag_map:
global center_id
center_id = tag_map[keypress]
replot_all(False)
def modify_labels():
global texts
gcf().scene.disable_render = True
for i in xrange(nbodies):
if name_toggle and tag_toggle:
texts[i].text = names[i] + " " + tag_list[i]
elif name_toggle:
texts[i].text = names[i]
elif tag_toggle:
texts[i].text = tag_list[i]
gcf().scene.disable_render = False
@animate(delay=50)
def myanim():
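    # mlab's @animate runs this generator on a timer: each yield hands
    # control back to the GUI so the scene can redraw between frames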
global count
global done
global positions
while True:
if not done:
read()
replot_all(False)
yield
def picker_callback(picker):
""" Picker callback: this get called when on pick events.
"""
global pts
if picker.actor in pts.actor.actors:
glyph_points = pts.glyph.glyph_source.glyph_source.output.points.to_array()
# Find which data point corresponds to the point picked:
# we have to account for the fact that each data point is
# represented by a glyph with several points
point_id = picker.point_id/glyph_points.shape[0]
        # If no point was picked, point_id is -1
if point_id != -1:
            # Retrieve the coordinates corresponding to that data point
global center_id
center_id = point_id
#clf()
replot_all(False)
def plot_paths():
if not path_toggle:
return
(x, y, z) = rel_positions(True)
if x.shape[1] < 2:
return
gcf().scene.disable_render = True
global paths
(azimuth, elevation, distance, focalpoint) = view()
tr = scale_factor/4.0
n = x.shape[1]
if not paths:
if paths:
for p in paths:
p.remove()
paths = []
        zero = np.zeros([10000])
        for i in xrange(nbodies):
            # copy the template array: binding xx = zero would alias all three
            # coordinate arrays to the same buffer, so only z would survive
            xx = zero.copy()
            xx[:n] = x[i]
            xx[n+1:] = None
            yy = zero.copy()
            yy[:n] = y[i]
            yy[n+1:] = None
            zz = zero.copy()
            zz[:n] = z[i]
            zz[n+1:] = None
path = plot3d(
xx, yy, zz, color=norm_colors[i],
tube_radius=tr
)
paths.append(path)
else:
for i in xrange(nbodies):
if i != center_id:
xx = paths[i].mlab_source.x
xx[:n] = x[i]
paths[i].mlab_source.x = xx
yy = paths[i].mlab_source.y
yy[:n] = y[i]
paths[i].mlab_source.y = yy
zz = paths[i].mlab_source.z
zz[:n] = z[i]
paths[i].mlab_source.z = zz
view(azimuth=azimuth, elevation=elevation, distance=distance, focalpoint=focalpoint)
gcf().scene.disable_render = False
def plot_positions(reset_zoom, rescale_data):
gcf().scene.disable_render = True
global pts
(azimuth, elevation, distance, focalpoint) = view()
(x, y, z) = rel_positions(False)
if pts and not rescale_data:
pts.mlab_source.x = x
pts.mlab_source.y = y
pts.mlab_source.z = z
else:
if pts:
pts.remove()
pts = points3d(
x, y, z, np.arange(nbodies), scale_factor=scale_factor,
reset_zoom=reset_zoom
)
pts.glyph.color_mode = 'color_by_scalar' # Color by scalar
pts.glyph.scale_mode = 'scale_by_vector' # scale by vector
# Set look-up table and redraw
pts.module_manager.scalar_lut_manager.lut.table = colors
view(azimuth=azimuth, elevation=elevation, distance=distance, focalpoint=focalpoint)
gcf().scene.disable_render = False
def read():
global count
global nbodies
global done
global positions
global names
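    # pipe protocol: an alphabetic first line carries the body names, each
    # subsequent line carries 3*nbodies coordinates for one frame, and
    # "end" terminates the stream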
x = f.readline().rstrip()
if x == "end":
f.close()
done = True
return positions
elif x[0].isalpha():
names = np.array(x.split())
return
data = np.fromstring(x, sep=" ")
if count == 0:
nbodies = len(data)/3
data = data.reshape(nbodies, 3)
if count == 0:
positions = np.expand_dims(data, axis=2)
else:
positions = np.append(positions, np.expand_dims(data, axis=2), axis=2)
count += 1
def rel_positions(all):
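    # positions relative to the current center body, scaled to AU
    # (the divisor au = 1.495e8 suggests the incoming coordinates are km)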
if all:
rp = (positions[:, :] - positions[center_id, :])/au
else:
rp = (positions[:, :, -1] - positions[center_id, :, -1])/au
return (rp[:,0], rp[:,1], rp[:,2])
def replot_all(reset_zoom):
if path_toggle:
plot_paths()
plot_positions(reset_zoom, False)
if name_toggle or tag_toggle:
update_labels()
def show_labels():
(x, y, z) = rel_positions(False)
(azimuth, elevation, distance, focalpoint) = view()
#scale = 0.005*distance
scale = 0.007*distance
x += 2*scale
y += 2*scale
z += 2*scale
gcf().scene.disable_render = True
global texts
texts = []
for i in xrange(nbodies):
if name_toggle and tag_toggle:
text = names[i] + " " + tag_list[i]
elif name_toggle:
text = names[i]
elif tag_toggle:
text = tag_list[i]
xx = text3d(
x[i], y[i], z[i], text,
scale=scale, color=norm_colors[i]
)
texts.append(xx)
gcf().scene.disable_render = False
def update_labels():
"""animate text labels to follow objects"""
if not name_toggle and not tag_toggle:
return
(x, y, z) = rel_positions(False)
gcf().scene.disable_render = True
for (tt, xx, yy, zz) in zip(texts, x, y, z):
tt.position = (xx, yy, zz)
gcf().scene.disable_render = False
tag_chars = set([
'0','1','2','4','5','6','7','8','9','b','d','g','h','i','k',
'm','q','r','u','v','x','y','z','A','B','C','D','E','G','H',
'I','J','K','M','N','O','Q','R','T','U','V','X','Y','Z'
])
picker = gcf().on_mouse_pick(picker_callback)
# Decrease the tolerance, so that we can more easily select a precise
# point.
picker.tolerance = 0.01
pipename = '/tmp/fifo_kepler'
if not os.path.exists(pipename):
os.mkfifo(pipename)
f = open(pipename, 'r')
nbodies = 0
names = []
#centeridx = -1
plotlines = True
showtext = False
update = True
live = True
lastidx = -1
scale_factor = 0.03
center_id = 0
tag_map = {}
tag_list = []
gcf().scene.interactor.add_observer('KeyPressEvent', key_intercept);
colors = []
norm_colors = []
pts = None
paths = None
texts = []
done = False
positions = np.zeros([nbodies, 3])
count = 0
name_toggle = False
path_toggle = False
tag_toggle = False
cur_count = count
au = 1.495e8
figure(gcf(), bgcolor=(0, 0, 0))
init()
#t = threading.Thread(target=read_loop)
#t.start()
myanim()
| gpl-3.0 |
xchaoinfo/python | burun/0007/codes/find_keywords_in_diary.py | 78 | 1185 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Date: 13-03-15
# Author: Liang
import os
import re
# set diaries path
diaries_path = "diaries/"
diaries = os.listdir(diaries_path)
# set stop words to make informative keywords
stop_words = open("Stop Words.txt", 'r').read()
stop_words_list = stop_words.split(" ")
# Find top 5 keywords in a txt
def find_keywords(words):
    words_dictionary = {}
    for word in words:
        key = word.lower()
        if key not in words_dictionary and key not in stop_words_list:
            # Count the word case-insensitively, so "Word" and "word"
            # are tallied together instead of under separate keys
            words_dictionary[key] = 0
            for item in words:
                if item.lower() == key:
                    words_dictionary[key] += 1
    # Find the 5 keywords with the highest frequency
keywords = sorted(
words_dictionary, key=words_dictionary.__getitem__, reverse=True)[0:5]
return keywords
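# A hypothetical, equivalent refactor using the standard library:
#   from collections import Counter
#   counts = Counter(w.lower() for w in words if w.lower() not in stop_words_list)
#   keywords = [w for w, _ in counts.most_common(5)]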
for diary in diaries:
# Coding by utf-8
with open(diaries_path + diary, "r", encoding='utf-8', errors='ignore') as content:
diary_words_list = re.findall(r"[\w']+", content.read())
print("The keywords of diary " + diary + " is: ", end="")
print(find_keywords(diary_words_list))
| mit |