# ===== 132nd-etcher/EMFT :: emft/gui/tab_about.py (license: gpl-3.0) =====
# coding=utf-8
from emft.core import constant
from emft.core.logging import make_logger
from emft.gui.base import GridLayout, HSpacer, Label, VLayout, VSpacer
from emft.gui.main_ui_tab_widget import MainUiTabChild
LOGGER = make_logger(__name__)
class TabChildAbout(MainUiTabChild):
def tab_clicked(self):
pass
@property
def tab_title(self) -> str:
return 'About'
def __init__(self, parent=None):
super(TabChildAbout, self).__init__(parent)
repo_label = Label(
'''<a href='{link}'>{link}</a>'''.format(link=constant.LINK_REPO)
)
repo_label.setOpenExternalLinks(True)
changelog_label = Label(
'''<a href='{link}'>{link}</a>'''.format(link=constant.LINK_CHANGELOG)
)
changelog_label.setOpenExternalLinks(True)
self.setLayout(
VLayout(
[
GridLayout(
[
[Label('Github repository: '), repo_label, HSpacer()],
[Label('Changelog: '), changelog_label, HSpacer()],
],
[0, 0, 1]
),
VSpacer(),
]
)
)
# ===== jfinkels/networkx :: networkx/readwrite/graph6.py (license: bsd-3-clause) =====
# Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# Tomas Gavenciak <gavento@ucw.cz>
# All rights reserved.
# BSD license.
#
# Authors: Tomas Gavenciak <gavento@ucw.cz>
# Aric Hagberg <aric.hagberg@lanl.gov>
"""Functions for reading and writing graphs in the *graph6* format.
The *graph6* file format is suitable for small graphs or large dense
graphs. For large sparse graphs, use the *sparse6* format.
For more information, see the `graph6`_ homepage.
.. _graph6: http://users.cecs.anu.edu.au/~bdm/data/formats.html
"""
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file, not_implemented_for
__all__ = ['read_graph6', 'parse_graph6', 'generate_graph6', 'write_graph6']
def parse_graph6(string):
"""Read a simple undirected graph in graph6 format from string.
Parameters
----------
string : string
Data in graph6 format
Returns
-------
G : Graph
Raises
------
NetworkXError
If the string is unable to be parsed in graph6 format
Examples
--------
>>> G = nx.parse_graph6('A_')
>>> sorted(G.edges())
[(0, 1)]
See Also
--------
generate_graph6, read_graph6, write_graph6
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
def bits():
"""Return sequence of individual bits from 6-bit-per-value
list of data values."""
for d in data:
for i in [5,4,3,2,1,0]:
yield (d>>i)&1
if string.startswith('>>graph6<<'):
string = string[10:]
data = graph6_to_data(string)
n, data = data_to_n(data)
nd = (n*(n-1)//2 + 5) // 6
if len(data) != nd:
raise NetworkXError(\
'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
G=nx.Graph()
G.add_nodes_from(range(n))
for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
if b:
G.add_edge(i,j)
return G
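# Illustrative walkthrough (added note, not in the original networkx source):
# for the docstring example parse_graph6('A_'), graph6_to_data gives
# [ord('A') - 63, ord('_') - 63] == [2, 32]; data_to_n reads n = 2, leaving
# the single 6-bit unit 32 == 0b100000, whose leading bit set marks the pair
# (i, j) == (0, 1) as an edge -- hence sorted(G.edges()) == [(0, 1)].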
@open_file(0,mode='rt')
def read_graph6(path):
"""Read simple undirected graphs in graph6 format from path.
Parameters
----------
path : file or string
File or filename to write.
Returns
-------
G : Graph or list of Graphs
If the file contains multiple lines then a list of graphs is returned
Raises
------
NetworkXError
If the string is unable to be parsed in graph6 format
Examples
--------
>>> nx.write_graph6(nx.Graph([(0,1)]), 'test.g6')
>>> G = nx.read_graph6('test.g6')
>>> sorted(G.edges())
[(0, 1)]
See Also
--------
generate_graph6, parse_graph6, write_graph6
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
glist = []
for line in path:
line = line.strip()
if not len(line):
continue
glist.append(parse_graph6(line))
if len(glist) == 1:
return glist[0]
else:
return glist
@not_implemented_for('directed','multigraph')
def generate_graph6(G, nodes = None, header=True):
"""Generate graph6 format string from a simple undirected graph.
Parameters
----------
G : Graph (undirected)
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by G.nodes() is used.
header: bool
If True add '>>graph6<<' string to head of data
Returns
-------
s : string
String in graph6 format
Raises
------
NetworkXError
If the graph is directed or has parallel edges
Examples
--------
>>> G = nx.Graph([(0, 1)])
>>> nx.generate_graph6(G)
'>>graph6<<A_'
See Also
--------
read_graph6, parse_graph6, write_graph6
Notes
-----
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
if nodes is not None:
G = G.subgraph(nodes)
H = nx.convert_node_labels_to_integers(G)
ns = sorted(H.nodes())
def bits():
for (i,j) in [(i,j) for j in range(1,n) for i in range(j)]:
yield G.has_edge(ns[i],ns[j])
n = G.order()
data = n_to_data(n)
d = 0
flush = False
for i, b in zip(range(n * n), bits()):
d |= b << (5 - (i % 6))
flush = True
if i % 6 == 5:
data.append(d)
d = 0
flush = False
if flush:
data.append(d)
string_data = data_to_graph6(data)
if header:
string_data = '>>graph6<<' + string_data
return string_data
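# Illustrative trace (not in the original source): for nx.Graph([(0, 1)]),
# n_to_data(2) == [2] and the single edge bit packs into the 6-bit unit 32,
# so data == [2, 32] encodes to chr(2 + 63) + chr(32 + 63) == 'A_', matching
# the docstring example above.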
@open_file(1, mode='wt')
def write_graph6(G, path, nodes = None, header=True):
"""Write a simple undirected graph to path in graph6 format.
Parameters
----------
G : Graph (undirected)
path : file or string
File or filename to write.
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by G.nodes() is used.
header: bool
If True add '>>graph6<<' string to head of data
Raises
------
NetworkXError
If the graph is directed or has parallel edges
Examples
--------
>>> G = nx.Graph([(0, 1)])
>>> nx.write_graph6(G, 'test.g6')
See Also
--------
generate_graph6, parse_graph6, read_graph6
Notes
-----
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
path.write(generate_graph6(G, nodes=nodes, header=header))
path.write('\n')
# helper functions
def graph6_to_data(string):
"""Convert graph6 character sequence to 6-bit integers."""
v = [ord(c)-63 for c in string]
if len(v) > 0 and (min(v) < 0 or max(v) > 63):
return None
return v
def data_to_graph6(data):
"""Convert 6-bit integer sequence to graph6 character sequence."""
if len(data) > 0 and (min(data) < 0 or max(data) > 63):
raise NetworkXError("graph6 data units must be within 0..63")
return ''.join([chr(d+63) for d in data])
def data_to_n(data):
"""Read initial one-, four- or eight-unit value from graph6
integer sequence.
Return (value, rest of seq.)"""
if data[0] <= 62:
return data[0], data[1:]
if data[1] <= 62:
return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
return ((data[2]<<30) + (data[3]<<24) + (data[4]<<18) +
(data[5]<<12) + (data[6]<<6) + data[7], data[8:])
def n_to_data(n):
"""Convert an integer to one-, four- or eight-unit graph6 sequence."""
if n < 0:
raise NetworkXError("Numbers in graph6 format must be non-negative.")
if n <= 62:
return [n]
if n <= 258047:
return [63, (n>>12) & 0x3f, (n>>6) & 0x3f, n & 0x3f]
if n <= 68719476735:
return [63, 63,
(n>>30) & 0x3f, (n>>24) & 0x3f, (n>>18) & 0x3f,
(n>>12) & 0x3f, (n>>6) & 0x3f, n & 0x3f]
raise NetworkXError("Numbers above 68719476735 are not supported by graph6")
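# Worked examples for the two helpers above (illustrative, following the
# graph6 spec cited in the module docstring):
#   n_to_data(30)    -> [30]              (single unit, n <= 62)
#   n_to_data(12345) -> [63, 3, 0, 57]    (four units, 63 <= n <= 258047)
#   data_to_n([63, 3, 0, 57]) -> (12345, [])  round-trips the encoding,
#   since (3 << 12) + (0 << 6) + 57 == 12345.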
def teardown_module(module):
import os
if os.path.isfile('test.g6'):
os.unlink('test.g6')
# ===== imply/chuu :: ppapi/generators/idl_lexer.py (license: bsd-3-clause) =====
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Lexer for PPAPI IDL """
#
# IDL Lexer
#
# The lexer is uses the PLY lex library to build a tokenizer which understands
# WebIDL tokens.
#
# WebIDL, and WebIDL regular expressions can be found at:
# http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
# http://www.dabeaz.com/ply/
import os.path
import re
import sys
#
# Try to load the ply module, if not, then assume it is in the third_party
# directory, relative to ppapi
#
try:
from ply import lex
except:
module_path, module_name = os.path.split(__file__)
third_party = os.path.join(module_path, '..', '..', 'third_party')
sys.path.append(third_party)
from ply import lex
from idl_option import GetOption, Option, ParseOptions
Option('output', 'Generate output.')
#
# IDL Lexer
#
class IDLLexer(object):
# 'tokens' is a value required by lex which specifies the complete list
# of valid token types.
tokens = [
# Symbol and keywords types
'COMMENT',
'DESCRIBE',
'ENUM',
'LABEL',
'SYMBOL',
'INLINE',
'INTERFACE',
'STRUCT',
'TYPEDEF',
# Extra WebIDL keywords
'CALLBACK',
'DICTIONARY',
'OPTIONAL',
'STATIC',
# Invented for apps use
'NAMESPACE',
# Data types
'FLOAT',
'OCT',
'INT',
'HEX',
'STRING',
# Operators
'LSHIFT',
'RSHIFT'
]
# 'keywords' is a map of string to token type. All SYMBOL tokens are
# matched against keywords, to determine if the token is actually a keyword.
keywords = {
'describe' : 'DESCRIBE',
'enum' : 'ENUM',
'label' : 'LABEL',
'interface' : 'INTERFACE',
'readonly' : 'READONLY',
'struct' : 'STRUCT',
'typedef' : 'TYPEDEF',
'callback' : 'CALLBACK',
'dictionary' : 'DICTIONARY',
'optional' : 'OPTIONAL',
'static' : 'STATIC',
'namespace' : 'NAMESPACE',
}
# 'literals' is a value expected by lex which specifies a list of valid
# literal tokens, meaning the token type and token value are identical.
literals = '"*.(){}[],;:=+-/~|&^?'
# Token definitions
#
# Lex assumes any value or function in the form of 't_<TYPE>' represents a
# regular expression where a match will emit a token of type <TYPE>. In the
# case of a function, the function is called when a match is made. These
# definitions come from WebIDL.
# 't_ignore' is a special match of items to ignore
t_ignore = ' \t'
# Constant values
t_FLOAT = r'-?(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?|-?\d+[Ee][+-]?\d+'
t_INT = r'-?[0-9]+[uU]?'
t_OCT = r'-?0[0-7]+'
t_HEX = r'-?0[Xx][0-9A-Fa-f]+'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
# A line ending '\n', we use this to increment the line number
def t_LINE_END(self, t):
r'\n+'
self.AddLines(len(t.value))
# We do not process escapes in the IDL strings. Strings are exclusively
# used for attributes, and not used as typical 'C' constants.
def t_STRING(self, t):
r'"[^"]*"'
t.value = t.value[1:-1]
self.AddLines(t.value.count('\n'))
return t
# A C or C++ style comment: /* xxx */ or //
def t_COMMENT(self, t):
r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
self.AddLines(t.value.count('\n'))
return t
# Return a "preprocessor" inline block
def t_INLINE(self, t):
r'\#inline (.|\n)*?\#endinl.*'
self.AddLines(t.value.count('\n'))
return t
# A symbol or keyword.
def t_KEYWORD_SYMBOL(self, t):
r'_?[A-Za-z][A-Za-z_0-9]*'
# All non-keywords are assumed to be symbols
t.type = self.keywords.get(t.value, 'SYMBOL')
# We strip leading underscores so that you can specify symbols with the same
# value as a keywords (E.g. a dictionary named 'interface').
if t.value[0] == '_':
t.value = t.value[1:]
return t
def t_ANY_error(self, t):
msg = "Unrecognized input"
line = self.lexobj.lineno
# If that line has not been accounted for, then we must have hit
# EoF, so compute the beginning of the line that caused the problem.
if line >= len(self.index):
# Find the offset in the line of the first word causing the issue
word = t.value.split()[0]
offs = self.lines[line - 1].find(word)
# Add the computed line's starting position
self.index.append(self.lexobj.lexpos - offs)
msg = "Unexpected EoF reached after"
pos = self.lexobj.lexpos - self.index[line]
file = self.lexobj.filename
out = self.ErrorMessage(file, line, pos, msg)
sys.stderr.write(out + '\n')
self.lex_errors += 1
def AddLines(self, count):
# Set the lexer position for the beginning of the next line. In the case
# of multiple lines, tokens can not exist on any of the lines except the
# last one, so the recorded value for previous lines are unused. We still
# fill the array however, to make sure the line count is correct.
self.lexobj.lineno += count
for i in range(count):
self.index.append(self.lexobj.lexpos)
def FileLineMsg(self, file, line, msg):
if file: return "%s(%d) : %s" % (file, line + 1, msg)
return "<BuiltIn> : %s" % msg
def SourceLine(self, file, line, pos):
caret = '\t^'.expandtabs(pos)
# We decrement the line number since the array is 0 based while the
# line numbers are 1 based.
return "%s\n%s" % (self.lines[line - 1], caret)
def ErrorMessage(self, file, line, pos, msg):
return "\n%s\n%s" % (
self.FileLineMsg(file, line, msg),
self.SourceLine(file, line, pos))
def SetData(self, filename, data):
# Start with line 1, not zero
self.lexobj.lineno = 1
self.lexobj.filename = filename
self.lines = data.split('\n')
self.index = [0]
self.lexobj.input(data)
self.lex_errors = 0
def __init__(self):
self.lexobj = lex.lex(object=self, lextab=None, optimize=0)
#
# FilesToTokens
#
# From a set of source file names, generate a list of tokens.
#
def FilesToTokens(filenames, verbose=False):
lexer = IDLLexer()
outlist = []
for filename in filenames:
data = open(filename).read()
lexer.SetData(filename, data)
if verbose: sys.stdout.write(' Loaded %s...\n' % filename)
while 1:
t = lexer.lexobj.token()
if t is None: break
outlist.append(t)
return outlist
def TokensFromText(text):
lexer = IDLLexer()
lexer.SetData('unknown', text)
outlist = []
while 1:
t = lexer.lexobj.token()
if t is None: break
outlist.append(t.value)
return outlist
#
# TextToTokens
#
# From a block of text, generate a list of tokens
#
def TextToTokens(source):
lexer = IDLLexer()
outlist = []
lexer.SetData('AUTO', source)
while 1:
t = lexer.lexobj.token()
if t is None: break
outlist.append(t.value)
return outlist
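# Example (illustrative, not in the original file): tokenizing a small IDL
# fragment yields keyword, symbol and literal values in source order, e.g.
#   TextToTokens('interface Foo { };') -> ['interface', 'Foo', '{', '}', ';']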
#
# TestSame
#
# From a set of token values, generate a new source text by joining with a
# single space. The new source is then tokenized and compared against the
# old set.
#
def TestSame(values1):
# Recreate the source from the tokens. We use newline instead of whitespace
# since the '//' and #inline regex are line sensitive.
text = '\n'.join(values1)
values2 = TextToTokens(text)
count1 = len(values1)
count2 = len(values2)
if count1 != count2:
print "Size mismatch original %d vs %d\n" % (count1, count2)
if count1 > count2: count1 = count2
for i in range(count1):
if values1[i] != values2[i]:
print "%d >>%s<< >>%s<<" % (i, values1[i], values2[i])
  # Rebuild the original and retokenized sources for the reports below.
  src1 = text
  src2 = '\n'.join(values2)
  if GetOption('output'):
    sys.stdout.write('Generating original.txt and tokenized.txt\n')
    open('original.txt', 'w').write(src1)
    open('tokenized.txt', 'w').write(src2)
  if values1 == values2:
    sys.stdout.write('Same: Pass\n')
    return 0
  print "****************\n%s\n%s***************\n" % (src1, src2)
sys.stdout.write('Same: Failed\n')
return -1
#
# TestExpect
#
# From a set of tokens pairs, verify the type field of the second matches
# the value of the first, so that:
# INT 123 FLOAT 1.1
# will generate a passing test, where the first token is the SYMBOL INT,
# and the second token is the INT 123, third token is the SYMBOL FLOAT and
# the fourth is the FLOAT 1.1, etc...
def TestExpect(tokens):
count = len(tokens)
index = 0
errors = 0
while index < count:
type = tokens[index].value
token = tokens[index + 1]
index += 2
if type != token.type:
sys.stderr.write('Mismatch: Expected %s, but got %s = %s.\n' %
(type, token.type, token.value))
errors += 1
if not errors:
sys.stdout.write('Expect: Pass\n')
return 0
sys.stdout.write('Expect: Failed\n')
return -1
def Main(args):
filenames = ParseOptions(args)
try:
tokens = FilesToTokens(filenames, GetOption('verbose'))
values = [tok.value for tok in tokens]
if GetOption('output'): sys.stdout.write(' <> '.join(values) + '\n')
if GetOption('test'):
if TestSame(values):
return -1
if TestExpect(tokens):
return -1
return 0
except lex.LexError as le:
sys.stderr.write('%s\n' % str(le))
return -1
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
# ===== sanger-pathogens/gff3toembl :: gff3toembl/EMBLConverter.py (license: gpl-3.0) =====
import gff3toembl
from gt import CustomVisitor
from gff3toembl.EMBLContig import EMBLContig
class EMBLConverter(CustomVisitor):
def __init__(self, locus_tag=None, translation_table=11):
CustomVisitor.__init__(self)
self.contigs = {}
self.locus_tag = locus_tag
self.translation_table = translation_table
def visit_feature_node(self, feature_node):
sequence_id = feature_node.get_seqid()
contig = self.contigs.get(sequence_id)
if contig: # contig already exists, just try and update it
contig.add_feature(sequence_id = sequence_id, feature_type = feature_node.get_type(), start = feature_node.get_start(),
end = feature_node.get_end(), strand = feature_node.get_strand(),
feature_attributes = feature_node.attribs,
locus_tag = self.locus_tag, translation_table = self.translation_table)
else:
contig = EMBLContig()
successfully_added_feature = contig.add_feature(sequence_id = sequence_id, feature_type = feature_node.get_type(), start = feature_node.get_start(),
end = feature_node.get_end(), strand = feature_node.get_strand(),
feature_attributes = feature_node.attribs,
locus_tag = self.locus_tag, translation_table = self.translation_table)
if successfully_added_feature:
self.contigs[sequence_id] = contig
else:
pass # discard the contig because we didn't add a feature so it is empty
def visit_region_node(self, region_node):
pass # for now
def visit_comment_node(self, comment_node):
pass # for now
def visit_sequence_node(self, sequence_node):
sequence_id = sequence_node.get_description()
contig = self.contigs.setdefault(sequence_id, EMBLContig())
contig.add_sequence(sequence_node.get_sequence())
# ===== SanPen/GridCal :: src/GridCal/Engine/Sparse/utils.py (license: gpl-3.0) =====
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def slice_to_range(sl: slice, n):
"""
Turn a slice into a range
:param sl: slice object
:param n: total number of items
:return: range object, if the slice is not supported an exception is raised
"""
    if sl.start is None and sl.step is None and sl.stop is None:  # (:)
        return range(n)
    elif sl.start is not None and sl.step is None and sl.stop is None:  # (a:)
        return range(sl.start, n)
    elif sl.start is not None and sl.step is not None and sl.stop is None:  # (a::s)
        raise Exception('Invalid slice')
    elif sl.start is not None and sl.step is None and sl.stop is not None:  # (a:b)
        return range(sl.start, sl.stop)
    elif sl.start is not None and sl.step is not None and sl.stop is not None:  # (a:b:s)
        return range(sl.start, sl.stop, sl.step)
    elif sl.start is None and sl.step is None and sl.stop is not None:  # (:b)
        return range(sl.stop)
    else:
        raise Exception('Invalid slice')
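# Example (illustrative): slice_to_range(slice(2, 8, 2), 10) -> range(2, 8, 2)
# and slice_to_range(slice(None, None, None), 4) -> range(4).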
def dense_to_str(mat: np.ndarray):
"""
Turn dense 2D numpy array into a string
:param mat: 2D numpy array
:return: string
"""
rows, cols = mat.shape
val = "Matrix (" + ("%d" % rows) + " x " + ("%d" % cols) + ")\n"
val += str(mat).replace('. ', ' ').replace('[', ' ').replace(']', '').replace('0 ', '_ ').replace('0.', '_ ')
# for i in range(0, rows):
# for j in range(0, cols):
# x = mat[i, j]
# if x is not None:
# if x == 0:
# val += '{:<4}'.format(0)
# else:
# val += '{:<4}'.format(x)
# else:
# val += ""
# val += '\n'
# for rows in M:
# print(*['{:<4}'.format(each) for each in rows])
return val
# ===== trashkalmar/omim :: tools/python/mwm/dump_mwm.py (license: apache-2.0) =====
#!/usr/bin/python
import sys, os.path, random
import json
from mwm import MWM
if len(sys.argv) < 2:
print('Dumps some MWM structures.')
print('Usage: {0} <country.mwm>'.format(sys.argv[0]))
sys.exit(1)
mwm = MWM(open(sys.argv[1], 'rb'))
mwm.read_types(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', 'data', 'types.txt'))
print('Tags:')
tvv = sorted([(k, v[0], v[1]) for k, v in mwm.tags.items()], key=lambda x: x[1])
for tv in tvv:
print(' {0:<8}: offs {1:9} len {2:8}'.format(tv[0], tv[1], tv[2]))
v = mwm.read_version()
print('Format: {0}, version: {1}'.format(v['fmt'], v['date'].strftime('%Y-%m-%d %H:%M')))
print('Header: {0}'.format(mwm.read_header()))
print('Region Info: {0}'.format(mwm.read_region_info()))
print('Metadata count: {0}'.format(len(mwm.read_metadata())))
cross = mwm.read_crossmwm()
if cross:
print('Outgoing points: {0}, incoming: {1}'.format(len(cross['out']), len(cross['in'])))
print('Outgoing regions: {0}'.format(set(cross['neighbours'])))
# Print some random features using reservoir sampling: the first `count`
# features fill the sample, after which each later feature replaces a random
# slot with probability count/(i+1), giving every feature an equal chance.
count = 5
sample = []
for i, feature in enumerate(mwm.iter_features()):
    if i < count:
        sample.append(feature)
    elif random.randint(0, i) < count:
        sample[random.randint(0, count-1)] = feature
print('Feature count: {0}'.format(i + 1))
print('Sample features:')
for feature in sample:
print(json.dumps(feature, ensure_ascii=False))
# ===== vergecurrency/electrum-xvg :: gui/qt/version_getter.py (license: gpl-3.0) =====
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading, re, socket
import webbrowser
import requests
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_xvg.i18n import _
from electrum_xvg import ELECTRUM_VERSION, print_error
class VersionGetter(threading.Thread):
def __init__(self, label):
threading.Thread.__init__(self)
self.label = label
self.daemon = True
def run(self):
try:
res = requests.request("GET", "http://electrum-verge.xyz/version")
except:
print_error("Could not retrieve version information")
return
if res.status_code == 200:
latest_version = res.text
latest_version = latest_version.replace("\n","")
if(re.match('^\d+(\.\d+)*$', latest_version)):
self.label.callback(latest_version)
class UpdateLabel(QLabel):
def __init__(self, config, sb):
QLabel.__init__(self)
self.new_version = False
self.sb = sb
self.config = config
self.current_version = ELECTRUM_VERSION
self.connect(self, QtCore.SIGNAL('new_electrum_version'), self.new_electrum_version)
# prevent HTTP leaks if a proxy is set
if self.config.get('proxy'):
return
VersionGetter(self).start()
def callback(self, version):
self.latest_version = version
if(self.compare_versions(self.latest_version, self.current_version) == 1):
latest_seen = self.config.get("last_seen_version",ELECTRUM_VERSION)
if(self.compare_versions(self.latest_version, latest_seen) == 1):
self.new_version = True
self.emit(QtCore.SIGNAL('new_electrum_version'))
def new_electrum_version(self):
if self.new_version:
self.setText(_("New version available") + ": " + self.latest_version)
self.sb.insertPermanentWidget(1, self)
def compare_versions(self, version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
try:
return cmp(normalize(version1), normalize(version2))
except:
return 0
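    # Example (illustrative): normalize() strips trailing ".0" groups, so
    # compare_versions('2.0', '2') == 0 and compare_versions('2.0.1', '2') == 1.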
def ignore_this_version(self):
self.setText("")
self.config.set_key("last_seen_version", self.latest_version, True)
QMessageBox.information(self, _("Preference saved"), _("Notifications about this update will not be shown again."))
self.dialog.done(0)
def ignore_all_version(self):
self.setText("")
self.config.set_key("last_seen_version", "9.9.9", True)
QMessageBox.information(self, _("Preference saved"), _("No more notifications about version updates will be shown."))
self.dialog.done(0)
def open_website(self):
webbrowser.open("http://electrum-verge.xyz/download.html")
self.dialog.done(0)
def mouseReleaseEvent(self, event):
dialog = QDialog(self)
dialog.setWindowTitle(_('Electrum-XVG update'))
dialog.setModal(1)
main_layout = QGridLayout()
main_layout.addWidget(QLabel(_("A new version of Electrum-XVG is available:")+" " + self.latest_version), 0,0,1,3)
ignore_version = QPushButton(_("Ignore this version"))
ignore_version.clicked.connect(self.ignore_this_version)
ignore_all_versions = QPushButton(_("Ignore all versions"))
ignore_all_versions.clicked.connect(self.ignore_all_version)
open_website = QPushButton(_("Goto download page"))
open_website.clicked.connect(self.open_website)
main_layout.addWidget(ignore_version, 1, 0)
main_layout.addWidget(ignore_all_versions, 1, 1)
main_layout.addWidget(open_website, 1, 2)
dialog.setLayout(main_layout)
self.dialog = dialog
if not dialog.exec_(): return
# ===== lambeau/ansible-modules-core :: cloud/openstack/_quantum_router.py (license: gpl-3.0) =====
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: Create or Remove router from openstack
description:
- Create or Delete routers from OpenStack
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
default: None
tenant_name:
description:
      - Name of the tenant for which the router has to be created; if None, the router will be created for the login tenant.
required: false
default: None
admin_state_up:
description:
      - Desired admin state of the created router.
required: false
default: true
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Creates a router for tenant admin
- quantum_router: state=present
login_username=admin
login_password=admin
login_tenant_name=admin
name=router1"
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _create_router(module, neutron):
router = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
'admin_state_up': module.params['admin_state_up'],
}
try:
new_router = neutron.create_router(dict(router=router))
except Exception, e:
module.fail_json( msg = "Error in creating router: %s" % e.message)
return new_router['router']['id']
def _delete_router(module, neutron, router_id):
try:
neutron.delete_router(router_id)
except:
module.fail_json("Error in deleting the router")
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
admin_state_up = dict(type='bool', default=True),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
router_id = _get_router_id(module, neutron)
if not router_id:
router_id = _create_router(module, neutron)
module.exit_json(changed=True, result="Created", id=router_id)
else:
module.exit_json(changed=False, result="success" , id=router_id)
else:
router_id = _get_router_id(module, neutron)
if not router_id:
module.exit_json(changed=False, result="success")
else:
_delete_router(module, neutron, router_id)
module.exit_json(changed=True, result="deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
# ===== regionbibliotekhalland/digitalasagor :: edittabvideo.py (license: gpl-3.0) =====
# Copyright 2013 Regionbibliotek Halland
#
# This file is part of Digitala sagor.
#
# Digitala sagor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Digitala sagor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Digitala sagor. If not, see <http://www.gnu.org/licenses/>.
from Tkinter import *
import ttk
from tooltip import ToolTip
from language import lang
import language as lng
from playerdlg import showPlayerDialog
from datamodel import tpVideo
import tkFileDialog
from tkMessageBox import showerror
import os
import os.path
import shutil
import spmanager as spm
from edittab import EditTab
_videoFileFormats = [('mp4', '*.mp4'), ('avi', '*.avi'), ('wmv', '*.wmv'), ('mpeg', '*.mpeg'), ('mov', '*.mov')]
class EditTabVideo(EditTab):
"""A Frame for editing video based stories"""
def __init__(self, parent, wdir, datamodel, psize):
"""Initiate
Arguments
parent -- parent tkinter item
wdir -- working directory
datamodel -- the database that is edited by the program
psize -- tuple defining preview size of videos
"""
EditTab.__init__(self, parent, wdir, datamodel, psize)
self._mediatype = tpVideo
#Create variables for common data
self._svVideo = StringVar()
#Make the first row expandable
self.rowconfigure(0, weight = 1)
#Add frame from super class
self._superFrame.grid(row = 0, column = 0, sticky = W + N)
#Create the right column
rightLf = ttk.LabelFrame(self, text = ' ' + lang[lng.txtVideo] + ' ')
rightLf.grid(row = 0, column = 1, pady = 10, sticky = W + N)
rightFrame = Frame(rightLf)
rightFrame.grid()
e = Entry(rightFrame, w = 32, textvariable = self._svVideo, state = "readonly")
e.grid(row = 0, column = 0, padx = 10, pady = 5, sticky = W);
tt = ToolTip(e, '', textvariable = self._svVideo, wraplength = parent.winfo_screenwidth() * 4 / 5)
b = Button(rightFrame, text = lang[lng.txtSelect] + '...', command = self._ehGetVideo)
b.grid(row = 0, column = 1, padx = 10, pady = 5)
b = Button(rightFrame, text = lang[lng.txtWatch], command = self._ehWatch)
b.grid(row = 0, column = 2, padx = 10, pady = 5)
def open(self, slideshow, prepared = False):
"""Open a slideshow for editing
Arguments
slideshow -- the slideshow
prepared -- if true, all media data is already copied to the working folder
(i.e. the slideshow has been created automatically)
"""
EditTab.open(self, slideshow, prepared = False)
if(not prepared):
if(slideshow.video != ''):
shutil.copyfile(slideshow.getPath(slideshow.video), os.path.join(self._wdir, slideshow.video))
self._svVideo.set(slideshow.video)
def clear(self):
"""Clear the edit tab"""
EditTab.clear(self)
self._svVideo.set('')
def _getCurrentSlideshow(self):
"""Create and return a slideshow representing the currently edited slideshow."""
slideshow = EditTab._getCurrentSlideshow(self)
slideshow.video = self._svVideo.get()
return slideshow
#Event handlers
def _ehGetVideo(self):
"""Event handler for assigning a video"""
initdir = spm.spmanager.getFirstPath([spm.VideoFolder,
spm.MostRecentFolder])
filenamepath = tkFileDialog.askopenfilename(initialdir = initdir, filetypes = _videoFileFormats)
if(len(filenamepath) > 0):
filename = os.path.basename(filenamepath)
try:
shutil.copyfile(filenamepath, os.path.join(self._wdir, filename))
except IOError:
showerror(lang[lng.txtCopyError], lang[lng.txtCouldNotCopy] + os.path.basename(filename))
return
self._svVideo.set(filename)
self.setDirty(True)
spm.spmanager.setPath(spm.VideoFolder, os.path.dirname(filenamepath))
def _ehWatch(self):
"""Event handler for preview of the video"""
media = self._getCurrentSlideshow()
showPlayerDialog(self._parent, self._psize, media)
# ===== devalbo/mm_anywhere :: google/protobuf/internal/encoder.py (license: agpl-3.0) =====
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a cStringIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
"""Returns the number of bytes required to serialize a tag with this field
number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarintSize(wire_format.PackTag(field_number, 0))
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
"""A sizer which uses the function compute_value_size to compute the size of
each value. Typically compute_value_size is _VarintSize."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(element)
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(element)
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(value)
return FieldSize
return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
"""Like SimpleSizer, but modify_value is invoked on each value before it is
passed to compute_value_size. modify_value is typically ZigZagEncode."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(modify_value(element))
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(modify_value(element))
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(modify_value(value))
return FieldSize
return SpecificSizer
def _FixedSizer(value_size):
"""Like _SimpleSizer except for a fixed-size field. The input is the size
of one value."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = len(value) * value_size
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
element_size = value_size + tag_size
def RepeatedFieldSize(value):
return len(value) * element_size
return RepeatedFieldSize
else:
field_size = value_size + tag_size
def FieldSize(value):
return field_size
return FieldSize
return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
_SignedVarintSize, wire_format.ZigZagEncode)
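# ZigZag mapping used by the SInt sizers/encoders (illustrative values):
# 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, so small negative numbers stay small
# when varint-encoded.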
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
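# Usage sketch (added for illustration): a sizer constructor is called once
# per field, and the returned callable is then applied to values, e.g.
#   sizer = Int32Sizer(1, is_repeated=False, is_packed=False)
#   sizer(150) == 3   # 1 tag byte + 2 varint bytes for 150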
def StringSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a string field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element.encode('utf-8'))
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value.encode('utf-8'))
return tag_size + local_VarintSize(l) + l
return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a bytes field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element)
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value)
return tag_size + local_VarintSize(l) + l
return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a group field."""
tag_size = _TagSize(field_number) * 2
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += element.ByteSize()
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + value.ByteSize()
return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a message field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = element.ByteSize()
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = value.ByteSize()
return tag_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# MessageSet is special.
def MessageSetItemSizer(field_number):
"""Returns a sizer for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
_TagSize(3))
local_VarintSize = _VarintSize
def FieldSize(value):
l = value.ByteSize()
return static_size + local_VarintSize(l) + l
return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
local_chr = chr
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
local_chr = chr
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeSignedVarint
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return "".join(pieces)
def TagBytes(field_number, wire_type):
"""Encode the given tag and return the bytes. Only called at startup."""
return _VarintBytes(wire_format.PackTag(field_number, wire_type))
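# Example (illustrative): TagBytes(1, wire_format.WIRETYPE_VARINT) == '\x08',
# i.e. (field_number << 3) | wire_type packed as a varint.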
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
"""Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
"""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
"""Like SimpleEncoder but additionally invokes modify_value on every value
before passing it to encode_value. Usually modify_value is ZigZagEncode."""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(modify_value(element))
local_EncodeVarint(write, size)
for element in value:
encode_value(write, modify_value(element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, modify_value(element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, modify_value(value))
return EncodeField
return SpecificEncoder
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
"""Return a constructor for an encoder for float fields.
This is like StructPackEncoder, but catches errors that may be due to
passing non-finite floating-point values to struct.pack, and makes a
second attempt to encode those values.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
write('\x00\x00\x80\x7F')
elif value == _NEG_INF:
write('\x00\x00\x80\xFF')
elif value != value: # NaN
write('\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
write('\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
write('\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
write('\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
raise ValueError('Can\'t encode floating-point values that are '
'%d bytes long (only 4 or 8)' % value_size)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
# This try/except block is going to be faster than any code that
# we could write to check whether element is finite.
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
try:
write(local_struct_pack(format, value))
except SystemError:
EncodeNonFiniteOrRaise(write, value)
return EncodeField
return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
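# Byte-level example (illustrative): FloatEncoder uses struct.pack('<f', ...),
# so a float field 1 holding 1.0 serializes as '\x0d' + '\x00\x00\x80\x3f'
# (tag for field 1/fixed32, then little-endian IEEE-754 1.0).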
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = chr(0)
true_byte = chr(1)
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a bytes field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, local_len(element))
write(element)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, local_len(value))
return write(value)
return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, element.ByteSize())
element._InternalSerialize(write)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, value.ByteSize())
return value._InternalSerialize(write)
return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = "".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField
| agpl-3.0 | 5,709,194,657,205,806,000 | 732,076,512,618,822,400 | 32.413524 | 80 | 0.67663 | false |
ciudadanointeligente/lobby_cplt | lobby/csv_reader.py | 1 | 1655 | from lobby.models import Active, Audiencia, Passive
from popolo.models import Identifier
import uuid
import unicodedata
from datetime import datetime
class ActivosCSVReader():
def parse_line(self, line):
        # Columns used: 0 = external id, 3 = first name, 4 = last name;
        # columns 5 and 7 feed the (currently unused) seed below.
        active = Active()
        active.name = unicode(line[3] + " " + line[4])
        active.save()
        # NOTE: computed but never used anywhere; kept as in the original.
        seed = line[3] + line[4] + line[5] + line[7]
        i = Identifier(identifier=line[0])
        active.identifiers.add(i)
class AudienciasCSVReader():
def __init__(self, *args, **kwargs):
self.audiencia_records = {
}
def parse_audiencia_line(self, line):
audiencia = Audiencia()
audiencia.observations = line[9].decode('utf-8').strip()
audiencia.length = int(line[7])
date = datetime.strptime(line[6], '%Y-%m-%d %H:%M:%S')
audiencia.date = date
self.audiencia_records[line[0]] = audiencia
def parse_several_lines(self, lines):
lines.pop(0)
for line in lines:
self.parse_audiencia_line(line)
def parse_one_person(self, line, klass, pre_):
name = line[3].decode('utf-8').strip() + u" " + line[4].decode('utf-8').strip()
p = klass.objects.get(name=name)
i = Identifier(identifier=pre_ + line[0].decode('utf-8').strip())
p.identifiers.add(i)
def parse_one_passive_lines(self, line):
self.parse_one_person(line, Passive, 'passive_')
def parse_several_passives_lines(self, lines):
lines.pop(0)
for line in lines:
self.parse_one_passive_lines(line)
def parse_one_active_lines(self, line):
self.parse_one_person(line, Active, 'active_')
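# Illustrative usage sketch (hypothetical row; the parsers expect at least
# eight utf-8 encoded columns):
#   reader = ActivosCSVReader()
#   reader.parse_line(['id-1', '', '', 'Jane', 'Doe', 'x', '', 'y'])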
| agpl-3.0 | 8,336,952,305,327,993,000 | 2,831,408,640,563,105,000 | 30.826923 | 87 | 0.607855 | false |
marcelocure/django | django/core/management/sql.py | 399 | 1890 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
def emit_pre_migrate_signal(verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
def emit_post_migrate_signal(verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
| bsd-3-clause | -2,561,690,969,595,645,400 | -735,251,645,773,106,600 | 36.8 | 101 | 0.667196 | false |
johanvdw/rasterio | examples/concurrent-cpu-bound.py | 6 | 3361 | """concurrent-cpu-bound.py
Operate on a raster dataset window-by-window using a ThreadPoolExecutor.
Simulates a CPU-bound thread situation where multiple threads can improve performance.
With -j 4, the program returns in about 1/4 the time as with -j 1.
"""
import concurrent.futures
import multiprocessing
import time
import numpy
import rasterio
from rasterio._example import compute
def main(infile, outfile, num_workers=4):
with rasterio.drivers():
# Open the source dataset.
with rasterio.open(infile) as src:
# Create a destination dataset based on source params.
# The destination will be tiled, and we'll "process" the tiles
# concurrently.
meta = src.meta
del meta['transform']
meta.update(affine=src.affine)
meta.update(blockxsize=256, blockysize=256, tiled='yes')
with rasterio.open(outfile, 'w', **meta) as dst:
# Define a generator for data, window pairs.
                # We use the new read() method here to read a 3D array with
                # all bands, but could also use read_band().
def jobs():
for ij, window in dst.block_windows():
data = src.read(window=window)
result = numpy.zeros(data.shape, dtype=data.dtype)
yield data, result, window
# Submit the jobs to the thread pool executor.
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_workers) as executor:
# Map the futures returned from executor.submit()
# to their destination windows.
#
# The _example.compute function modifies no Python
# objects and releases the GIL. It can execute
# concurrently.
future_to_window = {
executor.submit(compute, data, res): (res, window)
for data, res, window in jobs()}
# As the processing jobs are completed, get the
# results and write the data to the appropriate
# destination window.
for future in concurrent.futures.as_completed(
future_to_window):
result, window = future_to_window[future]
# Since there's no multiband write() method yet in
# Rasterio, we use write_band for each part of the
# 3D data array.
for i, arr in enumerate(result, 1):
dst.write_band(i, arr, window=window)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="Concurrent raster processing demo")
parser.add_argument(
'input',
metavar='INPUT',
help="Input file name")
parser.add_argument(
'output',
metavar='OUTPUT',
help="Output file name")
parser.add_argument(
'-j',
metavar='NUM_JOBS',
type=int,
default=multiprocessing.cpu_count(),
help="Number of concurrent jobs")
args = parser.parse_args()
main(args.input, args.output, args.j)
| bsd-3-clause | -2,754,200,184,191,004,700 | 153,778,353,819,631,900 | 34.378947 | 86 | 0.551026 | false |
Alwnikrotikz/micolog2 | plugins/wapblog/wapblog.py | 2 | 8001 | # -*- coding: utf-8 -*-
from micolog_plugin import *
import logging
import urllib
from model import *
from google.appengine.api import users
from google.appengine.api import memcache
from base import BaseRequestHandler,request_cache
from google.appengine.ext import webapp
from datetime import datetime, timedelta
def urlencode(value):
return urllib.quote(value.encode('utf8'))
class wapblog(Plugin):
def __init__(self):
Plugin.__init__(self,__file__)
self.author="云在天边"
self.authoruri="http://www.tangblog.info"
self.uri="http://www.tangblog.info"
		self.description="Micolog WAP Blog plugin: browse new posts and read or post comments conveniently from a mobile phone. (By default only Google Account users can sign in to comment; click the plugin name to configure.)"
self.name="Micolog Wap Blog"
self.version="0.6"
self.register_urlhandler('(?i)/wap',waphandler)
self.register_urlhandler('/wap/(\d+)',waphandler)
self.register_urlhandler('(?i)/wap/page',pagehandler)
self.register_urlhandler('(?i)/wap/post_comment',postComment)
self.register_urlhandler('(?i)/wap/(.*)',Error404)
def get(self,page):
postcount=OptionSet.getValue("posts_per_page",default="8")
commentcount=OptionSet.getValue("LatestCommentCount",default="5")
return '''
<h3>The "WAP Blog" plugin is working!</h3>
<p>Please complete the settings below</p>
<form action="" method="post">
Posts per page: <input name="PostCount" value="%s" onKeyUp="this.value=this.value.replace(/\D/g,'')" onafterpaste="this.value=this.value.replace(/\D/g,'')" /><br />
Recent comments per post: <input name="CommentCount" value="%s" onKeyUp="this.value=this.value.replace(/\D/g,'')" onafterpaste="this.value=this.value.replace(/\D/g,'')" />(if this is set to 0, all comments are shown)<br />
<br>
<input type="submit" title="Save" value="Save">
</form>
<p>Congratulations! Your "Micolog WAP Blog" plugin is working!<br />The URL of the WAP page is:
<a href="/wap" target="_blank">http://www.yourdomain.com/wap</a><br />
<b>Author:</b><a href="http://www.tangblog.info" target="_blank">云在天边</a><br/></p>
<p>Your support is what keeps the author developing; thank you for helping with real action!</p>
<p>If you hit any problem while using it, please file a report on the author's message board (云在天边 <a href="http://www.tangblog.info/contact">www.tangblog.info/contact</a>)!</p>
'''%(postcount,commentcount)
def post(self,page):
postcount=int(page.param("PostCount"))
commentcount=int(page.param("CommentCount"))
OptionSet.setValue("posts_per_page",postcount)
OptionSet.setValue("LatestCommentCount",commentcount)
return self.get(page)
class waphandler(BaseRequestHandler):
def get(self,page=1):
self.doget(page)
#TODO: update this @request_cache()
def doget(self,page):
try:
from model import g_blog
except:
pass
page=int(page)
time=datetime.now()
entrycount=g_blog.postscount()
		posts_per_page = OptionSet.getValue("posts_per_page",default="8")
		# The stored option is a string; convert it, falling back to 8.
		posts_per_page = int(posts_per_page) if posts_per_page else 8
max_page = entrycount / posts_per_page + ( entrycount % posts_per_page and 1 or 0 )
comments=memcache.get("wap_comments"+self.request.path_qs)
if comments is None:
comments=Comment.all().order('-date').fetch(5)
memcache.set("wap_comments"+self.request.path_qs,comments)
if page < 1 or page > max_page:
return self.error(404)
entries=memcache.get("wap_entries"+self.request.path_qs)
if entries is None :
entries = Entry.all().filter('entrytype =','post').\
filter("published =", True).order('-date').\
fetch(posts_per_page, offset = (page-1) * posts_per_page)
memcache.set("wap_entries"+self.request.path_qs,entries)
show_prev =entries and (not (page == 1))
show_next =entries and (not (page == max_page))
self.render2("plugins/wapblog/index.html",{'entries':entries,
'show_prev' : show_prev,
'show_next' : show_next,
'pageindex':page,
'time':time,
'ishome':True,
'pagecount':max_page,
'postscount':entrycount,
'comments':comments
})
class pagehandler(BaseRequestHandler):
#TODO: update this @request_cache()
def get(self,*arg1):
try: id=int(self.param("id") or self.param("p") )
except: return self.redirect('/wap')
time=datetime.now()
		commentcount = OptionSet.getValue("LatestCommentCount",default="5")
		# The stored option is a string; convert it, falling back to 5.
		commentcount = int(commentcount) if commentcount else 5
entries = Entry.all().filter("published =", True).filter('post_id =',id).fetch(1)
entry=entries[0]
comments=memcache.get("wap_comments"+self.request.path_qs)
if comments is None:
if commentcount==0:
comments=Comment.all().filter("entry =",entry).order('-date')
memcache.set("wap_comments"+self.request.path_qs,comments)
else:
comments=Comment.all().filter("entry =",entry).order('-date').fetch(commentcount)
memcache.set("wap_comments"+self.request.path_qs,comments)
		Comments=memcache.get("wap_Comments"+self.request.path_qs)
		if Comments is None:
			Comments=Comment.all().filter("entry =",entry).order('-date')
			# Populate the cache so the lookup above can ever hit.
			memcache.set("wap_Comments"+self.request.path_qs,Comments)
user = users.get_current_user()
if user:
greeting = ("Welcome, %s! (<a href=\"%s\">sign out</a>)" %
(user.nickname(), users.create_logout_url(self.request.uri)))
email = user.email()
try:
query = Comment.all().filter('email =',email).order('-date').fetch(1)
name = query[0].author
weburl = query[0].weburl
except:
name=user.nickname()
weburl=None
self.render2("plugins/wapblog/page.html",{'entry':entry,'id':id,'comments':comments,'Comments':Comments,'user_name':name,'user_email':email,'user':user,'user_url':weburl,'greeting':greeting,'time':time})
else:
greeting = ("<a href=\"%s\">Sign in with your Google Account</a>." %
users.create_login_url(self.request.uri))
self.render2("plugins/wapblog/page.html",{'entry':entry,'id':id,'comments':comments,'Comments':Comments,'greeting':greeting,'user':user,'time':time})
class postComment(BaseRequestHandler):
def get(self,*arg1):
self.response.set_status(405)
self.write('<h1>405 Method Not Allowed</h1>\n<a href="/wap">Back To Home</a>')
def post(self):
name=self.param('author')
#email=self.param('email')
url=self.param('url')
key=self.param('key')
content=self.param('comment')
parent_id=self.paramint('parentid',0)
reply_notify_mail=True
user = users.get_current_user()
try:
email=user.email()
except:
email=None
if not (name and email and content):
self.response.out.write('Please input name and comment content .\n <a href="javascript:history.back(-1)">Back</a>')
else:
comment=Comment(author=name,
content=content+"<br /><small>from wap blog</small>",
email=email,
reply_notify_mail=reply_notify_mail,
entry=Entry.get(key))
starturl='http://'
if url:
try:
if not url.lower().startswith(('http://','https://')):
url = starturl + url
comment.weburl=url
except:
comment.weburl=None
info_str='#@#'.join([urlencode(name),urlencode(email),urlencode(url)])
			logging.info("info:"+name+"#@#"+info_str + " comment from WAP site")
cookiestr='comment_user=%s;expires=%s;domain=%s;path=/'%( info_str,
(datetime.now()+timedelta(days=100)).strftime("%a, %d-%b-%Y %H:%M:%S GMT"),
'' )
comment.ip=self.request.remote_addr
if parent_id:
comment.parent=Comment.get_by_id(parent_id)
comment.no=comment.entry.commentcount+1
try:
comment.save()
memcache.delete("/"+comment.entry.link)
self.response.headers.add_header( 'Set-Cookie', cookiestr)
self.redirect(self.referer+"#comment-"+str(comment.key().id()))
memcache.delete("/feed/comments")
except:
self.response.out.write('Sorry,Comment not allowed .\n <a href="javascript:history.back(-1)">Back</a>')
class Error404(BaseRequestHandler):
def get(self,*arg1):
self.response.clear()
self.response.set_status(404)
self.response.out.write('<h1>404 Not Found</h1>\n<a href="/wap">Back To Main Page ! </a>')
| gpl-3.0 | -1,066,156,452,368,180,400 | 3,483,973,721,551,884,300 | 37.685279 | 206 | 0.677339 | false |
labordoc/labordoc-next | modules/webtag/lib/webtag_forms.py | 3 | 7394 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebTag Forms"""
from invenio.webtag_config import \
CFG_WEBTAG_LAST_MYSQL_CHARACTER
from invenio.webtag_config import \
CFG_WEBTAG_NAME_MAX_LENGTH
from invenio.webinterface_handler_flask_utils import _
from invenio.wtforms_utils import InvenioBaseForm
from invenio.webuser_flask import current_user
from wtforms import \
IntegerField, \
HiddenField, \
TextField, \
SelectMultipleField, \
validators
# Models
from invenio.sqlalchemyutils import db
from invenio.webtag_model import \
WtgTAG, \
WtgTAGRecord, \
wash_tag_silent, \
wash_tag_blocking
from invenio.bibedit_model import Bibrec
from invenio.search_engine import check_user_can_view_record
def validate_tag_name(dummy_form, field):
""" Check validity of tag name """
if field.data:
suggested_silent = wash_tag_silent(field.data)
suggested = wash_tag_blocking(suggested_silent)
field.data = suggested_silent
if suggested != suggested_silent:
raise validators.ValidationError(
_('Forbidden characters. Try ') + suggested + '.')
if len(suggested) <= 0:
raise validators.ValidationError(
_('The name must contain valid characters.'))
if len(suggested_silent) > CFG_WEBTAG_NAME_MAX_LENGTH:
            raise validators.ValidationError( _('The name cannot exceed ') \
+ str(CFG_WEBTAG_NAME_MAX_LENGTH) + _(' characters.'))
if max(ord(letter) for letter in suggested_silent) \
> CFG_WEBTAG_LAST_MYSQL_CHARACTER:
raise validators.ValidationError( _('Forbidden character.'))
def validate_name_available(dummy_form, field):
""" Check if the user already has tag named this way """
if field.data:
uid = current_user.get_id()
copy_count = db.session.query(WtgTAG).\
filter_by(id_user=uid, name=field.data).count()
if copy_count > 0:
raise validators.ValidationError(
_('Tag with that name already exists.'))
def validate_tag_exists(dummy_form, field):
""" Check if id_tag matches a tag in database """
if field.data:
try:
field.data = int(field.data)
except ValueError:
raise validators.ValidationError(_('Tag ID must be an integer.'))
if not db.session.query(WtgTAG).get(field.data):
raise validators.ValidationError(_('Tag does not exist.'))
def validate_user_owns_tag(dummy_form, field):
""" Check if id_tag matches a tag in database """
if field.data:
tag = db.session.query(WtgTAG).get(field.data)
if tag and tag.id_user != current_user.get_id():
raise validators.ValidationError(
_('You are not the owner of this tag.'))
def validate_bibrec_exists(dummy_form, field):
""" Check if id_bibrec matches a bibrec in database """
if field.data:
try:
field.data = int(field.data)
except ValueError:
raise validators.ValidationError(_('Bibrec ID must be an integer.'))
record = db.session.query(Bibrec).get(field.data)
if (not record):
raise validators.ValidationError(_('Bibrec does not exist.'))
# Switch to merged record if present
merged_id = record.merged_recid_final
if merged_id != record.id:
record = db.session.query(Bibrec).get(merged_id)
field.data = merged_id
if record.deleted:
raise validators.ValidationError(_('Bibrec has been deleted.'))
def validate_user_can_see_bibrec(dummy_form, field):
""" Check if user has rights to view bibrec """
if field.data:
(auth_code, msg) = check_user_can_view_record(current_user, field.data)
if auth_code > 0:
raise validators.ValidationError(
_('Unauthorized to view record: ')+msg)
def validate_not_already_attached(form, dummy_field):
""" Check if the pair (tag, bibrec) is already connected """
if form:
if ('id_tag' in form.data) and ('id_bibrec' in form.data):
tag_record = db.session.query(WtgTAGRecord)\
.get((form.data['id_tag'], form.data['id_bibrec']))
if tag_record is not None:
raise validators.ValidationError(_('Tag already attached.'))
def validate_already_attached(form, dummy_field):
""" Check if the pair (tag, bibrec) is already connected """
if form:
if ('id_tag' in form.data) and ('id_bibrec' in form.data):
tag_record = db.session.query(WtgTAGRecord)\
.get((form.data['id_tag'], form.data['id_bibrec']))
if tag_record is None:
raise validators.ValidationError(_('Tag not attached.'))
class CreateTagForm(InvenioBaseForm):
"""Defines form for creating a new tag."""
name = TextField(_('Name'), [validators.Required(),
validate_tag_name,
validate_name_available])
# Ajax requests only:
# Send a record ID if the tag should be attached to the record
# right after creation
id_bibrec = HiddenField('Tagged record',
[validate_bibrec_exists,
validate_user_can_see_bibrec])
class DeleteTagForm(InvenioBaseForm):
"""Defines form for deleting a tag."""
id_tag = SelectMultipleField('Tag ID',
[validators.Required(),
validate_tag_exists,
validate_user_owns_tag])
class AttachTagForm(InvenioBaseForm):
"""Defines a form validating attaching a tag to record"""
# Ajax requests only:
id_tag = IntegerField('Tag ID',
[validators.Required(),
validate_tag_exists,
validate_not_already_attached,
validate_user_owns_tag])
# validate user rights on tag
id_bibrec = IntegerField('Record ID',
[validate_bibrec_exists,
validate_user_can_see_bibrec])
class DetachTagForm(InvenioBaseForm):
"""Defines a form validating detaching a tag from record"""
# Ajax requests only:
id_tag = IntegerField('Tag ID',
[validators.Required(),
validate_tag_exists,
validate_already_attached,
validate_user_owns_tag])
# validate user rights on tag
id_bibrec = IntegerField('Record ID',
[validators.Required(),
validate_bibrec_exists,
validate_user_can_see_bibrec])
| gpl-2.0 | -5,501,008,173,432,027,000 | 1,721,646,194,088,208,100 | 34.893204 | 80 | 0.620638 | false |
buguelos/odoo | addons/point_of_sale/controllers/main.py | 243 | 1576 | # -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
import time
import random
import werkzeug.utils
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import module_boot, login_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
@http.route('/pos/web', type='http', auth='user')
def a(self, debug=False, **k):
cr, uid, context, session = request.cr, request.uid, request.context, request.session
if not session.uid:
return login_redirect()
PosSession = request.registry['pos.session']
pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context)
if not pos_session_ids:
return werkzeug.utils.redirect('/web#action=point_of_sale.action_pos_session_opening')
PosSession.login(cr,uid,pos_session_ids,context=context)
modules = simplejson.dumps(module_boot(request.db))
init = """
var wc = new s.web.WebClient();
wc._title_changed = function() {}
wc.show_application = function(){
wc.action_manager.do_action("pos.ui");
};
wc.setElement($(document.body));
wc.start();
"""
html = request.registry.get('ir.ui.view').render(cr, session.uid,'point_of_sale.index',{
'modules': modules,
'init': init,
})
return html
| agpl-3.0 | 5,845,252,346,859,807,000 | 5,118,021,337,117,765,000 | 31.833333 | 123 | 0.593274 | false |
JFriel/honours_project | networkx/networkx/algorithms/shortest_paths/dense.py | 42 | 5102 | # -*- coding: utf-8 -*-
"""Floyd-Warshall algorithm for shortest paths.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
__all__ = ['floyd_warshall',
'floyd_warshall_predecessor_and_distance',
'floyd_warshall_numpy']
def floyd_warshall_numpy(G, nodelist=None, weight='weight'):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
Parameters
----------
G : NetworkX graph
nodelist : list, optional
The rows and columns are ordered by the nodes in nodelist.
If nodelist is None then the ordering is produced by G.nodes().
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : NumPy matrix
A matrix of shortest path distances between nodes.
        If there is no path between two nodes the corresponding matrix entry
will be Inf.
Notes
------
Floyd's algorithm is appropriate for finding shortest paths in
dense graphs or graphs with negative weights when Dijkstra's
algorithm fails. This algorithm can still fail if there are
negative cycles. It has running time O(n^3) with running space of O(n^2).
"""
try:
import numpy as np
except ImportError:
        raise ImportError(\
            "floyd_warshall_numpy() requires numpy: http://scipy.org/ ")
# To handle cases when an edge has weight=0, we must make sure that
# nonedges are not given the value 0 as well.
A = nx.to_numpy_matrix(G, nodelist=nodelist, multigraph_weight=min,
weight=weight, nonedge=np.inf)
n,m = A.shape
I = np.identity(n)
A[I==1] = 0 # diagonal elements should be zero
for i in range(n):
A = np.minimum(A, A[i,:] + A[:,i])
return A
def floyd_warshall_predecessor_and_distance(G, weight='weight'):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
Parameters
----------
G : NetworkX graph
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
predecessor,distance : dictionaries
Dictionaries, keyed by source and target, of predecessors and distances
in the shortest path.
Notes
------
Floyd's algorithm is appropriate for finding shortest paths
in dense graphs or graphs with negative weights when Dijkstra's algorithm
fails. This algorithm can still fail if there are negative cycles.
It has running time O(n^3) with running space of O(n^2).
See Also
--------
floyd_warshall
floyd_warshall_numpy
all_pairs_shortest_path
all_pairs_shortest_path_length
"""
from collections import defaultdict
# dictionary-of-dictionaries representation for dist and pred
    # use some defaultdict magic here
# for dist the default is the floating point inf value
dist = defaultdict(lambda : defaultdict(lambda: float('inf')))
for u in G:
dist[u][u] = 0
pred = defaultdict(dict)
# initialize path distance dictionary to be the adjacency matrix
# also set the distance to self to 0 (zero diagonal)
undirected = not G.is_directed()
for u,v,d in G.edges(data=True):
e_weight = d.get(weight, 1.0)
dist[u][v] = min(e_weight, dist[u][v])
pred[u][v] = u
if undirected:
dist[v][u] = min(e_weight, dist[v][u])
pred[v][u] = v
for w in G:
for u in G:
for v in G:
if dist[u][v] > dist[u][w] + dist[w][v]:
dist[u][v] = dist[u][w] + dist[w][v]
pred[u][v] = pred[w][v]
return dict(pred),dict(dist)
def floyd_warshall(G, weight='weight'):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
Parameters
----------
G : NetworkX graph
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : dict
A dictionary, keyed by source and target, of shortest paths distances
between nodes.
Notes
------
Floyd's algorithm is appropriate for finding shortest paths
in dense graphs or graphs with negative weights when Dijkstra's algorithm
fails. This algorithm can still fail if there are negative cycles.
It has running time O(n^3) with running space of O(n^2).
See Also
--------
floyd_warshall_predecessor_and_distance
floyd_warshall_numpy
all_pairs_shortest_path
all_pairs_shortest_path_length
"""
# could make this its own function to reduce memory costs
return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]
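# Illustrative usage (not part of the original module):
#   >>> G = nx.path_graph(3)
#   >>> nx.floyd_warshall(G)[0][2]
#   2.0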
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
| gpl-3.0 | -2,716,379,292,489,024,500 | 5,749,162,952,149,643,000 | 31.291139 | 78 | 0.635045 | false |
kafan15536900/shadowsocks | shadowsocks/eventloop.py | 51 | 7513 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from ssloop
# https://github.com/clowwindy/ssloop
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import select
import errno
import logging
from collections import defaultdict
from shadowsocks import shell
__all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR',
'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES']
POLL_NULL = 0x00
POLL_IN = 0x01
POLL_OUT = 0x04
POLL_ERR = 0x08
POLL_HUP = 0x10
POLL_NVAL = 0x20
EVENT_NAMES = {
POLL_NULL: 'POLL_NULL',
POLL_IN: 'POLL_IN',
POLL_OUT: 'POLL_OUT',
POLL_ERR: 'POLL_ERR',
POLL_HUP: 'POLL_HUP',
POLL_NVAL: 'POLL_NVAL',
}
class EpollLoop(object):
def __init__(self):
self._epoll = select.epoll()
def poll(self, timeout):
return self._epoll.poll(timeout)
def add_fd(self, fd, mode):
self._epoll.register(fd, mode)
def remove_fd(self, fd):
self._epoll.unregister(fd)
def modify_fd(self, fd, mode):
self._epoll.modify(fd, mode)
class KqueueLoop(object):
MAX_EVENTS = 1024
def __init__(self):
self._kqueue = select.kqueue()
self._fds = {}
def _control(self, fd, mode, flags):
events = []
if mode & POLL_IN:
events.append(select.kevent(fd, select.KQ_FILTER_READ, flags))
if mode & POLL_OUT:
events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags))
for e in events:
self._kqueue.control([e], 0)
def poll(self, timeout):
if timeout < 0:
timeout = None # kqueue behaviour
events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout)
results = defaultdict(lambda: POLL_NULL)
for e in events:
fd = e.ident
if e.filter == select.KQ_FILTER_READ:
results[fd] |= POLL_IN
elif e.filter == select.KQ_FILTER_WRITE:
results[fd] |= POLL_OUT
return results.items()
def add_fd(self, fd, mode):
self._fds[fd] = mode
self._control(fd, mode, select.KQ_EV_ADD)
def remove_fd(self, fd):
self._control(fd, self._fds[fd], select.KQ_EV_DELETE)
del self._fds[fd]
def modify_fd(self, fd, mode):
self.remove_fd(fd)
self.add_fd(fd, mode)
class SelectLoop(object):
def __init__(self):
self._r_list = set()
self._w_list = set()
self._x_list = set()
def poll(self, timeout):
r, w, x = select.select(self._r_list, self._w_list, self._x_list,
timeout)
results = defaultdict(lambda: POLL_NULL)
for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]:
for fd in p[0]:
results[fd] |= p[1]
return results.items()
def add_fd(self, fd, mode):
if mode & POLL_IN:
self._r_list.add(fd)
if mode & POLL_OUT:
self._w_list.add(fd)
if mode & POLL_ERR:
self._x_list.add(fd)
def remove_fd(self, fd):
if fd in self._r_list:
self._r_list.remove(fd)
if fd in self._w_list:
self._w_list.remove(fd)
if fd in self._x_list:
self._x_list.remove(fd)
def modify_fd(self, fd, mode):
self.remove_fd(fd)
self.add_fd(fd, mode)
class EventLoop(object):
def __init__(self):
self._iterating = False
if hasattr(select, 'epoll'):
self._impl = EpollLoop()
model = 'epoll'
elif hasattr(select, 'kqueue'):
self._impl = KqueueLoop()
model = 'kqueue'
elif hasattr(select, 'select'):
self._impl = SelectLoop()
model = 'select'
else:
raise Exception('can not find any available functions in select '
'package')
self._fd_to_f = {}
self._handlers = []
self._ref_handlers = []
self._handlers_to_remove = []
logging.debug('using event model: %s', model)
def poll(self, timeout=None):
events = self._impl.poll(timeout)
return [(self._fd_to_f[fd], fd, event) for fd, event in events]
def add(self, f, mode):
fd = f.fileno()
self._fd_to_f[fd] = f
self._impl.add_fd(fd, mode)
def remove(self, f):
fd = f.fileno()
del self._fd_to_f[fd]
self._impl.remove_fd(fd)
def modify(self, f, mode):
fd = f.fileno()
self._impl.modify_fd(fd, mode)
def add_handler(self, handler, ref=True):
self._handlers.append(handler)
if ref:
# when all ref handlers are removed, loop stops
self._ref_handlers.append(handler)
def remove_handler(self, handler):
if handler in self._ref_handlers:
self._ref_handlers.remove(handler)
if self._iterating:
self._handlers_to_remove.append(handler)
else:
self._handlers.remove(handler)
def run(self):
events = []
while self._ref_handlers:
try:
events = self.poll(1)
except (OSError, IOError) as e:
if errno_from_exception(e) in (errno.EPIPE, errno.EINTR):
# EPIPE: Happens when the client closes the connection
# EINTR: Happens when received a signal
# handles them as soon as possible
logging.debug('poll:%s', e)
else:
logging.error('poll:%s', e)
import traceback
traceback.print_exc()
continue
self._iterating = True
for handler in self._handlers:
# TODO when there are a lot of handlers
try:
handler(events)
except (OSError, IOError) as e:
shell.print_exception(e)
if self._handlers_to_remove:
for handler in self._handlers_to_remove:
self._handlers.remove(handler)
self._handlers_to_remove = []
self._iterating = False
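# Illustrative usage sketch (not part of the original module; `server` is a
# hypothetical listening socket, and the handler receives (f, fd, event)
# tuples produced by poll()):
#   loop = EventLoop()
#   loop.add(server, POLL_IN)
#   loop.add_handler(lambda events: None)
#   loop.run()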
# from tornado
def errno_from_exception(e):
"""Provides the errno from an Exception object.
    There are cases where the errno attribute was not set, so we pull
    the errno out of the args; but if someone instantiates an Exception
    without any args you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
# from tornado
def get_sock_error(sock):
error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
return socket.error(error_number, os.strerror(error_number))
| apache-2.0 | 1,007,031,284,025,270,000 | 5,920,291,187,102,049,000 | 28.120155 | 77 | 0.562891 | false |
indhub/mxnet | example/recommenders/randomproj.py | 14 | 6041 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Random projection layers in MXNet as custom python ops.
Currently slow and memory-inefficient, but functional.
"""
import os
import numpy as np
import mxnet as mx
# ref: http://mxnet.io/faq/new_op.html
class RandomBagOfWordsProjection(mx.operator.CustomOp):
"""Random projection layer for sparse bag-of-words (n-hot) inputs.
In the sparse input, only the indices are supplied, because all the
values are understood to be exactly 1.0.
See also RandomProjection for values other than 1.0.
"""
def __init__(self, vocab_size, output_dim, random_seed=54321):
        super(RandomBagOfWordsProjection, self).__init__()
self._vocab = vocab_size
self._proj_dim = output_dim
#NOTE: This naive implementation is slow and uses lots of memory.
# Should use something smarter to not instantiate this matrix.
rs = np.random.RandomState(seed=random_seed)
self.W = self.random_unit_vecs(self._vocab, self._proj_dim, rs)
def random_unit_vecs(self, num_vecs, num_dims, rs):
W = rs.normal(size=(num_vecs, num_dims))
Wlen = np.linalg.norm(W, axis=1)
W_unit = W / Wlen[:,None]
return W_unit
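    # Quick illustrative check (hypothetical shapes, not in the original
    # module): every row of the returned matrix has unit L2 norm, e.g.
    #   rs = np.random.RandomState(0)
    #   W = RandomBagOfWordsProjection(10, 4).random_unit_vecs(10, 4, rs)
    #   np.allclose(np.linalg.norm(W, axis=1), 1.0)   # -> True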
def _get_mask(self, idx, in_data):
"""Returns the mask by which to multiply the parts of the embedding layer.
In this version, we have no weights to apply.
"""
mask = idx >= 0 # bool False for -1 values that should be removed. shape=(b,mnz)
mask = np.expand_dims(mask,2) # shape = (b,mnz,1)
mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)
return mask
def forward(self, is_train, req, in_data, out_data, aux):
#Note: see this run in notebooks/howto-numpy-random-proj.ipynb
# Notation for shapes: b = batch_size, mnz = max_nonzero, d = proj_dim
idx = in_data[0].asnumpy().astype('int32') # shape=(b,mnz)
wd = self.W[idx] # shape= (b,mnz,d)
mask = self._get_mask(idx, in_data)
wd = np.multiply(wd,mask) # shape=(b,mnz,d), but zero'd out non-masked
y = np.sum(wd,axis=1) # shape=(b,d)
mxy = mx.nd.array(y) #NOTE: this hangs if the environment variables aren't set correctly
# See https://github.com/dmlc/mxnet/issues/3813
self.assign(out_data[0], req[0], mxy)
@mx.operator.register("SparseBOWProj")
class RandomBagOfWordsProjectionProp(mx.operator.CustomOpProp):
def __init__(self, vocab_size, output_dim):
# need_top_grad=True means this is not a loss layer
super(RandomBagOfWordsProjectionProp, self).__init__(need_top_grad=True)
self._kwargs = {
'vocab_size': int(vocab_size),
'output_dim': int(output_dim),
}
def list_arguments(self):
return ['indexes']
def list_outputs(self):
return ['output']
def create_operator(self, ctx, shapes, dtypes, **kwargs):
return RandomBagOfWordsProjection(**self._kwargs)
def infer_shape(self, in_shape):
batch_size = in_shape[0][0]
output_shape = (batch_size, self._kwargs['output_dim'])
return in_shape, [output_shape], []
class SparseRandomProjection(RandomBagOfWordsProjection):
"""Random projection of sparse input vector.
Takes an sparse input layer, effectively in coordinate (COO) format,
where the row number is implicit, because it's the minibatch record.
See the simpler version RandomBagOfWordsProjection if all values are 1.0.
"""
def _get_mask(self, idx, in_data):
"""Returns the mask by which to multiply the parts of the embedding layer.
In this version, we apply the weights.
"""
val = in_data[1].asnumpy() # shape=(b,mnz)
mask = idx >= 0 # bool False for -1 values that should be removed. shape=(b,mnz)
mask = np.multiply(mask,val) # All (b,mnz)
mask = np.expand_dims(mask,2) # shape = (b,mnz,1)
mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)
return mask
@mx.operator.register("SparseRandomProjection")
class SparseRandomProjectionProp(RandomBagOfWordsProjectionProp):
def list_arguments(self):
return ['indexes', 'values']
def create_operator(self, ctx, shapes, dtypes, **kwargs):
return SparseRandomProjection(**self._kwargs)
def infer_shape(self, in_shape):
# check that indexes and values are the same shape.
if in_shape[0] != in_shape[1]:
raise ValueError("Input shapes differ. indexes:%s. values:%s. must be same"
% (str(in_shape[0]),str(in_shape[1])))
return super(SparseRandomProjectionProp,self).infer_shape(in_shape)
if __name__ == "__main__":
print("Simple test of proj layer")
data = mx.symbol.Variable('data')
vals = mx.symbol.Variable('vals')
net = mx.symbol.Custom(indexes=data, values=vals, name='rproj',
op_type='SparseRandomProjection',
vocab_size=999, output_dim=29)
d = mx.nd.zeros(shape=(3,100))
v = mx.nd.ones(shape=(3,100))
e = net.bind(ctx=mx.cpu(), args={'data':d, 'vals':v})
e.forward()
print(e.outputs[0].asnumpy())
print("Done with proj layer test")
| apache-2.0 | -8,505,338,246,276,671,000 | 1,213,403,182,864,530,700 | 39.273333 | 97 | 0.652044 | false |
nschneid/pyutil | ds/set.py | 4 | 2535 | '''
OrderedSet implementation, from http://stackoverflow.com/questions/1653970/does-python-have-an-ordered-set/1653978#1653978
The index() method and a few unit tests have been added.
@author: Nathan Schneider (nschneid)
@since: 2010-08-11
'''
# Strive towards Python 3 compatibility
from __future__ import print_function, unicode_literals, division, absolute_import
from future_builtins import map, filter
import collections
class OrderedSet(collections.OrderedDict, collections.MutableSet):
'''
A set that preserves the ordering of its entries.
>>> {3,2,9,2}=={9,2,3}
True
>>> x = OrderedSet([3,2,9,2])
>>> x == OrderedSet([2,9,3])
False
>>> x == OrderedSet([3,2,3,9,2])
True
>>> [y for y in x]
[3, 2, 9]
>>> x.index(2)
1
>>> x.index(0)
Traceback (most recent call last):
...
ValueError: 0 is not in set
>>> [y for y in {3,2,9}]
[9, 2, 3]
'''
def update(self, *args, **kwargs):
if kwargs:
raise TypeError("update() takes no keyword arguments")
for s in args:
for e in s:
self.add(e)
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
def index(self, elem):
try:
return self.keys().index(elem)
except ValueError:
raise ValueError('{} is not in set'.format(elem))
def __le__(self, other):
return all(e in other for e in self)
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return all(e in self for e in other)
def __gt__(self, other):
return self >= other and self != other
def __repr__(self):
return 'OrderedSet([%s])' % (', '.join(map(repr, self.keys())))
def __str__(self):
return '{%s}' % (', '.join(map(repr, self.keys())))
difference = property(lambda self: self.__sub__)
difference_update = property(lambda self: self.__isub__)
intersection = property(lambda self: self.__and__)
intersection_update = property(lambda self: self.__iand__)
issubset = property(lambda self: self.__le__)
issuperset = property(lambda self: self.__ge__)
symmetric_difference = property(lambda self: self.__xor__)
symmetric_difference_update = property(lambda self: self.__ixor__)
union = property(lambda self: self.__or__)
def test():
import doctest
doctest.testmod()
if __name__=='__main__':
test()
| mit | -8,520,959,205,549,158,000 | -8,972,021,700,080,776,000 | 26.554348 | 122 | 0.584615 | false |
Dave-ts/Sigil | src/Resource_Files/plugin_launchers/python/sigil_bs4/builder/_lxml.py | 5 | 10167 | from __future__ import unicode_literals, division, absolute_import, print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
binary_type = bytes
unicode = str
else:
range = xrange
text_type = unicode
binary_type = str
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
if PY3:
from io import StringIO
else:
from StringIO import StringIO
import collections
from lxml import etree
from sigil_bs4.element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
)
from sigil_bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from sigil_bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
NAME = "lxml-xml"
ALTERNATE_NAMES = ["xml"]
# Well, it's permissive by XML parser standards.
features = [NAME, LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
exclude_encodings=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(
markup, try_encodings, is_html, exclude_encodings)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Fix bug in bs4 _lxml.py that ignores attributes that specify namespaces on this tag
# Invert each namespace map as it comes in.
if len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can properly recreate it later.
attrs = attrs.copy()
for prefix, namespace in list(nsmap.items()):
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
elif len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn then into NamespacedAttribute objects.
new_attrs = {}
for attr, value in list(attrs.items()):
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_attr_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_tag_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_attr_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
    # To keep the tag prefixes as clean/simple as possible: if more than one
    # prefix is possible and one of them is None, prefer None.
    # This happens when a namespace prefix is added for an attribute that
    # duplicates an earlier namespace meant for tags that had set that
    # namespace prefix to None.
def _prefix_for_tag_namespace(self, namespace):
"""Find the currently active prefix for the given namespace for a tag."""
if namespace is None:
return None
prefixes = []
for inverted_nsmap in self.nsmaps:
if inverted_nsmap is not None and namespace in inverted_nsmap:
prefixes.append(inverted_nsmap[namespace])
if len(prefixes) == 0 or None in prefixes:
return None
        # Now return the last (most recent) viable prefix
return prefixes[-1]
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_tag_namespace(namespace)
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
self.soup.endData()
self.soup.handle_data(target + ' ' + data)
self.soup.endData(ProcessingInstruction)
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
NAME = LXML
ALTERNATE_NAMES = ["lxml-html"]
features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
is_xml = False
def default_parser(self, encoding):
return etree.HTMLParser
def feed(self, markup):
encoding = self.soup.original_encoding
try:
self.parser = self.parser_for(encoding)
self.parser.feed(markup)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(str(e))
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<html><body>%s</body></html>' % fragment
| gpl-3.0 | -4,622,860,858,907,824,000 | 8,587,186,473,691,638,000 | 35.053191 | 93 | 0.618963 | false |
ossdemura/django-miniblog | src/Lib/site-packages/django/contrib/gis/db/backends/postgis/pgraster.py | 491 | 5071 | import binascii
import struct
from django.forms import ValidationError
from .const import (
GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL,
STRUCT_SIZE,
)
def pack(structure, data):
"""
Pack data into hex string with little endian format.
"""
return binascii.hexlify(struct.pack('<' + structure, *data)).upper()
def unpack(structure, data):
"""
Unpack little endian hexlified binary string into a list.
"""
return struct.unpack('<' + structure, binascii.unhexlify(data))
def chunk(data, index):
"""
Split a string into two parts at the input index.
"""
return data[:index], data[index:]
def get_pgraster_srid(data):
"""
Extract the SRID from a PostGIS raster string.
"""
if data is None:
return
# The positional arguments here extract the hex-encoded srid from the
# header of the PostGIS raster string. This can be understood through
# the POSTGIS_HEADER_STRUCTURE constant definition in the const module.
return unpack('i', data[106:114])[0]
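# Illustrative round-trip (hypothetical value, not in the original module):
#   >>> pack('i', [4326])
#   b'E6100000'
#   >>> unpack('i', b'E6100000')
#   (4326,)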
def from_pgraster(data):
"""
Convert a PostGIS HEX String into a dictionary.
"""
if data is None:
return
# Split raster header from data
header, data = chunk(data, 122)
header = unpack(POSTGIS_HEADER_STRUCTURE, header)
# Parse band data
bands = []
pixeltypes = []
while data:
# Get pixel type for this band
pixeltype, data = chunk(data, 2)
pixeltype = unpack('B', pixeltype)[0]
# Subtract nodata byte from band nodata value if it exists
has_nodata = pixeltype >= 64
if has_nodata:
pixeltype -= 64
# Convert datatype from PostGIS to GDAL & get pack type and size
pixeltype = POSTGIS_TO_GDAL[pixeltype]
pack_type = GDAL_TO_STRUCT[pixeltype]
pack_size = 2 * STRUCT_SIZE[pack_type]
# Parse band nodata value. The nodata value is part of the
# PGRaster string even if the nodata flag is True, so it always
# has to be chunked off the data string.
nodata, data = chunk(data, pack_size)
nodata = unpack(pack_type, nodata)[0]
# Chunk and unpack band data (pack size times nr of pixels)
band, data = chunk(data, pack_size * header[10] * header[11])
band_result = {'data': binascii.unhexlify(band)}
# If the nodata flag is True, set the nodata value.
if has_nodata:
band_result['nodata_value'] = nodata
# Append band data to band list
bands.append(band_result)
# Store pixeltype of this band in pixeltypes array
pixeltypes.append(pixeltype)
# Check that all bands have the same pixeltype.
# This is required by GDAL. PostGIS rasters could have different pixeltypes
# for bands of the same raster.
if len(set(pixeltypes)) != 1:
raise ValidationError("Band pixeltypes are not all equal.")
return {
'srid': int(header[9]),
'width': header[10], 'height': header[11],
'datatype': pixeltypes[0],
'origin': (header[5], header[6]),
'scale': (header[3], header[4]),
'skew': (header[7], header[8]),
'bands': bands,
}
def to_pgraster(rast):
"""
Convert a GDALRaster into PostGIS Raster format.
"""
# Return if the raster is null
if rast is None or rast == '':
return
# Prepare the raster header data as a tuple. The first two numbers are
# the endianness and the PostGIS Raster Version, both are fixed by
# PostGIS at the moment.
rasterheader = (
1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
rast.srs.srid, rast.width, rast.height,
)
# Hexlify raster header
result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
for band in rast.bands:
# The PostGIS raster band header has exactly two elements, a 8BUI byte
# and the nodata value.
#
# The 8BUI stores both the PostGIS pixel data type and a nodata flag.
# It is composed as the datatype integer plus 64 as a flag for existing
# nodata values:
# 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAG (0 or 64)
#
# For example, if the byte value is 71, then the datatype is
# 71-64 = 7 (32BSI) and the nodata value is True.
structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
# Get band pixel type in PostGIS notation
pixeltype = GDAL_TO_POSTGIS[band.datatype()]
# Set the nodata flag
if band.nodata_value is not None:
pixeltype += 64
# Pack band header
bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
# Hexlify band data
band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper()
# Add packed header and band data to result
result += bandheader + band_data_hex
# Cast raster to string before passing it to the DB
return result.decode()
| mit | -6,334,916,719,069,067,000 | 6,177,857,146,261,548,000 | 30.496894 | 79 | 0.626504 | false |
RootForum/magrathea | magrathea/cli/commands/version.py | 1 | 1239 | # -*- coding: utf-8 -*-
"""
magrathea.cli.commands.version
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import magrathea
from ..base import BaseCommand
class VersionCommand(BaseCommand):
"""
Command class implementing the version command.
"""
name = 'version'
aliases = ('--version', '-v')
help = 'Show version and copyright information'
arguments = (
(('-s', '--short'), {'help': 'only print the version string', 'action': 'store_true'}),
)
def handle(self):
"""Command handler for the version command"""
if 'short' in self.args and self.args.short:
self.log_notice(magrathea.get_version())
else:
self.log_notice("""Magrathea version {version}
Copyright (C) {year} by {author}
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. """.format(
version=magrathea.get_version(),
year=magrathea.COPYRIGHT[0],
author=magrathea.COPYRIGHT[1])
)
self._status = 0
| mit | 7,474,105,363,412,676,000 | -3,523,605,944,312,882,700 | 30.769231 | 95 | 0.603713 | false |
2014c2g4/c2g4 | w2/static/Brython2.0.0-20140209-164925/Lib/xml/sax/saxutils.py | 730 | 11688 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
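# Illustrative example (sketch):
#
#   escape('<a> & "b"', {'"': '&quot;'})   # -> '&lt;a&gt; &amp; &quot;b&quot;'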
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
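# Illustrative example (sketch):
#
#   unescape('&lt;a&gt; &amp; b')   # -> '<a> & b'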
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
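# Illustrative examples (sketch):
#
#   quoteattr('a "b"')   # -> '\'a "b"\''  (single quotes chosen automatically)
#   quoteattr('a\nb')    # -> '"a&#10;b"'  (newline made attribute-safe)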
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
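# Minimal XMLGenerator usage sketch (assumes an io.StringIO text buffer; any
# object with a write() method also works):
#
#   buf = io.StringIO()
#   gen = XMLGenerator(buf, encoding='utf-8')
#   gen.startDocument()
#   gen.startElement('root', {})
#   gen.characters('hi')
#   gen.endElement('root')
#   gen.endDocument()
#   # buf.getvalue() == '<?xml version="1.0" encoding="utf-8"?>\n<root>hi</root>'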
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
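# Minimal filter sketch (hypothetical subclass, for illustration): upper-case
# all character data while passing every other event through unchanged.
#
#   class UpperCaseFilter(XMLFilterBase):
#       def characters(self, content):
#           XMLFilterBase.characters(self, content.upper())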
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
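# Illustrative call (sketch): resolve a relative system id against a base
# document before parsing.
#
#   source = prepare_input_source('data.xml', base='/tmp/feed.xml')
#   # opens /tmp/data.xml if it exists, otherwise resolves it as a URL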
| gpl-2.0 | 5,377,505,398,410,937,000 | 8,219,255,604,507,200,000 | 31.831461 | 84 | 0.612252 | false |
ratnania/pigasus | doc/manual/include/demo/test_neumann_quartcircle.py | 1 | 2730 | #! /usr/bin/python
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from pigasus.gallery.poisson import *
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
# ...
sin = np.sin ; cos = np.cos ; pi = np.pi ; exp = np.exp
# ...
#-----------------------------------
try:
nx = int(sys.argv[1])
except:
nx = 31
try:
ny = int(sys.argv[2])
except:
ny = 31
try:
px = int(sys.argv[3])
except:
px = 2
try:
py = int(sys.argv[4])
except:
py = 2
from igakit.cad_geometry import quart_circle as domain
geo = domain(n=[nx,ny],p=[px,py])
#-----------------------------------
# ...
# exact solution
# ...
R = 1.
r = 0.5
c = 1. # for Neumann
#c = pi / (R**2-r**2) # for all Dirichlet BC
u = lambda x,y : [ x * y * sin ( c * (R**2 - x**2 - y**2 )) ]
# ...
# ...
# rhs
# ...
f = lambda x,y : [4*c**2*x**3*y*sin(c*(R**2 - x**2 - y**2)) \
+ 4*c**2*x*y**3*sin(c*(R**2 - x**2 - y**2)) \
+ 12*c*x*y*cos(c*(R**2 - x**2 - y**2)) ]
# ...
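# consistency-check sketch (assumes sympy is available): f above should equal
# the negative Laplacian of the exact solution u
#
#   import sympy as sp
#   X, Y = sp.symbols('x y')
#   U = X*Y*sp.sin(c*(R**2 - X**2 - Y**2))
#   sp.simplify(-sp.diff(U, X, 2) - sp.diff(U, Y, 2) - f(X, Y)[0])  # -> 0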
# ...
# values of gradu.n at the boundary
# ...
gradu = lambda x,y : [-2*c*x**2*y*cos(c*(R**2 - x**2 - y**2)) + y*sin(c*(R**2 - x**2 - y**2)),
                      -2*c*x*y**2*cos(c*(R**2 - x**2 - y**2)) + x*sin(c*(R**2 - x**2 - y**2))]
def func_g (x,y) :
du = gradu (x, y)
return [ du[0] , du[1] ]
# ...
# ...
# values of u at the boundary
# ...
bc_neumann={}
bc_neumann [0,0] = func_g
Dirichlet = [[1,2,3]]
#AllDirichlet = True
# ...
# ...
try:
bc_dirichlet
except NameError:
bc_dirichlet = None
else:
pass
try:
bc_neumann
except NameError:
bc_neumann = None
else:
pass
try:
AllDirichlet
except NameError:
AllDirichlet = None
else:
pass
try:
Dirichlet
except NameError:
Dirichlet = None
else:
pass
try:
Metric
except NameError:
Metric = None
else:
pass
# ...
# ...
PDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,
AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)
# ...
# ...
PDE.assembly(f=f)
PDE.solve()
# ...
# ...
normU = PDE.norm(exact=u)
print "norm U = ", normU
# ...
# ...
if PLOT:
PDE.plot() ; plt.colorbar(); plt.title('$u_h$')
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| mit | -6,280,085,234,493,662,000 | -8,082,561,401,385,837,000 | 17.322148 | 97 | 0.456044 | false |
aroig/offlineimap | test/OLItest/globals.py | 12 | 1373 | #Constants, that don't rely on anything else in the module
# Copyright (C) 2012- Sebastian Spaeth & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
try:
from cStringIO import StringIO
except ImportError: #python3
from io import StringIO
default_conf=StringIO("""[general]
#will be set automatically
metadata =
accounts = test
ui = quiet
[Account test]
localrepository = Maildir
remoterepository = IMAP
[Repository Maildir]
Type = Maildir
# will be set automatically during tests
localfolders =
[Repository IMAP]
type=IMAP
# Don't hammer the server with too many connection attempts:
maxconnections=1
folderfilter= lambda f: f.startswith('INBOX.OLItest')
""")
| gpl-2.0 | 7,658,065,020,478,947,000 | 4,414,208,022,772,053,000 | 31.690476 | 78 | 0.748725 | false |
wsilva/fdp-folha-de-ponto-ach2077 | fdp/settings/base.py | 1 | 2384 | """
Django settings for fdp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j7y4q=&c=n0o9hdoc(ebkfj41k%wyhe&^zq!dqrwnwxgxbz&z+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pontos',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fdp.urls'
WSGI_APPLICATION = 'fdp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'pt-br'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
UPLOAD_DIR = os.path.join(BASE_DIR, 'static', 'uploads')
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
| gpl-3.0 | 7,745,456,355,557,954,000 | 6,094,693,808,234,647,000 | 23.326531 | 71 | 0.713926 | false |
flyher/pymo | symbian/PythonForS60_1.9.6/module-repo/standard-modules/encodings/cp1253.py | 593 | 13350 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1253',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\ufffe' # 0xAA -> UNDEFINED
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe' # 0xD2 -> UNDEFINED
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit | 9,088,449,105,050,500,000 | -4,117,290,232,040,372,700 | 42.485342 | 119 | 0.545843 | false |
antonioguirola/webpy-base | forms.py | 1 | 4418 | # -*- coding: utf-8 -*-
from web import form
import re
import db
# Required regular expressions:
#formatoVisa=re.compile(r'[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}')
# Functions needed for the validations
def fooFunction():
pass
"""
EXAMPLE REGISTRATION (SIGN-UP) FORM
formularioInscripcion = form.Form(
form.Textbox(
"nombre",
form.notnull,
class_="form-control",
id="nombreId",
description="Nombre: "
),
form.Textbox(
"apellidos",
form.notnull,
class_="form-control",
id="apellidosId",
description="Apellidos: "
),
form.Textbox(
"dni",
form.notnull,
class_="form-control",
id="dniId",
description="DNI: "
),
form.Textbox(
"email",
form.notnull,
form.regexp(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}',
'Formato de email incorrecto'),
class_="form-control",
id="emailId",
description=u"Correo electrónico: "
),
form.Dropdown(
"dia",
[(d, d) for d in range(1,32)],
id="diaID",
description=u"Día de nacimiento: ",
),
form.Dropdown(
"mes",
[(1,'Enero'),(2,'Febrero'),(3,'Marzo'),(4,'Abril'),(5,'Mayo'),(6,'Junio'),
(7,'Julio'),(8,'Agosto'),(9,'Septiembre'),(10,'Octubre'),(11,'Noviembre'),(12,'Diciembre')],
id="mesID",
description="Mes de nacimiento: "
),
form.Dropdown(
"anio",
[d for d in range(1930,2006)],
id="anioID",
description=u"Año de nacimiento: "
),
form.Textarea(
"direccion",
form.notnull,
class_="form-control",
id="direccionId",
description=u"Dirección: "
),
form.Textbox(
"username",
form.notnull,
class_="form-control",
id="usernameId",
description="Nombre de usuario: "
),
form.Password(
"password1",
form.notnull,
class_="form-control",
id="password1Id",
description=u"Contraseña: "
),
form.Password(
"password2",
form.notnull,
class_="form-control",
id="password2Id",
description=u"Repita la contraseña: "
),
form.Radio(
'formaPago',
[["VISA","VISA "],["contraReembolso","Contra reembolso"]],
form.notnull,
id="formaPagoId",
description="Forma de pago: "
),
form.Textbox(
"visa",
class_="form-control",
id="visaId",
description="Número de tarjeta VISA: ",
),
form.Checkbox(
"acepto",
description="Acepto las condiciones de uso ",
id="aceptoId",
value="si"
),
validators = [
form.Validator(u"Fecha incorrecta", lambda x: ((int(x.mes)==2 and int(x.dia)<=28)) or
(int(x.mes) in [4,6,9,11] and int(x.dia)<31) or (int(x.mes) in [1,3,5,7,8,10,12])
or (int(x.mes)==2 and int(x.dia)==29 and esBisiesto(x.anio))),
form.Validator(u"La contraseña debe tener al menos 7 caracteres",lambda x: len(x.password1)>6),
form.Validator(u"Las contraseñas no coinciden", lambda x: x.password1 == x.password2),
form.Validator(u"Debe introducir un número de tarjeta válido",lambda x: (x.formaPago=="contraReembolso")
or (x.formaPago=="VISA" and formatoVisa.match(x.visa))),
form.Validator(u"Debe aceptar los términos y condiciones",lambda x: x.acepto=="si")
]
)
"""
| gpl-3.0 | -5,111,416,543,373,467,000 | -6,007,240,353,605,437,000 | 33.155039 | 121 | 0.433727 | false |
hehongliang/tensorflow | tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op.py | 37 | 1215 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Loader for the custom inc_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
if platform.system() != "Windows":
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=g-import-not-at-top
_inc_op = loader.load_op_library(
resource_loader.get_path_to_datafile("_inc_op.so"))
else:
raise RuntimeError("Windows not supported")
| apache-2.0 | 325,638,944,133,152,600 | -8,517,909,292,822,585,000 | 36.96875 | 79 | 0.700412 | false |
binghongcha08/pyQMD | sys_bath/bilinear/sys_bath_lqf.py | 2 | 10991 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 09:42:22 2016
@author: bing
"""
import numpy as np
#import scipy
import numba
import sys
import math
bohr_angstrom = 0.52917721092
hartree_wavenumber = 219474.63
#hartree_wavenumber = scipy.constants.value(u'hartree-inverse meter relationship') / 1e2
def M1mat(a, Nb):
M1 = np.zeros((Nb,Nb))
for m in range(Nb-1):
M1[m,m+1] = np.sqrt(float(m+1)/2.0/a)
M1 = Sym(M1)
return M1
def M2mat(a, Nb):
M2 = np.zeros((Nb,Nb))
for m in range(Nb):
M2[m,m] = (float(m) + 0.5)/a
if Nb > 1:
for m in range(Nb-2):
M2[m,m+2] = np.sqrt(float((m+1)*(m+2)))/2.0/a
M2 = Sym(M2)
return M2
def M3mat(a, Nb):
M3 = np.zeros((Nb,Nb))
for m in range(Nb-1):
M3[m,m+1] = 3.0 * (float(m+1)/2./a)**1.5
if Nb > 2:
for m in range(Nb-3):
M3[m,m+3] = np.sqrt(float((m+1)*(m+2)*(m+3))) / (2.0*a)**1.5
M3 = Sym(M3)
return M3
def M4mat(a, Nb):
M4 = np.zeros((Nb,Nb))
for m in range(Nb):
M4[m,m] = float(3.0 * m**2 + 3.0 * (m+1)**2) / (2.*a)**2
if Nb > 1:
for m in range(Nb-2):
M4[m,m+2] = (4.0*m + 6.0) * np.sqrt(float((m+1)*(m+2))) / (2.*a)**2
if Nb > 3:
for m in range(Nb-4):
M4[m,m+4] = np.sqrt(float((m+1)*(m+2)*(m+3)*(m+4))) / (2.0*a)**2
M4 = Sym(M4)
if Nb > 1:
if not M4[0,1] == M4[1,0]:
print(M4)
print('\n ERROR: Not symmetric matrix M4.\n')
sys.exit()
return M4
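# sanity-check sketch (run after this module is loaded; numpy assumed): the
# moment matrices are symmetric and, for width a = 1, the ground-state second
# moment is M2mat(1.0, Nb)[0, 0] == 0.5
#
#   assert np.allclose(M2mat(1.0, 4), M2mat(1.0, 4).T)
#   assert abs(M2mat(1.0, 4)[0, 0] - 0.5) < 1e-12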
def Hermite(x):
cons = np.array([1. / np.sqrt(float(2**n) * float(math.factorial(n))) for n in range(Nb)])
H = []
H.append(1.0)
H.append( x * 2.0 )
if Nb > 2:
for n in range(2,Nb):
Hn = 2.0 * x * H[n-1] - 2.0*(n-1) * H[n-2]
H.append(Hn)
for n in range(Nb):
H[n] = H[n]*cons[n]
return H
# if n == 0:
# H.append(1.)
# elif n == 1:
# return 2. * x * cons
# elif n == 2:
# return (4. * x**2 - 2.) * cons
# elif n == 3:
# return (8.0 * x**3 - 12.0 * x) * cons
# elif n == 4:
# return (16.0 * x**4 - 48.0 * x**2 + 12.0) * cons
# elif n == 5:
# return (32.0*x**5 - 160.0*x**3 + 120.0*x) * cons
# elif n == 6:
# return ()
#def Vx(x):
# g = 0.1
# return x**2/2.0 + g * x**4 / 4.0
def Kmat(alpha,pAve, Nb):
K = np.zeros((Nb,Nb),dtype=complex)
ar = alpha.real
for j in range(Nb):
K[j,j] = np.abs(alpha)**2 / ar * (2. * j + 1.)/2. + pAve**2
for j in range(1,Nb):
K[j-1,j] = -1j*np.conj(alpha) * pAve * np.sqrt(2. * j / ar)
K[j,j-1] = np.conj(K[j-1,j])
if Nb > 2:
for j in range(2,Nb):
K[j-2,j] = - np.sqrt(float((j-1)*j)) * np.conj(alpha)**2 / 2. / ar
K[j,j-2] = np.conj(K[j-2,j])
#K[0,0] = np.abs(alpha)**2/alpha.real / 2. + pAve**2
#K[1,1] = np.abs(alpha)**2/alpha.real * 3.0 / 2. + pAve**2
#K[0,1] = -1j*np.conj(alpha) * pAve * np.sqrt(2.*j/alpha.real)
#K[1,0] = np.conj(K[0,1])
K = K / (2.*amx)
return K
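# sanity-check sketch (assumes amx == 1.0, as set below): for a real width
# alpha = a and pAve = 0, the diagonal reduces to a*(2j+1)/(4*amx)
#
#   K0 = Kmat(1.0 + 0j, 0.0, 3)
#   # K0.diagonal().real -> [0.25, 0.75, 1.25]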
def Sym(V):
n = V.shape[-1]
for i in range(n):
for j in range(i):
V[i,j] = V[j,i]
return V
@numba.autojit
def Vint(x,y):
"""
    bare potential evaluated at (x, y); the bilinear coupling eps*x*y is
    added separately in prop_c, and only the value v0 is returned
"""
PES = 'HO'
if PES == 'Morse':
a, x0 = 1.02, 1.4
De = 0.176 / 100.0
d = (1.0-np.exp(-a*x))
v0 = De*d**2
dv = 2. * De * d * a * np.exp(-a*x)
elif PES == 'HO':
v0 = x**2/2.0 + y**2/2.0
elif PES == 'AHO':
eps = 0.4
v0 = x**2/2.0 + eps * x**4/4.0
dv = x + eps * x**3
#ddv = 2.0 * De * (-d*np.exp(-a*((x-x0)))*a**2 + (np.exp(-a*(x-x0)))**2*a**2)
# elif PES == 'pH2':
#
# dx = 1e-4
#
# v0 = np.zeros(Ntraj)
# dv = np.zeros(Ntraj)
#
# for i in range(Ntraj):
# v0[i] = vpot(x[i])
# dv[i] = ( vpot(x[i] + dx) - v0[i])/dx
return v0
def Vy(y):
v0 = y**2/2.0
dv = y
return v0,dv
def LQF(x,w):
xAve = np.dot(x,w)
xSqdAve = np.dot(x*x,w)
var = (xSqdAve - xAve**2)
a = 1. / 2. / var
r = - a * (x-xAve)
dr = - a
uAve = (np.dot(r**2,w))/2./amy
du = -1./amy * (r*dr)
return r, du, uAve
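# quick-check sketch (assumes amy is defined, as below): for Gaussian samples
# y ~ N(0, 1/(2*a0)) the fitted quantum momentum approaches r = -a0*y
#
#   ys = np.random.randn(10000) / np.sqrt(2.0 * 4.0)
#   ws = np.array([1.0 / ys.size] * ys.size)
#   rs, _, _ = LQF(ys, ws)   # rs is close to -4.0 * ys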
@numba.autojit
def qpot(x,p,r,w):
"""
    Linear quantum force: direct polynomial fitting of the derivative-log
    density (amplitude). Alternative (curve_fit): randomly choose M points and
    do a nonlinear least-squares fit to a predefined functional form.
"""
#tau = (max(xdata) - min(xdata))/(max(x) - min(x))
#if tau > 0.6:
# pass
#else:
    #    print('Data points are not sampled well.')
am= amy
Nb = 2
S = np.zeros((Nb,Nb))
for j in range(Nb):
for k in range(Nb):
S[j,k] = np.dot(x**(j+k), w)
bp = np.zeros(Nb)
br = np.zeros(Nb)
for n in range(Nb):
bp[n] = np.dot(x**n * p, w)
br[n] = np.dot(x**n * r, w)
cp = np.linalg.solve(S,bp)
cr = np.linalg.solve(S,br)
#unit = np.identity(Nb)
#r_approx = cr[0] * unit + cr[1] * x + cr[2] * x**2 + cr[3] * x**3
#p_approx = cp[0] * unit + cp[1] * x + cp[2] * x**2 + cp[3] * x**3
N = len(x)
dr = np.zeros(N)
dp = np.zeros(N)
ddr = np.zeros(N)
ddp = np.zeros(N)
for k in range(1,Nb):
dr += float(k) * cr[k] * x**(k-1)
dp += float(k) * cp[k] * x**(k-1)
for k in range(2,Nb-1):
ddr += float(k * (k-1)) * cr[k] * x**(k-2)
ddp += float(k * (k-1)) * cp[k] * x**(k-2)
fr = -1./2./am * (2. * r * dp + ddp)
fq = 1./2./am * (2. * r * dr + ddr)
Eu = -1./2./am * np.dot(r**2 + dr,w)
return Eu,fq,fr
# initialization
# for DOF y : an ensemble of trajectories
# for DOF x : for each trajectory associate a complex vector c of dimension M
Ntraj = 1024
M = 16
nfit = 2
ax = 1.0 # width of the GH basis
ay0 = 4.0
y0 = 0.0
print('polynomial fitting of c, order = {} \n'.format(nfit))
# initial conditions for c
c = np.zeros((Ntraj,M),dtype=np.complex128)
# mixture of ground and first excited state
#c[:,0] = 1.0/np.sqrt(2.0)+0j
#c[:,1] = 1.0/np.sqrt(2.0)+0j
#for i in range(2,M):
# c[:,i] = 0.0+0.0j
# coherent state
z = 1.0/np.sqrt(2.0)
for i in range(M):
c[:,i] = np.exp(-0.5 * np.abs(z)**2) * z**i / np.sqrt(math.factorial(i))
print('initial occupation \n',c[0,:])
print('trace of density matrix',np.vdot(c[0,:], c[0,:]))
# ---------------------------------
# initial conditions for QTs
y = np.random.randn(Ntraj)
y = y / np.sqrt(2.0 * ay0) + y0
print('trajectory range {}, {}'.format(min(y),max(y)))
py = np.zeros(Ntraj)
ry = - ay0 * (y-y0)
w = np.array([1./Ntraj]*Ntraj)
# -------------------------------
amx = 1.0
amy = 10.0
f_MSE = open('rMSE.out','w')
nout = 20 # number of trajectories to print
fmt = ' {}' * (nout+1) + '\n'
Eu = 0.
Ndim = 1 # dimensionality of the bath
fric_cons = 0.0 # friction constant
Nt = 2**14
dt = 1.0/2.0**10
dt2 = dt/2.0
t = 0.0
print('time range for propagation is [0,{}]'.format(Nt*dt))
print('timestep = {}'.format(dt))
# construct the Hamiltonian matrix for anharmonic oscilator
g = 0.4
V = 0.5 * M2mat(ax,M) + g/4.0 * M4mat(ax,M)
K = Kmat(ax,0.0,M)
H = K+V
print('Hamiltonian matrix in DOF x = \n')
print(H)
print('\n')
@numba.autojit
def norm(c,w):
anm = 0.0
for k in range(Ntraj):
anm += np.vdot(c[k,:], c[k,:]).real * w[k]
return anm
@numba.autojit
def fit_c(c,y):
"""
    global polynomial approximation of c(y) to obtain the derivatives c' and c''
"""
dc = np.zeros((Ntraj,M),dtype=np.complex128)
ddc = np.zeros((Ntraj,M),dtype=np.complex128)
for j in range(M):
z = c[:,j]
pars = np.polyfit(y,z,nfit)
p0 = np.poly1d(pars)
p1 = np.polyder(p0)
p2 = np.polyder(p1)
#for k in range(Ntraj):
dc[:,j] = p1(y)
ddc[:,j] = p2(y)
return dc, ddc
@numba.autojit
def prop_c(H,c,y,ry,py):
dc, ddc = fit_c(c,y)
dcdt = np.zeros([Ntraj,M],dtype=np.complex128)
eps = 0.50 # bilinear coupling Vint = eps*x*y
X1 = M1mat(ax,M)
for k in range(Ntraj):
Vp = eps * y[k] * X1
tmp = (H + Vp).dot(c[k,:]) - ddc[k,:]/2.0/amy - dc[k,:] * ry[k]/amy
dcdt[k,:] = -1j * tmp
return dcdt
@numba.autojit
def xAve(c,y,w):
"""
compute expectation value of x
"""
Xmat = M1mat(ax,M)
x_ave = 0.0+0.0j
for k in range(Ntraj):
for m in range(M):
for n in range(M):
x_ave += Xmat[m,n] * np.conjugate(c[k,m]) * c[k,n] * w[k]
return x_ave.real
# propagate the QTs for y
# update the coefficients for each trajectory
fmt_c = ' {} '* (M+1)
f = open('traj.dat','w')
fe = open('en.out','w')
fc = open('c.dat','w')
fx = open('xAve.dat','w')
fnorm = open('norm.dat', 'w')
v0, dv = Vy(y)
ry, du, Eu = LQF(y,w)
cold = c
dcdt = prop_c(H,c,y,ry,py)
c = c + dcdt * dt
for k in range(Nt):
t = t + dt
py += (- dv - du) * dt2 - fric_cons * py * dt2
y += py*dt/amy
# force field
ry, du, Eu = LQF(y,w)
v0, dv = Vy(y)
py += (- dv - du) * dt2 - fric_cons * py * dt2
# renormalization
anm = norm(c,w)
c /= np.sqrt(anm)
# update c
dcdt = prop_c(H,c,y,ry,py)
cnew = cold + dcdt * dt * 2.0
cold = c
c = cnew
# output data for each timestep
# d = c
# for k in range(Ntraj):
# for i in range(M):
# d[k,i] = np.exp(-1j*t*H[i,i])*c[k,i]
x_ave = xAve(c,y,w)
fx.write('{} {} \n'.format(t,x_ave))
f.write(fmt.format(t,*y[0:nout]))
fnorm.write(' {} {} \n'.format(t,anm))
Ek = np.dot(py*py,w)/2./amy
Ev = np.dot(v0,w)
Eu = Eu
Etot = Ek + Ev + Eu
fe.write('{} {} {} {} {} \n'.format(t,Ek,Ev,Eu,Etot))
print('The total energy = {} Hartree. \n'.format(Etot))
# print trajectory and coefficients
for k in range(Ntraj):
fc.write( '{} {} {} {} \n'.format(y[k], c[k,0],c[k,-2],c[k,-1]))
fe.close()
f.close()
fc.close()
fx.close()
#a, x0, De = 1.02, 1.4, 0.176/100
#print('The well depth = {} cm-1. \n'.format(De * hartree_wavenumber))
#
#omega = a * np.sqrt(2. * De / am )
#E0 = omega/2. - omega**2/16./De
#dE = (Etot-E0) * hartree_wavenumber
#print('Exact ground-state energy = {} Hartree. \nEnergy deviation = {} cm-1. \n'.format(E0,dE))
#
| gpl-3.0 | -6,172,904,489,373,398,000 | 1,340,156,213,176,367,600 | 20.136538 | 96 | 0.459467 | false |
estaban/pyload | module/plugins/accounts/FileserveCom.py | 1 | 2261 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
from time import mktime, strptime
from module.plugins.Account import Account
from module.common.json_layer import json_loads
class FileserveCom(Account):
__name__ = "FileserveCom"
__version__ = "0.2"
__type__ = "account"
__description__ = """Fileserve.com account plugin"""
__author_name__ = "mkaay"
__author_mail__ = "mkaay@mkaay.de"
def loadAccountInfo(self, user, req):
data = self.getAccountData(user)
page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
"submit": "Submit+Query"})
res = json_loads(page)
if res['type'] == "premium":
validuntil = mktime(strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
return {"trafficleft": res['traffic'], "validuntil": validuntil}
else:
return {"premium": False, "trafficleft": None, "validuntil": None}
def login(self, user, data, req):
page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
"submit": "Submit+Query"})
res = json_loads(page)
if not res['type']:
self.wrongPassword()
        # log in at the fileserve.com page
req.load("http://www.fileserve.com/login.php",
post={"loginUserName": user, "loginUserPassword": data['password'], "autoLogin": "checked",
"loginFormSubmit": "Login"})
| gpl-3.0 | 902,533,730,763,959,600 | -213,686,237,428,314,900 | 37.982759 | 116 | 0.597523 | false |
sbellem/django | tests/template_tests/test_extends.py | 154 | 7062 | import os
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.loader_tags import ExtendsError
from django.template.loaders.base import Loader
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from .utils import ROOT
RECURSIVE = os.path.join(ROOT, 'recursive_templates')
class ExtendsBehaviorTests(SimpleTestCase):
def test_normal_extend(self):
engine = Engine(dirs=[os.path.join(RECURSIVE, 'fs')])
template = engine.get_template('one.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'three two one')
def test_extend_recursive(self):
engine = Engine(dirs=[
os.path.join(RECURSIVE, 'fs'),
os.path.join(RECURSIVE, 'fs2'),
os.path.join(RECURSIVE, 'fs3'),
])
template = engine.get_template('recursive.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'fs3/recursive fs2/recursive fs/recursive')
def test_extend_missing(self):
engine = Engine(dirs=[os.path.join(RECURSIVE, 'fs')])
template = engine.get_template('extend-missing.html')
with self.assertRaises(TemplateDoesNotExist) as e:
template.render(Context({}))
tried = e.exception.tried
self.assertEqual(len(tried), 1)
self.assertEqual(tried[0][0].template_name, 'missing.html')
def test_recursive_multiple_loaders(self):
engine = Engine(
dirs=[os.path.join(RECURSIVE, 'fs')],
loaders=[
('django.template.loaders.locmem.Loader', {
'one.html': '{% extends "one.html" %}{% block content %}{{ block.super }} locmem-one{% endblock %}',
'two.html': '{% extends "two.html" %}{% block content %}{{ block.super }} locmem-two{% endblock %}',
'three.html': (
'{% extends "three.html" %}{% block content %}{{ block.super }} locmem-three{% endblock %}'
),
}),
'django.template.loaders.filesystem.Loader',
],
)
template = engine.get_template('one.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'three locmem-three two locmem-two one locmem-one')
def test_extend_self_error(self):
"""
Catch if a template extends itself and no other matching
templates are found.
"""
engine = Engine(dirs=[os.path.join(RECURSIVE, 'fs')])
template = engine.get_template('self.html')
with self.assertRaises(TemplateDoesNotExist):
template.render(Context({}))
def test_extend_cached(self):
engine = Engine(
dirs=[
os.path.join(RECURSIVE, 'fs'),
os.path.join(RECURSIVE, 'fs2'),
os.path.join(RECURSIVE, 'fs3'),
],
loaders=[
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
]),
],
)
template = engine.get_template('recursive.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'fs3/recursive fs2/recursive fs/recursive')
cache = engine.template_loaders[0].get_template_cache
self.assertEqual(len(cache), 3)
expected_path = os.path.join('fs', 'recursive.html')
self.assertTrue(cache['recursive.html'].origin.name.endswith(expected_path))
# Render another path that uses the same templates from the cache
template = engine.get_template('other-recursive.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'fs3/recursive fs2/recursive fs/recursive')
# Template objects should not be duplicated.
self.assertEqual(len(cache), 4)
expected_path = os.path.join('fs', 'other-recursive.html')
self.assertTrue(cache['other-recursive.html'].origin.name.endswith(expected_path))
def test_unique_history_per_loader(self):
"""
Extending should continue even if two loaders return the same
name for a template.
"""
engine = Engine(
loaders=[
['django.template.loaders.locmem.Loader', {
'base.html': '{% extends "base.html" %}{% block content %}{{ block.super }} loader1{% endblock %}',
}],
['django.template.loaders.locmem.Loader', {
'base.html': '{% block content %}loader2{% endblock %}',
}],
]
)
template = engine.get_template('base.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'loader2 loader1')
class NonRecursiveLoader(Loader):
def __init__(self, engine, templates_dict):
self.templates_dict = templates_dict
super(NonRecursiveLoader, self).__init__(engine)
def load_template_source(self, template_name, template_dirs=None):
try:
return self.templates_dict[template_name], template_name
except KeyError:
raise TemplateDoesNotExist(template_name)
@ignore_warnings(category=RemovedInDjango20Warning)
class NonRecursiveLoaderExtendsTests(SimpleTestCase):
loaders = [
('template_tests.test_extends.NonRecursiveLoader', {
'base.html': 'base',
'index.html': '{% extends "base.html" %}',
'recursive.html': '{% extends "recursive.html" %}',
'other-recursive.html': '{% extends "recursive.html" %}',
'a.html': '{% extends "b.html" %}',
'b.html': '{% extends "a.html" %}',
}),
]
def test_extend(self):
engine = Engine(loaders=self.loaders)
output = engine.render_to_string('index.html')
self.assertEqual(output, 'base')
def test_extend_cached(self):
engine = Engine(loaders=[
('django.template.loaders.cached.Loader', self.loaders),
])
output = engine.render_to_string('index.html')
self.assertEqual(output, 'base')
cache = engine.template_loaders[0].template_cache
self.assertTrue('base.html' in cache)
self.assertTrue('index.html' in cache)
# Render a second time from cache
output = engine.render_to_string('index.html')
self.assertEqual(output, 'base')
def test_extend_error(self):
engine = Engine(loaders=self.loaders)
msg = 'Cannot extend templates recursively when using non-recursive template loaders'
with self.assertRaisesMessage(ExtendsError, msg):
engine.render_to_string('recursive.html')
with self.assertRaisesMessage(ExtendsError, msg):
engine.render_to_string('other-recursive.html')
with self.assertRaisesMessage(ExtendsError, msg):
engine.render_to_string('a.html')
| bsd-3-clause | 4,352,655,740,693,684,000 | 1,437,149,156,290,936,600 | 38.233333 | 120 | 0.59983 | false |
SlimRemix/android_external_chromium_org | chrome/common/extensions/docs/server2/patcher.py | 121 | 1026 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Patcher(object):
def GetPatchedFiles(self, version=None):
    '''Returns patched files as (added_files, deleted_files, modified_files)
from the patchset specified by |version|.
'''
raise NotImplementedError(self.__class__)
def GetVersion(self):
'''Returns patch version. Returns None when nothing is patched by the
patcher.
'''
raise NotImplementedError(self.__class__)
def Apply(self, paths, file_system, version=None):
'''Apply the patch to added/modified files. Returns Future with patched
data. Throws FileNotFoundError if |paths| contains deleted files.
'''
raise NotImplementedError(self.__class__)
def GetIdentity(self):
'''Returns a string that identifies this patch. Typically it would be the
codereview server's ID for this patch.
'''
raise NotImplementedError(self.__class__)
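# Minimal subclass sketch (hypothetical, for illustration only):
#
# class NullPatcher(Patcher):
#   '''A patcher that patches nothing and reports no version.'''
#   def GetPatchedFiles(self, version=None):
#     return ([], [], [])
#   def GetVersion(self):
#     return None
#   def Apply(self, paths, file_system, version=None):
#     raise NotImplementedError()
#   def GetIdentity(self):
#     return 'null'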
| bsd-3-clause | -7,763,251,413,363,609,000 | 6,612,599,518,193,393,000 | 35.642857 | 77 | 0.71345 | false |
OCA/stock-logistics-barcode | stock_barcodes/wizard/stock_production_lot.py | 1 | 1359 | # Copyright 2019 Sergio Teruel <sergio.teruel@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class WizStockBarcodesNewLot(models.TransientModel):
_inherit = 'barcodes.barcode_events_mixin'
_name = 'wiz.stock.barcodes.new.lot'
_description = 'Wizard to create new lot from barcode scanner'
product_id = fields.Many2one(
comodel_name='product.product',
required=True,
)
lot_name = fields.Char(
string='Lot name',
required=True,
)
def on_barcode_scanned(self, barcode):
product = self.env['product.product'].search([
('barcode', '=', barcode),
])[:1]
if product and not self.product_id:
self.product_id = product
return
self.lot_name = barcode
def _prepare_lot_values(self):
return {
'product_id': self.product_id.id,
'name': self.lot_name,
}
def confirm(self):
lot = self.env['stock.production.lot'].create(
self._prepare_lot_values())
# Assign lot created to wizard scanning barcode lot_id field
wiz = self.env[self.env.context['active_model']].browse(
self.env.context['active_id']
)
if wiz:
wiz.lot_id = lot
return lot
| agpl-3.0 | -1,491,512,946,510,855,700 | -4,049,943,929,752,887,000 | 29.886364 | 68 | 0.592347 | false |
jalavik/invenio | invenio/modules/search/searchext/units/cataloguer.py | 9 | 1556 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Cataloguer search unit."""
from intbitset import intbitset
def search_unit(query, f, m, wl=None):
"""Return hitset of recIDs that were modified by the given cataloguer."""
from invenio.ext.sqlalchemy import db
from invenio.modules.editor.models import HstRECORD
if query:
try:
cataloguer_name, modification_date = query.split(";")
except ValueError:
cataloguer_name = query
modification_date = ""
where = [HstRECORD.job_person == cataloguer_name]
if modification_date:
where += HstRECORD.filter_time_interval(modification_date)
return intbitset(
db.session.query(HstRECORD.id_bibrec).filter(*where).all()
)
else:
return intbitset([])
| gpl-2.0 | 1,539,311,956,687,123,000 | -3,160,469,067,294,356,000 | 35.186047 | 77 | 0.688303 | false |
RoboCupULaval/RULEngine | Game/Team.py | 1 | 1310 | # Under MIT License, see LICENSE.txt
from RULEngine.Game.Player import Player
from RULEngine.Util.constant import PLAYER_PER_TEAM
from RULEngine.Util.team_color_service import TeamColor
class Team():
def __init__(self, team_color, type="friend"):
self.players = {}
for player_id in range(PLAYER_PER_TEAM):
self.players[player_id] = Player(self, player_id, True, type)
self.team_color = team_color
self.score = 0
def has_player(self, player):
has_player = False
for team_player in self.players.values():
if team_player is player:
has_player = True
return has_player
def is_team_yellow(self):
return self.team_color == TeamColor.YELLOW_TEAM
def update_player(self, player_id, pose, delta=0):
try:
self.players[player_id].update(pose, delta)
except KeyError as err:
raise err
def kalman_update(self, player_id, pose_list, delta=0):
try:
self.players[player_id].kalman_update(pose_list, delta)
except KeyError as err:
raise err
def update_player_command(self, player_id, cmd):
try:
self.players[player_id].set_command(cmd)
except KeyError as err:
raise err
| mit | 7,194,281,695,444,627,000 | -8,472,325,008,027,095,000 | 29.465116 | 73 | 0.61145 | false |
Nitaco/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg_role.py | 22 | 7838 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
type: bool
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: string
sample: "No Async Mirror Group with the name."
"""
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
amg_exists = False
has_desired_role = False
amg_id = None
amg_data = None
get_amgs = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + get_amgs
try:
amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
    except Exception:
module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
for amg in amgs:
if amg['label'] == name:
amg_exists = True
amg_id = amg['id']
amg_data = amg
if amg['localRole'] == body.get('role'):
has_desired_role = True
return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
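    """POST the role change for the AMG, then poll the AMG until the role reversal completes."""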
endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
url = api_url + endpoint
post_data = json.dumps(body)
try:
request(url, data=post_data, method='POST', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
status_url = api_url + status_endpoint
try:
rc, status = request(status_url, method='GET', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
# Here we wait for the role reversal to complete
if 'roleChangeProgress' in status:
while status['roleChangeProgress'] != "none":
try:
rc, status = request(status_url, method='GET',
url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
                    exception=traceback.format_exc())
            time.sleep(1)  # brief pause between polls so we do not hammer the API
return status
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
role=dict(required=True, choices=['primary', 'secondary']),
noSync=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
name = p.pop('name')
if not api_url.endswith('/'):
api_url += '/'
    amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
    if not amg_exists:
module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
elif has_desired_role:
module.exit_json(changed=False, **amg_data)
else:
amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
if amg_data:
module.exit_json(changed=True, **amg_data)
else:
module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()
| gpl-3.0 | 6,470,517,642,672,671,000 | 8,798,617,092,232,363,000 | 32.784483 | 114 | 0.601429 | false |
weety/rt-thread | bsp/stm32/stm32f103-blue-pill/rtconfig.py | 14 | 4024 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
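# Both can be overridden from the environment before running scons, e.g.
# (illustrative paths -- adjust them to your own toolchain install):
#   export RTT_CC=gcc
#   export RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin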
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
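    """Hook for the scons 'dist' command: run the shared sdk_dist helper for this BSP."""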
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| apache-2.0 | 7,724,228,605,285,090,000 | 2,260,396,797,904,066,800 | 25.649007 | 152 | 0.561382 | false |
dajohnso/cfme_tests | widgetastic_manageiq/__init__.py | 1 | 87294 | # -*- coding: utf-8 -*-
import atexit
import json
import os
import re
import six
from collections import namedtuple
from datetime import date
from math import ceil
from tempfile import NamedTemporaryFile
from wait_for import wait_for
from cached_property import cached_property
from jsmin import jsmin
from lxml.html import document_fromstring
from selenium.common.exceptions import WebDriverException
from widgetastic.exceptions import NoSuchElementException
from widgetastic.log import logged
from widgetastic.utils import ParametrizedLocator, Parameter, ParametrizedString, attributize_string
from widgetastic.utils import VersionPick, Version
from widgetastic.widget import (
Table as VanillaTable,
TableColumn as VanillaTableColumn,
TableRow as VanillaTableRow,
Widget,
View,
Select,
TextInput,
Text,
Checkbox,
Image,
ParametrizedView,
FileInput as BaseFileInput,
ClickableMixin,
ConditionalSwitchableView,
do_not_read_this_widget)
from widgetastic.xpath import quote
from widgetastic_patternfly import (
Accordion as PFAccordion, CandidateNotFound, BootstrapSwitch, BootstrapTreeview, Button, Input,
BootstrapSelect, CheckableBootstrapTreeview, FlashMessages)
from cfme.exceptions import ItemNotFound, ManyEntitiesFound
class DynamicTableAddError(Exception):
"""Raised when an attempt to add or save a row to a `widgetastic_manageiq.DynamicTable` fails"""
pass
class DynaTree(Widget):
""" A class directed at CFME Tree elements
"""
XPATH = """\
function xpath(root, xpath) {
if(root == null)
root = document;
var nt = XPathResult.ANY_UNORDERED_NODE_TYPE;
return document.evaluate(xpath, root, null, nt, null).singleNodeValue;
}
"""
# This function retrieves the root of the tree. Can wait for the tree to get initialized
TREE_GET_ROOT = """\
function get_root(loc) {
var start_time = new Date();
var root = null;
while(root === null && ((new Date()) - start_time) < 10000)
{
try {
root = $(loc).dynatree("getRoot");
} catch(err) {
// Nothing ...
}
}
return root;
}
"""
# This function is used to DRY the decision on which text to match
GET_LEVEL_NAME = XPATH + """\
function get_level_name(level, by_id) {
if(by_id){
return level.li.getAttribute("id");
} else {
var e = xpath(level.li, "./span/a");
if(e === null)
return null;
else
return e.textContent;
}
}
"""
# needs xpath to work, provided by dependencies of the other functions
EXPANDABLE = """\
function expandable(el) {
return xpath(el.li, "./span/span[contains(@class, 'dynatree-expander')]") !== null;
}
"""
# This function reads whole tree. If it faces an ajax load, it returns false.
# If it does not return false, the result is complete.
READ_TREE = jsmin(TREE_GET_ROOT + GET_LEVEL_NAME + EXPANDABLE + """\
function read_tree(root, read_id, _root_tree) {
if(read_id === undefined)
read_id = false;
if(_root_tree === undefined)
_root_tree = true;
if(_root_tree) {
root = get_root(root);
if(root === null)
return null;
if(expandable(root) && (!root.bExpanded)) {
root.expand();
if(root.childList === null && root.data.isLazy){
return false;
}
}
var result = new Array();
var need_wait = false;
var children = (root.childList === null) ? [] : root.childList;
for(var i = 0; i < children.length; i++) {
var child = children[i];
var sub = read_tree(child, read_id, false);
if(sub === false)
need_wait = true;
else
result.push(sub);
}
if(need_wait)
return false;
else if(children.length == 0)
return null;
else
return result;
} else {
if(expandable(root) && (!root.bExpanded)) {
root.expand();
if(root.childList === null && root.data.isLazy){
return false;
}
}
var name = get_level_name(root, read_id);
var result = new Array();
var need_wait = false;
var children = (root.childList === null) ? [] : root.childList;
for(var i = 0; i < children.length; i++) {
var child = children[i];
var sub = read_tree(child, read_id, false);
if(sub === false)
need_wait = true;
else
result.push(sub);
}
if(need_wait)
return false;
else if(children.length == 0)
return name;
else
return [name, result]
}
}
""")
def __init__(self, parent, tree_id=None, logger=None):
Widget.__init__(self, parent, logger=logger)
self._tree_id = tree_id
@property
def tree_id(self):
if self._tree_id is not None:
return self._tree_id
else:
try:
return self.parent.tree_id
except AttributeError:
raise NameError(
'You have to specify tree_id to BootstrapTreeview if the parent object does '
'not implement .tree_id!')
def __locator__(self):
return '#{}'.format(self.tree_id)
def read(self):
return self.currently_selected
def fill(self, value):
if self.currently_selected == value:
return False
self.click_path(*value)
return True
@property
def currently_selected(self):
items = self.browser.elements(
'.//li[.//span[contains(@class, "dynatree-active")]]/span/a',
parent=self,
check_visibility=True)
return map(self.browser.text, items)
def root_el(self):
return self.browser.element(self)
def _get_tag(self):
if getattr(self, 'tag', None) is None:
self.tag = self.browser.tag(self)
return self.tag
def read_contents(self, by_id=False):
result = False
while result is False:
self.browser.plugin.ensure_page_safe()
result = self.browser.execute_script(
"{} return read_tree(arguments[0], arguments[1]);".format(self.READ_TREE),
self.__locator__(),
by_id)
return result
@staticmethod
def _construct_xpath(path, by_id=False):
items = []
for item in path:
if by_id:
items.append('ul/li[@id={}]'.format(quote(item)))
else:
items.append('ul/li[./span/a[normalize-space(.)={}]]'.format(quote(item)))
return './' + '/'.join(items)
def _item_expanded(self, id):
span = self.browser.element('.//li[@id={}]/span'.format(quote(id)), parent=self)
return 'dynatree-expanded' in self.browser.get_attribute('class', span)
def _item_expandable(self, id):
return bool(
self.browser.elements(
'.//li[@id={}]/span/span[contains(@class, "dynatree-expander")]'.format(quote(id)),
parent=self))
def _click_expander(self, id):
expander = self.browser.element(
'.//li[@id={}]/span/span[contains(@class, "dynatree-expander")]'.format(quote(id)),
parent=self)
return self.browser.click(expander)
def expand_id(self, id):
self.browser.plugin.ensure_page_safe()
if not self._item_expanded(id) and self._item_expandable(id):
self.logger.debug('expanding node %r', id)
self._click_expander(id)
wait_for(lambda: self._item_expanded(id), num_sec=15, delay=0.5)
def child_items(self, id, ids=False):
self.expand_id(id)
items = self.browser.elements('.//li[@id={}]/ul/li'.format(quote(id)), parent=self)
result = []
for item in items:
if ids:
result.append(self.browser.get_attribute('id', item))
else:
text_item = self.browser.element('./span/a', parent=item)
result.append(self.browser.text(text_item))
return result
def expand_path(self, *path, **kwargs):
""" Exposes a path.
Args:
*path: The path as multiple positional string arguments denoting the course to take.
Keywords:
by_id: Whether to match ids instead of text.
Returns: The leaf web element.
"""
by_id = kwargs.pop("by_id", False)
current_path = []
last_id = None
node = None
for item in path:
if last_id is None:
last_id = self.browser.get_attribute(
'id', self.browser.element('./ul/li', parent=self))
self.expand_id(last_id)
if isinstance(item, re._pattern_type):
self.logger.debug('Looking for regexp %r in path %r', item.pattern, current_path)
for child_item in self.child_items(last_id, ids=by_id):
if item.match(child_item) is not None:
# found
item = child_item
break
else:
raise CandidateNotFound(
{'message': "r{!r}: could not be found in the tree.".format(item.pattern),
'path': current_path,
'cause': None})
current_path.append(item)
xpath = self._construct_xpath(current_path, by_id=by_id)
try:
node = self.browser.element(xpath, parent=self)
except NoSuchElementException:
raise CandidateNotFound(
{'message': "{}: could not be found in the tree.".format(item),
'path': current_path,
'cause': None})
last_id = self.browser.get_attribute('id', node)
if node is not None:
self.expand_id(last_id)
return self.browser.element('./span', parent=node)
def click_path(self, *path, **kwargs):
""" Exposes a path and then clicks it.
Args:
*path: The path as multiple positional string arguments denoting the course to take.
Keywords:
by_id: Whether to match ids instead of text.
Returns: The leaf web element.
"""
leaf = self.expand_path(*path, **kwargs)
title = self.browser.element('./a', parent=leaf)
self.logger.info("Path %r yielded menuitem %r", path, self.browser.text(title))
if title is not None:
self.browser.plugin.ensure_page_safe()
self.browser.click(title)
checkbox_locator = './span[contains(@class, "dynatree-checkbox")]'
if self.browser.is_displayed(checkbox_locator, parent=leaf):
checkbox = self.browser.element(checkbox_locator, parent=leaf)
self.browser.click(checkbox)
return leaf
class CheckableDynaTree(DynaTree):
""" Checkable variation of CFME Tree. This widget not only expands a tree for a provided path,
but also checks a checkbox.
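
    Example usage (the tree id and path below are illustrative):

    .. code-block:: python

        tree = CheckableDynaTree(view, tree_id='protect_treebox')
        tree.check_node('All Policies', 'My Policy')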
"""
IS_CHECKABLE = './span[contains(@class, "dynatree-checkbox")]'
IS_CHECKED = './../span[contains(@class, "dynatree-selected")]'
def is_checkable(self, item):
return bool(self.browser.elements(self.IS_CHECKABLE, parent=item))
def is_checked(self, item):
return bool(self.browser.elements(self.IS_CHECKED, parent=item))
def check_uncheck_node(self, check, *path, **kwargs):
leaf = self.expand_path(*path, **kwargs)
if not self.is_checkable(leaf):
raise TypeError('Item is not checkable')
checked = self.is_checked(leaf)
if checked != check:
self.logger.info('%s %r', 'Checking' if check else 'Unchecking', path[-1])
self.browser.click(self.IS_CHECKABLE, parent=leaf)
def check_node(self, *path, **kwargs):
"""Expands the passed path and checks a checkbox that is located at the node."""
return self.check_uncheck_node(True, *path, **kwargs)
def uncheck_node(self, *path, **kwargs):
"""Expands the passed path and unchecks a checkbox that is located at the node."""
return self.check_uncheck_node(False, *path, **kwargs)
def node_checked(self, *path, **kwargs):
"""Check if a checkbox is checked on the node in that path."""
leaf = self.expand_path(*path, **kwargs)
if not self.is_checkable(leaf):
return False
return self.is_checked(leaf)
def fill(self, path):
if self.node_checked(*path):
return False
else:
self.check_node(*path)
return True
def read(self):
do_not_read_this_widget()
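# Factory helpers: return the legacy dynatree flavour or the newer bootstrap-treeview
# flavour of the widget depending on the appliance version.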
def CheckableManageIQTree(tree_id=None): # noqa
return VersionPick({
Version.lowest(): CheckableDynaTree(tree_id),
'5.7.0.1': CheckableBootstrapTreeview(tree_id),
})
def ManageIQTree(tree_id=None): # noqa
return VersionPick({
Version.lowest(): DynaTree(tree_id),
'5.7.0.1': BootstrapTreeview(tree_id),
})
class SummaryFormItem(Widget):
"""The UI item that shows the values for objects that are NOT VMs, Providers and such ones."""
LOCATOR = (
'.//h3[normalize-space(.)={}]/following-sibling::div/div'
'//label[normalize-space(.)={}]/following-sibling::div')
def __init__(self, parent, group_title, item_name, text_filter=None, logger=None):
Widget.__init__(self, parent, logger=logger)
self.group_title = group_title
self.item_name = item_name
if text_filter is not None and not callable(text_filter):
raise TypeError('text_filter= must be a callable')
self.text_filter = text_filter
def __locator__(self):
return self.LOCATOR.format(quote(self.group_title), quote(self.item_name))
@property
def text(self):
if not self.is_displayed:
return None
ui_text = self.browser.text(self)
if self.text_filter is not None:
# Process it
ui_text = self.text_filter(ui_text)
return ui_text
def read(self):
text = self.text
if text is None:
do_not_read_this_widget()
return text
class MultiBoxSelect(View):
"""This view combines two `<select>` elements and buttons for moving items between them.
This view can be found in policy profile, alert profiles adding screens; assigning actions to an
event, assigning conditions to a policy screens and so on.
    TODO: When CFME 5.7.1 becomes deprecated, `_move_into_image_button` and
    `_move_from_image_button` can be removed.
Attributes:
AVAILABLE_ITEMS_ID (str): default value of `<select>` id for available items
CHOSEN_ITEMS_ID (str): default value of `<select>` id for chosen items
MOVE_FROM (str): default value of `data-submit` attribute for 'move_from' button
MOVE_INTO (str): default value of `data-submit` attribute for 'move_into' button
Args:
available_items (str): provided value of `<select>` id for available items
chosen_items (str): provided value of `<select>` id for available items
move_into (str): provided value of `data-submit` attribute for 'move_into' button
move_from (str): provided value of `data-submit` attribute for 'move_from' button
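
    Example usage (the item names below are illustrative):

    .. code-block:: python

        actions = MultiBoxSelect(view)
        actions.fill(['Action One', 'Action Two'])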
"""
AVAILABLE_ITEMS_ID = "choices_chosen"
CHOSEN_ITEMS_ID = "members_chosen"
MOVE_INTO = "choices_chosen_div"
MOVE_FROM = "members_chosen_div"
available_options = Select(id=Parameter("@available_items"))
chosen_options = Select(id=Parameter("@chosen_items"))
_move_into_image_button = Image(ParametrizedLocator(
".//a[@data-submit={@move_into|quote}]/img"))
_move_from_image_button = Image(ParametrizedLocator(
".//a[@data-submit={@move_from|quote}]/img"))
_move_into_native_button = Button(**{"data-submit": Parameter("@move_into")})
_move_from_native_button = Button(**{"data-submit": Parameter("@move_from")})
def __init__(self, parent, move_into=None, move_from=None, available_items=None,
chosen_items=None, logger=None):
View.__init__(self, parent, logger=logger)
self.available_items = available_items or self.AVAILABLE_ITEMS_ID
self.chosen_items = chosen_items or self.CHOSEN_ITEMS_ID
self.move_into = move_into or self.MOVE_INTO
self.move_from = move_from or self.MOVE_FROM
@cached_property
def move_into_button(self):
if self._move_into_image_button.is_displayed:
return self._move_into_image_button
else:
return self._move_into_native_button
@cached_property
def move_from_button(self):
if self._move_from_image_button.is_displayed:
return self._move_from_image_button
else:
return self._move_from_native_button
def _values_to_remove(self, values):
return list(set(self.all_options) - set(values))
def _values_to_add(self, values):
return list(set(values) - set(self.all_options))
def fill(self, values):
if set(values) == self.all_options:
return False
else:
values_to_remove = self._values_to_remove(values)
values_to_add = self._values_to_add(values)
if values_to_remove:
self.chosen_options.fill(values_to_remove)
self.move_from_button.click()
self.browser.plugin.ensure_page_safe()
if values_to_add:
self.available_options.fill(values_to_add)
self.move_into_button.click()
self.browser.plugin.ensure_page_safe()
return True
@property
def all_options(self):
return [option.text for option in self.chosen_options.all_options]
def read(self):
return self.all_options
class CheckboxSelect(Widget):
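    """A group of checkboxes under a common root element, filled by their label texts."""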
ROOT = ParametrizedLocator(".//div[@id={@search_root|quote}]")
def __init__(self, parent, search_root, text_access_func=None, logger=None):
Widget.__init__(self, parent, logger=logger)
self.search_root = search_root
self._access_func = text_access_func
@property
def checkboxes(self):
"""All checkboxes."""
return {Checkbox(self, id=el.get_attribute("id")) for el in self.browser.elements(
".//input[@type='checkbox']", parent=self)}
@property
def selected_checkboxes(self):
"""Only selected checkboxes."""
return {cb for cb in self.checkboxes if cb.selected}
    # note: deliberately not cached -- the selection changes as checkboxes are filled
    @property
def selected_text(self):
"""Only selected checkboxes' text descriptions."""
return {self.browser.element("./..", parent=cb).text for cb in self.selected_checkboxes}
@property
def selected_values(self):
"""Only selected checkboxes' values."""
return {cb.get_attribute("value") for cb in self.selected_checkboxes}
@property
def unselected_checkboxes(self):
"""Only unselected checkboxes."""
return {cb for cb in self.checkboxes if not cb.selected}
@property
def unselected_values(self):
"""Only unselected checkboxes' values."""
return {cb.get_attribute("value") for cb in self.unselected_checkboxes}
def checkbox_by_id(self, id):
"""Find checkbox's WebElement by id."""
return Checkbox(self, id=id)
def _values_to_remove(self, values):
return list(self.selected_text - set(values))
def _values_to_add(self, values):
return list(set(values) - self.selected_text)
def select_all(self):
"""Selects all checkboxes."""
for cb in self.unselected_checkboxes:
cb.fill(True)
def unselect_all(self):
"""Unselects all checkboxes."""
for cb in self.selected_checkboxes:
cb.fill(False)
def checkbox_by_text(self, text):
"""Returns checkbox's WebElement searched by its text."""
if self._access_func is not None:
for cb in self.checkboxes:
txt = self._access_func(cb)
if txt == text:
return cb
else:
raise NameError("Checkbox with text {} not found!".format(text))
else:
# Has to be only single
return Checkbox(
self,
locator=".//*[normalize-space(.)={}]/input[@type='checkbox']".format(quote(text))
)
def fill(self, values):
if set(values) == self.selected_text:
return False
else:
for value in self._values_to_remove(values):
checkbox = self.checkbox_by_text(value)
checkbox.fill(False)
for value in self._values_to_add(values):
checkbox = self.checkbox_by_text(value)
checkbox.fill(True)
return True
def read(self):
"""Only selected checkboxes."""
return [cb for cb in self.checkboxes if cb.selected]
class BootstrapSwitchSelect(CheckboxSelect):
"""BootstrapSwitchSelect view.
This view is very similar to parent CheckboxSelect view. BootstrapSwitches used instead of
usual Checkboxes. It can be found in the same policy's events assignment screen since
CFME 5.8.1.
"""
BS_TEXT = '/../../following-sibling::text()[1]'
def _get_bs_description(self, bs):
"""Returns text description of the BootstrapSwitch widget.
We have to use such hack with the script execution, because Selenium cannot return text of a
text node itself.
Returns: str
"""
return bs._label or self.browser.execute_script(
"{script} return xpath(null, {arg}).textContent;".format(
script=DynaTree.XPATH,
arg=quote(bs.ROOT.locator + self.BS_TEXT)
)).strip()
@property
def checkboxes(self):
"""All bootstrap switches."""
return {BootstrapSwitch(self, id=el.get_attribute("id")) for el in self.browser.elements(
".//input[@type='checkbox']", parent=self)}
def checkbox_by_id(self, id):
"""Finds bootstrap switch by id."""
return BootstrapSwitch(self, id=id)
@property
def selected_text(self):
"""Only selected bootstrap switches' text descriptions."""
return {self._get_bs_description(bs) for bs in self.selected_checkboxes}
def checkbox_by_text(self, text):
"""Returns bootstrap switch searched by its text."""
if self._access_func is not None:
for cb in self.checkboxes:
txt = self._access_func(cb)
if txt == text:
return cb
else:
raise NameError("Bootstrap switch with text {} not found!".format(text))
else:
# Has to be only single
return BootstrapSwitch(self, label=text)
# ManageIQ table objects definition
class TableColumn(VanillaTableColumn):
@property
def checkbox(self):
try:
return self.browser.element('./input[@type="checkbox"]', parent=self)
except NoSuchElementException:
return None
@property
def checked(self):
checkbox = self.checkbox
if checkbox is None:
return None
return self.browser.is_selected(checkbox)
def check(self):
if not self.checked:
self.browser.click(self.checkbox)
def uncheck(self):
if self.checked:
self.browser.click(self.checkbox)
class TableRow(VanillaTableRow):
Column = TableColumn
class Table(VanillaTable):
CHECKBOX_ALL = '|'.join([
'./thead/tr/th[1]/input[contains(@class, "checkall")]',
'./tr/th[1]/input[contains(@class, "checkall")]',
'.//input[@id="masterToggle"]',
'.//th[1]/input[@id="check-all"]'
])
SORTED_BY_LOC = (
'./thead/tr/th[contains(@class, "sorting_asc") or contains(@class, "sorting_desc")]')
SORT_LINK = './thead/tr/th[{}]/a'
Row = TableRow
@property
def checkbox_all(self):
try:
return self.browser.element(self.CHECKBOX_ALL, parent=self)
except NoSuchElementException:
return None
@property
def all_checked(self):
checkbox = self.checkbox_all
if checkbox is None:
return None
return self.browser.is_selected(checkbox)
def check_all(self):
if not self.all_checked:
self.browser.click(self.checkbox_all)
def uncheck_all(self):
self.check_all()
self.browser.click(self.checkbox_all)
@property
def sorted_by(self):
"""Returns the name of column that the table is sorted by. Attributized!"""
return attributize_string(self.browser.text(self.SORTED_BY_LOC, parent=self))
@property
def sort_order(self):
"""Returns the sorting order of the table for current column.
Returns:
``asc`` or ``desc``
"""
klass = self.browser.get_attribute('class', self.SORTED_BY_LOC, parent=self)
return re.search(r'sorting_(asc|desc)', klass).groups()[0]
def click_sort(self, column):
"""Clicks the sorting link in the given column. The column gets attributized."""
self.logger.info('click_sort(%r)', column)
column = attributize_string(column)
column_position = self.header_index_mapping[self.attributized_headers[column]]
self.browser.click(self.SORT_LINK.format(column_position + 1), parent=self)
def sort_by(self, column, order='asc'):
"""Sort table by column and in given direction.
Args:
column: Name of the column, can be normal or attributized.
order: Sorting order. ``asc`` or ``desc``.
"""
self.logger.info('sort_by(%r, %r)', column, order)
column = attributize_string(column)
# Sort column
if self.sorted_by != column:
self.click_sort(column)
else:
self.logger.debug('sort_by(%r, %r): column already selected', column, order)
# Sort order
if self.sort_order != order:
self.logger.info('sort_by(%r, %r): changing the sort order', column, order)
self.click_sort(column)
        else:
            self.logger.debug('sort_by(%r, %r): order already selected', column, order)
class SummaryTable(VanillaTable):
"""Table used in Provider, VM, Host, ... summaries.
Todo:
* Make it work properly with rowspan (that is for the My Company Tags).
Args:
title: Title of the table (eg. ``Properties``)
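
    Example usage (the field name below is illustrative):

    .. code-block:: python

        properties = SummaryTable(view, 'Properties')
        name = properties.get_text_of('Name')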
"""
BASELOC = './/table[./thead/tr/th[contains(@align, "left") and normalize-space(.)={}]]'
Image = namedtuple('Image', ['alt', 'title', 'src'])
def __init__(self, parent, title, *args, **kwargs):
VanillaTable.__init__(self, parent, self.BASELOC.format(quote(title)), *args, **kwargs)
@property
def fields(self):
"""Returns a list of the field names in the table (the left column)."""
return [row[0].text for row in self]
def get_field(self, field_name):
"""Returns the table row of the field with this name.
Args:
field_name: Name of the field (left column)
Returns:
An instance of :py:class:`VanillaRow`
"""
try:
return self.row((0, field_name))
except IndexError:
raise NameError('Could not find field with name {!r}'.format(field_name))
def get_text_of(self, field_name):
"""Returns the text of the field with this name.
Args:
field_name: Name of the field (left column)
Returns:
:py:class:`str`
"""
return self.get_field(field_name)[1].text
def get_img_of(self, field_name):
"""Returns the information about the image in the field with this name.
Args:
field_name: Name of the field (left column)
Returns:
A 3-tuple: ``alt``, ``title``, ``src``.
"""
try:
img_el = self.browser.element('./img', parent=self.get_field(field_name)[1])
except NoSuchElementException:
return None
return self.Image(
self.browser.get_attribute('alt', img_el),
self.browser.get_attribute('title', img_el),
self.browser.get_attribute('src', img_el))
def click_at(self, field_name):
"""Clicks the field with this name.
Args:
field_name: Name of the field (left column)
"""
return self.get_field(field_name)[1].click()
def read(self):
return {field: self.get_text_of(field) for field in self.fields}
class Accordion(PFAccordion):
@property
def is_dimmed(self):
return bool(
self.browser.elements('.//div[contains(@id, "tree") and contains(@class, "dimmed")]'))
class Calendar(TextInput):
"""A CFME calendar form field
Calendar fields are readonly, and managed by the dxhtmlCalendar widget. A Calendar field
will accept any object that can be coerced into a string, but the value may not match the format
expected by dhtmlxCalendar or CFME. For best results, either a ``datetime.date`` or
``datetime.datetime`` object should be used to create a valid date field.
Args:
name: "name" property of the readonly calendar field.
"""
# Expects: arguments[0] = element, arguments[1] = value to set
set_angularjs_value_script = """\
(function(elem, value){
var angular_elem = angular.element(elem);
var $parse = angular_elem.injector().get('$parse');
var getter = $parse(elem.getAttribute('ng-model'));
var setter = getter.assign;
angular_elem.scope().$apply(function($scope) { setter($scope, value); });
}(arguments[0], arguments[1]));
"""
def fill(self, value):
# input = self.browser.element(self.name)
if isinstance(value, date):
date_str = value.strftime('%m/%d/%Y')
else:
date_str = str(value)
self.move_to()
# need to write to a readonly field: resort to evil
if self.browser.get_attribute("ng-model", self) is not None:
self.browser.execute_script(self.set_angularjs_value_script, self.browser.element(self),
date_str)
else:
self.browser.set_attribute("value", date_str, self)
# Now when we set the value, we need to simulate a change event.
if self.browser.get_attribute("data-date-autoclose", self):
# New one
script = "$(arguments[0]).trigger('changeDate');"
else:
# Old one
script = "$(arguments[0]).change();"
try:
self.browser.execute_script(script, self.browser.element(self))
except WebDriverException as e:
self.logger.warning(
"An exception was raised during handling of the Cal #{}'s change event:\n{}"
.format(self.name, str(e)))
self.browser.plugin.ensure_page_safe()
return True
class SNMPHostsField(View):
_input = Input("host")
def __init__(self, parent, logger=None):
View.__init__(self, parent, logger=logger)
def fill(self, values):
fields = self.host_fields
if isinstance(values, six.string_types):
values = [values]
if len(values) > len(fields):
raise ValueError("You cannot specify more hosts than the form allows!")
return any(fields[i].fill(value) for i, value in enumerate(values))
@property
def host_fields(self):
"""Returns list of locators to all host fields"""
if self._input.is_displayed:
return [self._input]
else:
return [Input(self, "host_{}".format(i)) for i in range(1, 4)]
class SNMPTrapsField(Widget):
def __init__(self, parent, logger=None):
Widget.__init__(self, parent, logger=logger)
def fill_oid_field(self, i, oid):
oid_field = Input(self, "oid__{}".format(i))
return oid_field.fill(oid)
def fill_type_field(self, i, type_):
type_field = BootstrapSelect(self, "var_type__{}".format(i))
return type_field.fill(type_)
def fill_value_field(self, i, value):
value_field = Input(self, "value__{}".format(i))
return value_field.fill(value)
def fill(self, traps):
result = []
for i, trap in enumerate(traps, 1):
assert 2 <= len(trap) <= 3, "The tuple must be at least 2 items and max 3 items!"
if len(trap) == 2:
trap += (None,)
oid, type_, value = trap
result.append(any((
self.fill_oid_field(i, oid),
self.fill_type_field(i, type_),
self.fill_value_field(i, value)
)))
return any(result)
def read(self):
do_not_read_this_widget()
class SNMPForm(View):
hosts = SNMPHostsField()
version = BootstrapSelect("snmp_version")
id = Input("trap_id")
traps = SNMPTrapsField()
class ScriptBox(Widget):
"""Represents a script box as is present on the customization templates pages.
This box has to be activated before keys can be sent. Since this can't be done
until the box element is visible, and some dropdowns change the element, it must
be activated "inline".
    Args:
        locator: Optional locator of the underlying ``textarea`` (defaults to the
            ``method_data`` textarea).
        item_name: Optional name of the backing JS editor object (defaults to
            ``ManageIQ.editor``).
    """
def __init__(self, parent, locator=None, item_name=None, logger=None):
Widget.__init__(self, parent, logger=logger)
self.locator = locator
self.item_name = item_name
def __locator__(self):
if not self.locator:
self.locator = "//textarea[contains(@id, 'method_data')]"
return self.locator
@property
def name(self):
if not self.item_name:
self.item_name = 'ManageIQ.editor'
return self.item_name
@property
def script(self):
return self.browser.execute_script('{}.getValue();'.format(self.name))
def fill(self, value):
if self.script == value:
return False
self.browser.execute_script('{}.setValue(arguments[0]);'.format(self.name), value)
self.browser.execute_script('{}.save();'.format(self.name))
return True
def read(self):
return self.script
def get_value(self):
script = self.browser.execute_script('return {}.getValue();'.format(self.name))
script = script.replace('\\"', '"').replace("\\n", "\n")
return script
def workaround_save_issue(self):
# We need to fire off the handlers manually in some cases ...
self.browser.execute_script(
"{}._handlers.change.map(function(handler) {{ handler() }});".format(self.item_name))
class Paginator(Widget):
""" Represents Paginator control that includes First/Last/Next/Prev buttons
and a control displaying amount of items on current page vs overall amount.
It is mainly used in Paginator Pane.
"""
PAGINATOR_CTL = './/ul[@class="pagination"]'
CUR_PAGE_CTL = './li/span/input[@name="limitstart"]/..'
PAGE_BUTTON_CTL = './li[contains(@class, {})]/span'
def __locator__(self):
return self._paginator
@property
def _paginator(self):
return self.browser.element(self.PAGINATOR_CTL, parent=self.parent_view)
def _is_enabled(self, element):
return 'disabled' not in self.browser.classes(element.find_element_by_xpath('..'))
def _click_button(self, cmd):
cur_page_btn = self.browser.element(self.PAGE_BUTTON_CTL.format(quote(cmd)),
parent=self._paginator)
if self._is_enabled(cur_page_btn):
self.browser.click(cur_page_btn)
else:
raise NoSuchElementException('such button {} is absent/grayed out'.format(cmd))
def next_page(self):
self._click_button('next')
def prev_page(self):
self._click_button('prev')
def last_page(self):
self._click_button('last')
def first_page(self):
self._click_button('first')
def page_info(self):
cur_page = self.browser.element(self.CUR_PAGE_CTL, parent=self._paginator)
text = cur_page.text
        return re.search(r'(\d+)?-?(\d+)\s+of\s+(\d+)', text).groups()
class ReportDataControllerMixin(object):
"""
    A helper mixin for widgets that talk to the ManageIQ (Miq) JS API.
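
    Commands are serialized to JSON and dispatched via the ``sendDataWithRx`` JS helper;
    results are read back from the global ``ManageIQ.qe.gtl.result``. For example:

    .. code-block:: python

        # toggle the "select all" checkbox of the list through the JS bridge
        self._invoke_cmd('select_all', True)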
"""
def _invoke_cmd(self, cmd, data=None):
raw_data = {'controller': 'reportDataController', 'action': cmd}
if data:
raw_data['data'] = [data]
json_data = json.dumps(raw_data)
js_cmd = 'sendDataWithRx({data}); return ManageIQ.qe.gtl.result'.format(data=json_data)
self.logger.info("executed command: {cmd}".format(cmd=js_cmd))
# command result is always stored in this global variable
return self.browser.execute_script(js_cmd)
def _call_item_method(self, method):
raw_data = {'controller': 'reportDataController',
'action': 'get_item',
'data': [self.name]}
js_data = json.dumps(raw_data)
js_cmd = ('sendDataWithRx({data}); '
'return ManageIQ.qe.gtl.result.{method}()').format(data=js_data, method=method)
self.logger.info("executed command: {cmd}".format(cmd=js_cmd))
return self.browser.execute_script(js_cmd)
class JSPaginationPane(View, ReportDataControllerMixin):
""" Represents Paginator Pane with js api provided by ManageIQ.
    It is intended to be nested inside list views, e.g. the Infrastructure Providers page.
"""
@property
def is_displayed(self):
        # upstream sometimes shows the old pagination pane and sometimes the new one
paginator = ("return $('#paging_div').length !== 0 || "
"document.getElementsByTagName('miq-pagination').length != 0")
return self.browser.execute_script(paginator)
@property
def exists(self):
return self.is_displayed
def check_all(self):
self._invoke_cmd('select_all', True)
def uncheck_all(self):
self._invoke_cmd('select_all', False)
def sort(self, sort_by, ascending=True):
# in order to change both sorting and direction, command has to be called twice
data = {'columnName': sort_by, 'isAscending': ascending}
self._invoke_cmd('set_sorting', data)
@property
def sorted_by(self):
return self._invoke_cmd('get_sorting')
@property
def items_per_page(self):
return self._invoke_cmd('get_items_per_page')
def set_items_per_page(self, value):
self._invoke_cmd('set_items_per_page', value)
@property
def cur_page(self):
return self._invoke_cmd('get_current_page')
@property
def pages_amount(self):
return self._invoke_cmd('get_pages_amount')
def next_page(self):
self._invoke_cmd('next_page')
def prev_page(self):
self._invoke_cmd('previous_page')
def first_page(self):
self._invoke_cmd('first_page')
def last_page(self):
self._invoke_cmd('last_page')
def go_to_page(self, value):
self._invoke_cmd('go_to_page', value)
@property
def items_amount(self):
return len(self._invoke_cmd('get_all_items'))
def pages(self):
"""Generator to iterate over pages, yielding after moving to the next page"""
if self.exists:
# start iterating at the first page
if self.cur_page != 1:
self.logger.debug('Resetting paginator to first page')
self.first_page()
# Adding 1 to pages_amount to include the last page in loop
for page in range(1, self.pages_amount + 1):
yield self.cur_page
if self.cur_page == self.pages_amount:
# last or only page, stop looping
break
else:
self.logger.debug('Paginator advancing to next page')
self.next_page()
else:
return
def find_row_on_pages(self, table, *args, **kwargs):
"""Find first row matching filters provided by kwargs on the given table widget
Args:
table: Table widget object
args: Filters to be passed to table.row()
kwargs: Filters to be passed to table.row()
"""
self.first_page()
for _ in self.pages():
try:
row = table.row(*args, **kwargs)
except IndexError:
continue
if not row:
continue
else:
return row
else:
raise NoSuchElementException('Row matching filter {} not found on table {}'
.format(kwargs, table))
class NonJSPaginationPane(View):
""" Represents Paginator Pane with the following controls.
The intention of this view is to use it as nested view on f.e. Infrastructure Providers page.
"""
ROOT = '//div[@id="paging_div"]'
check_all_items = Checkbox(id='masterToggle')
sort_by = BootstrapSelect(id='sort_choice')
items_on_page = BootstrapSelect(id='ppsetting')
paginator = Paginator()
@property
def is_displayed(self):
# there are cases when paging_div is shown but it is empty
return self.check_all_items.is_displayed
@property
def exists(self):
return self.is_displayed
def check_all(self):
self.check_all_items.fill(True)
def uncheck_all(self):
self.check_all()
self.check_all_items.fill(False)
def sort(self, value):
self.sort_by.select_by_visible_text(value)
@property
def sorted_by(self):
raise NotImplementedError('to implement it when needed')
@property
def items_per_page(self):
selected = self.items_on_page.selected_option
return int(re.sub(r'\s+items', '', selected))
def set_items_per_page(self, value):
self.items_on_page.select_by_visible_text(str(value))
def _parse_pages(self):
min_item, max_item, item_amt = self.paginator.page_info()
item_amt = int(item_amt)
max_item = int(max_item)
items_per_page = self.items_per_page
# obtaining amount of existing pages, there is 1 page by default
if item_amt == 0:
page_amt = 1
else:
# round up after dividing total item count by per-page
page_amt = int(ceil(float(item_amt) / float(items_per_page)))
# calculating current_page_number
if max_item <= items_per_page:
cur_page = 1
else:
# round up after dividing highest displayed item number by per-page
cur_page = int(ceil(float(max_item) / float(items_per_page)))
return cur_page, page_amt
@property
def cur_page(self):
return self._parse_pages()[0]
@property
def pages_amount(self):
return self._parse_pages()[1]
def next_page(self):
self.paginator.next_page()
def prev_page(self):
self.paginator.prev_page()
def first_page(self):
if self.cur_page != 1:
self.paginator.first_page()
def last_page(self):
if self.cur_page != self.pages_amount:
self.paginator.last_page()
def pages(self):
"""Generator to iterate over pages, yielding after moving to the next page"""
if self.exists:
# start iterating at the first page
if self.cur_page != 1:
self.logger.debug('Resetting paginator to first page')
self.first_page()
# Adding 1 to pages_amount to include the last page in loop
for page in range(1, self.pages_amount + 1):
yield self.cur_page
if self.cur_page == self.pages_amount:
# last or only page, stop looping
break
else:
self.logger.debug('Paginator advancing to next page')
self.next_page()
else:
return
@property
def items_amount(self):
return self.paginator.page_info()[2]
def find_row_on_pages(self, table, *args, **kwargs):
"""Find first row matching filters provided by kwargs on the given table widget
Args:
table: Table widget object
args: Filters to be passed to table.row()
kwargs: Filters to be passed to table.row()
"""
self.first_page()
for _ in self.pages():
try:
row = table.row(*args, **kwargs)
except IndexError:
continue
if not row:
continue
else:
return row
else:
raise NoSuchElementException('Row matching filter {} not found on table {}'
.format(kwargs, table))
def PaginationPane(*args, **kwargs): # noqa
parent = kwargs.get("parent")
verpick_obj = VersionPick({
Version.lowest(): NonJSPaginationPane(*args, **kwargs),
'5.9': JSPaginationPane(*args, **kwargs),
})
return verpick_obj.pick(parent.browser.product_version) if parent else verpick_obj
class Stepper(View):
""" A CFME Stepper Control
.. code-block:: python
stepper = Stepper(locator='//div[contains(@class, "timeline-stepper")]')
stepper.increase()
"""
ROOT = ParametrizedLocator('{@locator}')
minus_button = Button('-')
plus_button = Button('+')
value_field = Input(locator='.//input[contains(@class, "bootstrap-touchspin")]')
def __init__(self, parent, locator, logger=None):
View.__init__(self, parent=parent, logger=logger)
self.locator = locator
def read(self):
return int(self.value_field.read())
def decrease(self):
self.minus_button.click()
def increase(self):
self.plus_button.click()
def set_value(self, value):
value = int(value)
if value < 1:
raise ValueError('The value cannot be less than 1')
steps = value - self.read()
if steps == 0:
return False
elif steps > 0:
operation = self.increase
else:
operation = self.decrease
steps = abs(steps)
for step in range(steps):
operation()
return True
def fill(self, value):
return self.set_value(value)
class RadioGroup(Widget):
""" CFME Radio Group Control
.. code-block:: python
radio_group = RadioGroup(locator='//span[contains(@class, "timeline-option")]')
radio_group.select(radio_group.button_names()[-1])
"""
BUTTONS = './/label[input[@type="radio"]]'
def __init__(self, parent, locator, logger=None):
Widget.__init__(self, parent=parent, logger=logger)
self.locator = locator
def __locator__(self):
return self.locator
def _get_button(self, name):
br = self.browser
return next(btn for btn in br.elements(self.BUTTONS) if br.text(btn) == name)
@property
def button_names(self):
return [self.browser.text(btn) for btn in self.browser.elements(self.BUTTONS)]
@property
def selected(self):
names = self.button_names
for name in names:
if 'ng-valid-parse' in self.browser.classes('.//input[@type="radio"]',
parent=self._get_button(name)):
return name
else:
            # the radio buttons carry no marker showing which one is selected by default,
            # so fall back to returning the first radio button's name
return names[0]
def select(self, name):
button = self._get_button(name)
if self.selected != name:
button.click()
return True
return False
def read(self):
return self.selected
def fill(self, name):
return self.select(name)
class BreadCrumb(Widget):
""" CFME BreadCrumb navigation control
.. code-block:: python
breadcrumb = BreadCrumb()
breadcrumb.click_location(breadcrumb.locations[0])
"""
ROOT = '//ol[@class="breadcrumb"]'
ELEMENTS = './/li'
def __init__(self, parent, locator=None, logger=None):
Widget.__init__(self, parent=parent, logger=logger)
self._locator = locator or self.ROOT
def __locator__(self):
return self._locator
@property
def _path_elements(self):
return self.browser.elements(self.ELEMENTS, parent=self)
@property
def locations(self):
return [self.browser.text(loc) for loc in self._path_elements]
@property
def active_location(self):
br = self.browser
return next(br.text(loc) for loc in self._path_elements if 'active' in br.classes(loc))
def click_location(self, name, handle_alert=True):
br = self.browser
location = next(loc for loc in self._path_elements if br.text(loc) == name)
result = br.click(location, ignore_ajax=handle_alert)
if handle_alert:
self.browser.handle_alert(wait=2.0, squash=True)
self.browser.plugin.ensure_page_safe()
return result
class ItemsToolBarViewSelector(View):
""" represents toolbar's view selector control
it is present on pages with items like Infra or Cloud Providers pages
.. code-block:: python
view_selector = View.nested(ItemsToolBarViewSelector)
view_selector.select('Tile View')
view_selector.selected
"""
ROOT = './/div[contains(@class, "toolbar-pf-view-selector")]'
grid_button = Button(title='Grid View')
tile_button = Button(title='Tile View')
list_button = Button(title='List View')
@property
def _view_buttons(self):
yield self.grid_button
yield self.tile_button
yield self.list_button
def select(self, title):
for button in self._view_buttons:
if button.title == title:
return button.click()
else:
raise ValueError("The view with title {title} isn't present".format(title=title))
@property
def selected(self):
if self.is_displayed:
return next(btn.title for btn in self._view_buttons if btn.active)
else:
return None
def read(self):
return self.selected
@property
def is_displayed(self):
return self.grid_button.is_displayed
class DetailsToolBarViewSelector(View):
""" represents toolbar's view selector control
it is present on pages like Infra Providers Details page
.. code-block:: python
view_selector = View.nested(DetailsToolBarViewSelector)
view_selector.select('Dashboard View')
view_selector.selected
"""
ROOT = './/div[contains(@class, "toolbar-pf-view-selector")]'
summary_button = Button(title='Summary View')
dashboard_button = Button(title='Dashboard View')
@property
def _view_buttons(self):
yield self.dashboard_button
yield self.summary_button
def select(self, title):
for button in self._view_buttons:
if button.title == title:
return button.click()
else:
raise ValueError("The view with title {title} isn't present".format(title=title))
@property
def selected(self):
if self.is_displayed:
return next(btn.title for btn in self._view_buttons if btn.active)
else:
return None
@property
def is_displayed(self):
        # the cloud provider detail page has an empty view selector,
        # so the default is_displayed would behave incorrectly in that case
return self.summary_button.is_displayed
def read(self):
return self.selected
class Search(View):
""" Represents search_text control
# TODO Add advanced search
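
    .. code-block:: python

        view.search.search('my-vm')  # the search text is illustrative
        view.search.clear_search()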
"""
search_text = Input(name="search_text")
search_btn = Text("//div[@id='searchbox']//div[contains(@class, 'form-group')]"
"/*[self::a or (self::button and @type='submit')]")
clear_btn = Text(".//*[@id='searchbox']//div[contains(@class, 'clear')"
"and not(contains(@style, 'display: none'))]/div/button")
def clear_search(self):
if not self.is_empty:
self.clear_btn.click()
self.search_btn.click()
def search(self, text):
self.search_text.fill(text)
self.search_btn.click()
@property
@logged(log_result=True)
def is_empty(self):
return not bool(self.search_text.value)
class UpDownSelect(View):
"""Multiselect with two arrows (up/down) next to it. Eg. in AE/Domain priority selection.
Args:
select_loc: Locator for the select box (without Select element wrapping)
up_loc: Locator of the Move Up arrow.
        down_loc: Locator of the Move Down arrow.
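
    Example usage (locators and item names below are illustrative):

    .. code-block:: python

        order = UpDownSelect(view, '#seq_fields', '//img[@alt="Move up"]', '//img[@alt="Move down"]')
        order.fill(['First Domain', 'Second Domain'])  # moves these to the top, in order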
"""
select = Select(ParametrizedLocator('{@select_loc}'))
up = Text(ParametrizedLocator('{@up_loc}'))
down = Text(ParametrizedLocator('{@down_loc}'))
def __init__(self, parent, select_loc, up_loc, down_loc, logger=None):
View.__init__(self, parent, logger=logger)
self.select_loc = select_loc
self.up_loc = up_loc
self.down_loc = down_loc
@property
def is_displayed(self):
return self.select.is_displayed and self.up.is_displayed and self.down.is_displayed
def read(self):
return self.items
@property
def items(self):
return [option.text for option in self.select.all_options]
def move_up(self, item):
item = str(item)
assert item in self.items
self.select.deselect_all()
self.select.select_by_visible_text(item)
self.up.click()
def move_down(self, item):
item = str(item)
assert item in self.items
self.select.deselect_all()
self.select.select_by_visible_text(item)
self.down.click()
def move_top(self, item):
item = str(item)
assert item in self.items
self.select.deselect_all()
while item != self.items[0]:
self.select.select_by_visible_text(item)
self.up.click()
def move_bottom(self, item):
item = str(item)
assert item in self.items
self.select.deselect_all()
while item != self.items[-1]:
self.select.select_by_visible_text(item)
self.down.click()
def fill(self, items):
if not isinstance(items, (list, tuple)):
items = [items]
current_items = self.items[:len(items)]
if current_items == items:
return False
items = map(str, items)
for item in reversed(items): # reversed because every new item at top pushes others down
self.move_top(item)
return True
class AlertEmail(View):
"""This set of widgets can be found in Control / Explorer / Alerts when you edit an alert."""
@ParametrizedView.nested
class recipients(ParametrizedView): # noqa
PARAMETERS = ("email", )
ALL_EMAILS = ".//a[starts-with(@title, 'Remove')]"
email = Text(ParametrizedLocator(".//a[text()={email|quote}]"))
def remove(self):
self.email.click()
@classmethod
def all(cls, browser):
return [(browser.text(e), ) for e in browser.elements(cls.ALL_EMAILS)]
ROOT = ParametrizedLocator(".//div[@id={@id|quote}]")
RECIPIENTS = "./div[@id='edit_to_email_div']//a"
add_button = Text(".//div[@title='Add']")
recipients_input = TextInput("email")
def __init__(self, parent, id="edit_email_div", logger=None):
View.__init__(self, parent, logger=logger)
self.id = id
def fill(self, values):
if isinstance(values, six.string_types):
values = [values]
if self.all_emails == set(values):
return False
else:
values_to_remove = self._values_to_remove(values)
values_to_add = self._values_to_add(values)
for value in values_to_remove:
self.recipients(value).remove()
for value in values_to_add:
self._add_recipient(value)
return True
def _values_to_remove(self, values):
return list(self.all_emails - set(values))
def _values_to_add(self, values):
return list(set(values) - self.all_emails)
def _add_recipient(self, email):
self.recipients_input.fill(email)
self.add_button.click()
@property
def all_emails(self):
return {self.browser.text(e) for e in self.browser.elements(self.RECIPIENTS)}
def read(self):
return list(self.all_emails)
class TimelinesZoomSlider(View):
"""This control represents Timeline's Zoom Slider
"""
ROOT = ParametrizedLocator('{@locator}')
zoom_in_button = Text(locator='//button[@id="timeline-pf-zoom-in"]') # "+" button
zoom_out_button = Text(locator='//button[@id="timeline-pf-zoom-out"]') # "-" button
def __init__(self, parent, locator, logger=None):
View.__init__(self, parent, logger=logger)
self.locator = locator
@property
def value(self):
return float(self.browser.get_attribute('value', self))
@cached_property
def max(self):
return float(self.browser.get_attribute('max', self))
@cached_property
def min(self):
return float(self.browser.get_attribute('min', self))
def zoom_in(self):
self.zoom_in_button.click()
def zoom_out(self):
self.zoom_out_button.click()
def zoom_max(self):
while self.value < self.max:
self.zoom_in()
def zoom_min(self):
while self.value > self.min:
self.zoom_out()
def read(self):
return self.value
class TimelinesFilter(View):
"""represents Filter Part of Timelines view
"""
# common
event_type = BootstrapSelect(id='tl_show')
event_category = BootstrapSelect(id='tl_category_management')
time_period = Stepper(locator='//div[contains(@class, "timeline-stepper")]')
time_range = BootstrapSelect(id='tl_range')
time_position = BootstrapSelect(id='tl_timepivot')
calendar = TextInput(locator='.//input[@class="form-control"]')
# todo: implement correct switch between management/policy views when switchable views done
apply = Text(locator='.//div[contains(@class, "timeline-apply")]')
# management controls
detailed_events = Checkbox(name='showDetailedEvents')
# policy controls
policy_event_category = BootstrapSelect(id='tl_category_policy')
policy_event_status = RadioGroup(locator='//span[contains(@class, "timeline-option")]')
class TimelinesChart(View):
"""represents Chart part of Timelines View
# currently only event collection is available
# todo: to add widgets for all controls and add chart objects interaction functionality
"""
ROOT = ParametrizedLocator('{@locator}')
CATEGORIES = './/*[name()="g" and contains(@class, "timeline-pf-labels")]' \
'//*[name()="text" and @class="timeline-pf-label"]'
EVENTS = '(.//*[name()="g" and contains(@class, "timeline-pf-drops-container")]/*[name()="g" ' \
'and @class="timeline-pf-drop-line"])[{pos}]/*[name()="text" ' \
'and contains(@class, "timeline-pf-drop")]'
legend = Table(locator='//div[@id="legend"]/table')
zoom = TimelinesZoomSlider(locator='//input[@id="timeline-pf-slider"]')
class TimelinesEvent(object):
def __repr__(self):
attrs = [attr for attr in self.__dict__.keys() if not attr.startswith('_')]
params = ", ".join(["{}={}".format(attr, getattr(self, attr)) for attr in attrs])
return "TimelinesEvent({})".format(params)
def __init__(self, parent, locator=None, logger=None):
super(TimelinesChart, self).__init__(parent=parent, logger=logger)
self.locator = locator or '//div[contains(@class, "timeline-container")]'
def get_categories(self, *categories):
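        """Return ``(position, name)`` pairs for chart category labels, optionally filtered by name."""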
br = self.browser
prepared_categories = []
for num, element in enumerate(br.elements(self.CATEGORIES), start=1):
# categories have number of events inside them
            mo = re.search(r'^(.*?)(\s\(\s*\d+\s*\)\s*)*$', br.text(element))
category_name = mo.groups()[0]
if len(categories) == 0 or (len(categories) > 0 and category_name in categories):
prepared_categories.append((num, category_name))
return prepared_categories
def _is_group(self, evt):
return 'timeline-pf-event-group' in self.browser.classes(evt)
def _prepare_event(self, evt, category):
node = document_fromstring(evt)
# lxml doesn't replace <br> with \n in this case. so this has to be done by us
for br in node.xpath("*//br"):
br.tail = "\n" + br.tail if br.tail else "\n"
# parsing event and preparing its attributes
event = self.TimelinesEvent()
for line in node.text_content().split('\n'):
attr_name, attr_val = re.search('^(.*?):(.*)$', line).groups()
attr_name = attr_name.strip().lower().replace(' ', '_')
setattr(event, attr_name, attr_val.strip())
event.category = category
return event
def _click_group(self, group):
self.browser.execute_script("""jQuery.fn.art_click = function () {
this.each(function (i, e) {
var evt = new MouseEvent("click");
e.dispatchEvent(evt);
});};
$(arguments[0]).art_click();""", group)
def get_events(self, *categories):
got_categories = self.get_categories(*categories)
events = []
for category in got_categories:
cat_position, cat_name = category
# obtaining events for each category
for raw_event in self.browser.elements(self.EVENTS.format(pos=cat_position)):
if not self._is_group(raw_event):
# if ordinary event
event_text = self.browser.get_attribute('data-content', raw_event)
events.append(self._prepare_event(event_text, cat_name))
else:
# if event group
# todo: compare old table with new one if any issues
self.legend.clear_cache()
self._click_group(raw_event)
self.legend.wait_displayed()
for row in self.legend.rows():
event_text = self.browser.get_attribute('innerHTML', row['Event'])
events.append(self._prepare_event(event_text, cat_name))
return events
class TimelinesView(View):
"""represents Timelines page
"""
title = Text(locator='//h1')
breadcrumb = BreadCrumb()
@View.nested
class filter(TimelinesFilter): # NOQA
pass
@View.nested
class chart(TimelinesChart): # NOQA
pass
@property
def is_displayed(self):
return self.title.text == 'Timelines'
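# Usage sketch (hedged): ``navigate_to`` and ``provider`` stand in for the
# surrounding framework's navigation helpers; the widget names match the
# views above, everything else is a placeholder.
#
#   view = navigate_to(provider, 'Timelines')
#   view.filter.fill({'event_category': 'Power Activity and Lifecycle'})
#   view.filter.apply.click()
#   events = view.chart.get_events('Power Activity and Lifecycle')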
class AttributeValueForm(View):
@View.nested
class fields(ParametrizedView): # noqa
PARAMETERS = ('id', )
attribute = Input(
locator=ParametrizedLocator('.//input[@id=concat({@attr_prefix|quote}, {id|quote})]'))
value = Input(
locator=ParametrizedLocator('.//input[@id=concat({@val_prefix|quote}, {id|quote})]'))
@property
def attr_prefix(self):
return self.parent.attr_prefix
@property
def val_prefix(self):
return self.parent.val_prefix
# TODO: Figure out how to smuggle some extra data to the all classmethod
# TODO: since it is now impossible to pass the attr_prefix to it.
ATTRIBUTES = ParametrizedLocator('.//input[starts-with(@id, {@attr_prefix|quote})]')
def __init__(self, parent, attr_prefix, val_prefix, start=1, end=5, logger=None):
View.__init__(self, parent, logger=logger)
self.attr_prefix = attr_prefix
self.val_prefix = val_prefix
self.start = start
self.end = end
@property
def count(self):
return (self.end - self.start) + 1
@property
def current_attributes(self):
attributes = [
(i, self.browser.get_attribute('value', e))
for i, e in enumerate(self.browser.elements(self.ATTRIBUTES), self.start)]
        return [a for a in attributes if a[1]]  # keep rows with a non-empty attribute name
def attribute_to_id(self, attribute):
for id, attr in self.current_attributes:
if attr == attribute:
return id
else:
return None
def read(self):
result = {}
for id, attribute in self.current_attributes:
if not attribute:
continue
value = self.fields(id=str(id)).value.read()
result[attribute] = value
return result
def clear(self):
changed = False
for id, attr in self.current_attributes:
field = self.fields(id=str(id))
if field.attribute.fill(''):
changed = True
if field.value.fill(''):
changed = True
return changed
def fill(self, values):
if hasattr(values, 'items') and hasattr(values, 'keys'):
values = list(values.items())
if len(values) > self.count:
raise ValueError(
'This form is supposed to have only {} fields, passed {} items'.format(
self.count, len(values)))
changed = self.clear()
for id, (key, value) in enumerate(values, self.start):
field = self.fields(id=str(id))
if field.fill({'attribute': key, 'value': value}):
changed = True
return changed
class FileInput(BaseFileInput):
""" represents enhanced FileInput control.
Accepts a string. If the string is a file, then it is put in the input. Otherwise a temporary
file is generated and that one is fed to the file input.
technical debt:
ronny:
this introduces a requirement for out of band resource and file management, we should avoid
something like that
while this is merge-able as it adds functionality, we should clearly mark this as technical
debt needing a better resource management exposed from widgetastic or our wrappers
"""
def fill(self, value):
if not os.path.isfile(value):
f = NamedTemporaryFile()
f.write(str(value))
f.flush()
value = os.path.abspath(f.name)
atexit.register(f.close)
return super(FileInput, self).fill(value)
class BaseQuadIconEntity(ParametrizedView, ClickableMixin):
""" represents QuadIcon entity. one of states entity can be in
It is expected that some properties like "data" will be overridden in its children
"""
PARAMETERS = ('name',)
ROOT = ParametrizedLocator('.//table[./tbody/tr/td/a[contains(@title, {name|quote})]]')
LIST = '//dl[contains(@class, "tile")]/*[self::dt or self::dd]'
label = Text(locator=ParametrizedLocator('./tbody/tr/td/a[contains(@title, {name|quote})]'))
checkbox = Checkbox(locator='./tbody/tr/td/input[@type="checkbox"]')
QUADRANT = './/div[@class="flobj {pos}72"]/*[self::p or self::img]'
@property
def is_checked(self):
return self.checkbox.selected
def check(self):
return self.checkbox.fill(True)
def uncheck(self):
return self.checkbox.fill(False)
@property
def data(self):
""" every entity like QuadIcon/ListEntity etc displays some data,
which is different for each entity type.
This is property which should hold such data.
To override this property in concrete classes.
"""
return {}
def read(self):
return self.is_checked
def fill(self, values):
        return self.checkbox.fill(values)
@property
def is_displayed(self):
try:
list_exists = self.browser.element(self.LIST).is_displayed()
except NoSuchElementException:
list_exists = False
return not list_exists and super(BaseQuadIconEntity, self).is_displayed
class BaseTileIconEntity(ParametrizedView):
""" represents Tile Icon entity. one of states entity can be in
"""
PARAMETERS = ('name',)
ROOT = ParametrizedLocator('.//table[.//table[./tbody/tr/td/a[contains(@title, '
'{name|quote})]]]')
LIST = '//dl[contains(@class, "tile")]/*[self::dt or self::dd]'
quad_icon = ParametrizedView.nested(BaseQuadIconEntity)
@property
def is_checked(self):
return self.quad_icon(self.context['name']).is_checked
def check(self):
return self.quad_icon(self.context['name']).check()
def uncheck(self):
return self.quad_icon(self.context['name']).uncheck()
@property
def data(self):
""" every entity like QuadIcon/ListEntity etc displays some data,
which is different for each entity type.
This is property which should hold such data.
"""
quad_data = self.quad_icon(self.context['name']).data
br = self.browser
# it seems we don't have list widget in other places.
# so, this code just parses it, creates dict and adds it to quad icon dict
els = [br.text(el) for el in br.elements(locator=self.LIST)]
list_data = dict(zip(els[::2], els[1::2])) # get first and second element and join them
quad_data.update(list_data)
return quad_data
def read(self):
return self.quad_icon(self.context['name']).read()
def fill(self, values):
        return self.quad_icon(self.context['name']).fill(values)
@property
def is_displayed(self):
try:
return (super(BaseTileIconEntity, self).is_displayed and
self.browser.is_displayed(self.LIST))
except NoSuchElementException:
return False
class BaseListEntity(ParametrizedView, ClickableMixin):
""" represents List entity. one of states entity can be in
"""
PARAMETERS = ('name',)
ROOT = ParametrizedLocator('.//tr[./td[normalize-space(.)={name|quote}]]')
parent_table = Table(locator='./ancestor::table[1]')
checkbox = Checkbox(locator='.//input[@type="checkbox"]')
@property
def is_checked(self):
return self.checkbox.selected
def check(self):
return self.checkbox.fill(True)
def uncheck(self):
return self.checkbox.fill(False)
@property
def data(self):
""" every entity like QuadIcon/ListEntity etc displays some data,
which is different for each entity type.
This is property which should hold such data.
"""
row = next(row for row in self.parent_table.rows() if row.name.text == self.context['name'])
item_data = {}
for col_name in (h for h in self.parent_table.headers if h is not None):
item_data[col_name] = row[col_name].text
return item_data
def read(self):
return self.is_checked
def fill(self, values):
        return self.checkbox.fill(values)
class NonJSBaseEntity(View):
""" represents Proxy class which represents Entity despite of state it is in.
it passes calls to concrete entity taking into account which entity type is displayed atm
"""
quad_entity = BaseQuadIconEntity
list_entity = BaseListEntity
tile_entity = BaseTileIconEntity
def __init__(self, parent, name, logger=None):
View.__init__(self, parent, logger=logger)
self.name = name
def _get_existing_entity(self):
for item in (self.quad_entity, self.tile_entity, self.list_entity):
if item(name=self.name).is_displayed:
return item(name=self.name)
else:
raise NoSuchElementException("Item {name} isn't found on page".format(name=self.name))
def __getattr__(self, name):
if name.startswith('__'):
return self.__dict__[name]
item = self._get_existing_entity()
        if hasattr(item, name):  # needed for is_displayed
            return getattr(item, name)
        raise AttributeError(name)
def __str__(self):
return str(self._get_existing_entity())
def __repr__(self):
return repr(self._get_existing_entity())
class JSBaseEntity(View, ReportDataControllerMixin):
""" represents Entity, no matter what state it is in.
It is implemented using ManageIQ JS API
"""
def __init__(self, parent, name, logger=None):
View.__init__(self, parent, logger=logger)
self.name = name
@property
def is_checked(self):
return self._call_item_method('is_selected')
def check(self):
self._call_item_method('select')
def uncheck(self):
self._call_item_method('unselect')
def click(self):
self._call_item_method('click')
@property
def data(self):
""" every entity like QuadIcon/ListEntity etc displays some data,
which is different for each entity type.
This is property which should hold such data.
"""
data = self._invoke_cmd('get_item', self.name)['item']
cells = data.pop('cells')
data.update(cells)
return {str(key).replace(' ', '_').lower(): value for key, value in data.items()}
def read(self):
return self.is_checked
def fill(self, values):
if values:
self.check()
else:
self.uncheck()
@property
def is_displayed(self):
return self._invoke_cmd('is_displayed', self.name)
class EntitiesConditionalView(View, ReportDataControllerMixin):
""" represents Entities view with regard to view selector state
"""
elements = '//tr[./td/div[@class="quadicon"]]/following-sibling::tr/td/a'
title = Text('//div[@id="main-content"]//h1')
search = View.nested(Search)
paginator = PaginationPane()
flash = FlashMessages('.//div[@id="flash_msg_div"]/div[@id="flash_text_div" or '
'contains(@class, "flash_text_div")]')
@property
def entity_names(self):
""" looks for entities and extracts their names
Returns: all current page entities
"""
current_version = VersionPick({
Version.lowest(): 'old',
'5.9': 'new',
'upstream': 'new'
}).pick(self.browser.product_version)
if current_version == 'old':
br = self.browser
return [br.get_attribute('title', el) for el in br.elements(self.elements)]
else:
entities = self._invoke_cmd('get_all_items')
return [entity['item']['cells']['Name'] for entity in entities]
def get_all(self, surf_pages=False):
""" obtains all entities like QuadIcon displayed by view
Args:
surf_pages (bool): current page entities if False, all entities otherwise
Returns: all entities (QuadIcon/etc.) displayed by view
"""
if not surf_pages:
return [self.parent.entity_class(parent=self, name=name) for name in self.entity_names]
else:
entities = []
for _ in self.paginator.pages():
entities.extend([self.parent.entity_class(parent=self, name=name)
for name in self.entity_names])
return entities
def get_entities(self, by_name=None, surf_pages=False):
""" obtains all matched entities like QuadIcon displayed by view
Args:
by_name (str): only entities which match to by_name will be returned
surf_pages (bool): current page entities if False, all entities otherwise
Returns: all matched entities (QuadIcon/etc.) displayed by view
"""
entities = self.get_all(surf_pages)
remaining_entities = []
for entity in entities:
if by_name and by_name in entity.name:
remaining_entities.append(entity)
# todo: by_type and by_regexp will be implemented later if needed
return remaining_entities
def get_entity(self, by_name=None, surf_pages=False):
""" obtains one entity matched to by_name
raises exception if no entities or several entities were found
Args:
by_name (str): only entity which match to by_name will be returned
surf_pages (bool): current page entity if False, all entities otherwise
Returns: matched entities (QuadIcon/etc.)
"""
entities = self.get_entities(by_name=by_name, surf_pages=surf_pages)
if len(entities) == 0:
raise ItemNotFound("Entity {name} isn't found on this page".format(name=by_name))
elif len(entities) > 1:
raise ManyEntitiesFound("Several entities with {name} were found".format(name=by_name))
return entities[0]
def get_first_entity(self, by_name=None):
""" obtains one entity matched to by_name and stops on that page
raises exception if no entity or several entities were found
Args:
by_name (str): only entity which match to by_name will be returned
Returns: matched entity (QuadIcon/etc.)
"""
for _ in self.paginator.pages():
found_entities = [self.parent.entity_class(parent=self, name=name)
for name in self.entity_names if by_name == name]
if found_entities:
return found_entities[0]
raise ItemNotFound("Entity {name} isn't found on this page".format(name=by_name))
class BaseEntitiesView(View):
"""
should represent the view with different entities like providers
"""
@property
def entity_class(self):
return VersionPick({
Version.lowest(): NonJSBaseEntity,
'5.9': JSBaseEntity
}).pick(self.browser.product_version)
entities = ConditionalSwitchableView(reference='parent.toolbar.view_selector',
ignore_bad_reference=True)
@entities.register('Grid View', default=True)
class GridView(EntitiesConditionalView):
pass
@entities.register('List View')
class ListView(EntitiesConditionalView):
elements = Table(locator='//div[@id="list_grid"]/table')
@property
def entity_names(self):
""" looks for entities and extracts their names
Returns: all current page entities
"""
return [row.name.text for row in self.elements.rows()]
@entities.register('Tile View')
class TileView(EntitiesConditionalView):
pass
class ProviderQuadIconEntity(BaseQuadIconEntity):
""" Provider child of Quad Icon entity
"""
@property
def data(self):
br = self.browser
return {
"no_host": br.text(self.QUADRANT.format(pos='a')),
"vendor": br.get_attribute('src', self.QUADRANT.format(pos='c')),
"creds": br.get_attribute('src', self.QUADRANT.format(pos='d')),
}
class ProviderTileIconEntity(BaseTileIconEntity):
""" Provider child of Tile Icon entity
"""
quad_icon = ParametrizedView.nested(ProviderQuadIconEntity)
class ProviderListEntity(BaseListEntity):
""" Provider child of List entity
"""
pass
class NonJSProviderEntity(NonJSBaseEntity):
""" Provider child of Proxy entity
"""
quad_entity = ProviderQuadIconEntity
list_entity = ProviderListEntity
tile_entity = ProviderTileIconEntity
def ProviderEntity(): # noqa
""" Temporary wrapper for Provider Entity during transition to JS based Entity
"""
return VersionPick({
Version.lowest(): NonJSProviderEntity,
'5.9': JSBaseEntity,
})
class DashboardWidgetsPicker(View):
""" Represents widgets picker in Dashboard editing screen (Cloud Intel/Reports/Dashobards).
"""
ROOT = ParametrizedLocator(".//div[@id='{@id}']")
select = BootstrapSelect(Parameter("@select_id"))
def __init__(self, parent, id, select_id, names_locator=None, remove_locator=None, logger=None):
View.__init__(self, parent=parent, logger=logger)
self.id = id
self.select_id = select_id
self.names_locator = names_locator
self.remove_locator = remove_locator
def add_widget(self, widget):
self.select.fill(widget)
def remove_widget(self, widget):
self.browser.click(self.remove_locator.format(quote(widget)))
@property
def all_elements(self):
return self.browser.elements(self.names_locator)
@property
def all_widgets(self):
if self.all_elements:
return [widget.text for widget in self.all_elements]
else:
return []
def _values_to_remove(self, values):
return list(set(self.all_widgets) - set(values))
def _values_to_add(self, values):
return list(set(values) - set(self.all_widgets))
def fill(self, values):
if isinstance(values, six.string_types):
values = [values]
if set(values) == set(self.all_widgets):
return False
else:
values_to_remove = self._values_to_remove(values)
values_to_add = self._values_to_add(values)
for value in values_to_remove:
self.remove_widget(value)
for value in values_to_add:
self.add_widget(value)
return True
def read(self):
return self.all_widgets
class MenuShortcutsPicker(DashboardWidgetsPicker):
""" Represents shortcut picker in Menu Widget editing screen
(Cloud Intel/Reports/Dashboard Widgets/Menus).
"""
@ParametrizedView.nested
class shortcut(ParametrizedView): # noqa
PARAMETERS = ("number",)
alias = Input(name=ParametrizedString("shortcut_desc_{number}"))
        remove_button = Text(ParametrizedLocator('.//a[@id=concat("s_", {@number|quote}, "_close")]'))
def fill(self, alias):
self.alias.fill(alias)
def remove(self):
self.remove_button.click()
def add_shortcut(self, shortcut, alias):
# We need to get all options from the dropdown before picking
mapping = self.mapping
self.select.fill(shortcut)
if shortcut != alias:
self.shortcut(mapping[shortcut]).fill(alias)
@cached_property
def mapping(self):
return {option.text: option.value for option in self.select.all_options}
@property
def all_shortcuts(self):
if self.all_elements:
return [shortcut.get_attribute("value") for shortcut in self.all_elements]
else:
return []
def clear(self):
for el in self.browser.elements(".//a[@title='Remove this Shortcut']"):
self.browser.click(el)
def fill(self, values):
dict_values = None
if isinstance(values, six.string_types):
values = [values]
if isinstance(values, dict):
dict_values = values
values = values.values()
if set(values) == set(self.all_shortcuts):
return False
else:
self.clear()
if dict_values is not None:
dict_values_to_add = dict_values
else:
dict_values_to_add = {value: value for value in values}
            for shortcut, alias in dict_values_to_add.items():
self.add_shortcut(shortcut, alias)
return True
def read(self):
return self.all_shortcuts
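# Fill sketch (hedged; names are placeholders): a dict maps shortcut names to
# display aliases, while a plain list reuses each name as its own alias.
#
#   picker.fill({'Services / Catalogs': 'Catalogs'})
#   picker.fill(['Services / Requests'])  # alias defaults to the name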
class DynamicTable(VanillaTable):
"""Extend the widget.Table class to implement row_add for dynamic tables with an 'Actions'
column.
In these tables, the top or bottom row can be clicked to add a new row, and when it is
clicked the row is replaced (top or bottom) with a row containing fillable widgets.
    When the row is saved, it is moved to the bottom of the table. This behavior is specific to
some MIQ dynamic tables.
Args:
action_row: index of the action row, generally 0 or -1, defaults to 0
See Widgetastic.widget.Table for more arguments
"""
def __init__(self, *args, **kwargs):
self.action_row = kwargs.pop('action_row', 0) # pull this off and pass the rest up
super(DynamicTable, self).__init__(*args, **kwargs)
def row_add(self):
"""Use the action-cell column widget to add a row
Clicks on the row directly, not the action button
Returns:
int positive row index of the action row where the new widgets should be displayed
"""
# convert action_row into a positive index
if self.action_row >= 0:
pos_action_index = self.action_row
else:
pos_action_index = self._process_negative_index(nindex=self.action_row)
try:
self[pos_action_index].click()
except IndexError: # self.action_row must have been None
raise DynamicTableAddError('DynamicTable action_row index "{}" not found in table'
.format(self.action_row))
return pos_action_index
def row_save(self, row=None):
"""Save the row, assuming attributized columns includes 'actions'
Implements behavior of AnalysisProfile type tables, where the row is moved to the bottom
on save
Returns:
int row index of the last row in the table
"""
try:
self[row or self.action_row].actions.click()
except IndexError: # self.action_row must have been None
raise DynamicTableAddError('DynamicTable action_row index "{}" not found in table'
.format(self.action_row))
return self._process_negative_index(nindex=-1) # use process_negative_index to get last row
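# Illustrative DynamicTable flow (hedged sketch; the view, locator and
# action_row value are placeholders, not framework guarantees):
#
#   table = DynamicTable(view, '//div[@id="form_div"]//table', action_row=0)
#   idx = table.row_add()    # click the action row; it becomes fillable
#   ...                      # fill the widgets that appear in that row
#   table.row_save()         # the saved row moves to the bottom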
| gpl-2.0 | -4,667,914,247,566,659,000 | -5,780,603,909,901,249,000 | 32.717265 | 100 | 0.592607 | false |
ninnux/exscript | tests/Exscript/LogfileTest.py | 6 | 1494 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
from tempfile import mkdtemp
from shutil import rmtree
from LogTest import LogTest
from Exscript.Logfile import Logfile
class LogfileTest(LogTest):
CORRELATE = Logfile
def setUp(self):
self.tempdir = mkdtemp()
self.logfile = os.path.join(self.tempdir, 'test.log')
self.errorfile = self.logfile + '.error'
self.log = Logfile('testme', self.logfile)
def tearDown(self):
rmtree(self.tempdir)
def testConstructor(self):
self.assertEqual('testme', self.log.get_name())
self.assertEqual('', str(self.log))
self.failIf(os.path.exists(self.logfile))
self.failIf(os.path.exists(self.errorfile))
def testStarted(self):
LogTest.testStarted(self)
self.assert_(os.path.exists(self.logfile))
self.failIf(os.path.exists(self.errorfile))
def testAborted(self):
LogTest.testAborted(self)
self.assert_(os.path.exists(self.logfile))
self.assert_(os.path.exists(self.errorfile))
def testSucceeded(self):
LogTest.testSucceeded(self)
self.assert_(os.path.exists(self.logfile))
self.failIf(os.path.exists(self.errorfile))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(LogfileTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| gpl-2.0 | 5,700,315,415,708,698,000 | 8,512,667,497,704,619,000 | 32.2 | 78 | 0.645248 | false |
hahaps/openstack-project-generator | template/<project_name>/tests/unit/cast_as_call.py | 55 | 1168 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
def mock_cast_as_call(obj=None):
"""Use this to mock `cast` as calls.
:param obj: Either an instance of RPCClient
or an instance of _Context.
"""
orig_prepare = obj.prepare
def prepare(*args, **kwargs):
cctxt = orig_prepare(*args, **kwargs)
mock_cast_as_call(obj=cctxt) # woo, recurse!
return cctxt
prepare_patch = mock.patch.object(obj, 'prepare').start()
prepare_patch.side_effect = prepare
cast_patch = mock.patch.object(obj, 'cast').start()
cast_patch.side_effect = obj.call
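# Minimal usage sketch (hedged): ``client`` is assumed to be an
# oslo.messaging RPCClient built elsewhere in the test.
#
#   mock_cast_as_call(obj=client)
#   client.cast(ctxt, 'do_thing')  # now runs synchronously via call()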
| apache-2.0 | 8,267,200,906,213,633,000 | 4,536,078,568,810,182,000 | 32.371429 | 78 | 0.680651 | false |
BeegorMif/HTPC-Manager | lib/hachoir_core/field/timestamp.py | 90 | 2941 | from lib.hachoir_core.tools import (humanDatetime, humanDuration,
timestampUNIX, timestampMac32, timestampUUID60,
timestampWin64, durationWin64)
from lib.hachoir_core.field import Bits, FieldSet
from datetime import datetime
class GenericTimestamp(Bits):
def __init__(self, parent, name, size, description=None):
Bits.__init__(self, parent, name, size, description)
def createDisplay(self):
return humanDatetime(self.value)
def createRawDisplay(self):
value = Bits.createValue(self)
return unicode(value)
def __nonzero__(self):
return Bits.createValue(self) != 0
def timestampFactory(cls_name, handler, size):
class Timestamp(GenericTimestamp):
def __init__(self, parent, name, description=None):
GenericTimestamp.__init__(self, parent, name, size, description)
def createValue(self):
value = Bits.createValue(self)
return handler(value)
cls = Timestamp
cls.__name__ = cls_name
return cls
TimestampUnix32 = timestampFactory("TimestampUnix32", timestampUNIX, 32)
TimestampUnix64 = timestampFactory("TimestampUnix64", timestampUNIX, 64)
TimestampMac32 = timestampFactory("TimestampMac32", timestampMac32, 32)
TimestampUUID60 = timestampFactory("TimestampUUID60", timestampUUID60, 60)
TimestampWin64 = timestampFactory("TimestampWin64", timestampWin64, 64)
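# The factory makes new timestamp field types one-liners. For example, a
# hypothetical 16-bit UNIX-style field (a sketch, not part of hachoir) is:
#
#   TimestampUnix16 = timestampFactory("TimestampUnix16", timestampUNIX, 16)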
class TimeDateMSDOS32(FieldSet):
"""
32-bit MS-DOS timestamp (16-bit time, 16-bit date)
"""
static_size = 32
def createFields(self):
# TODO: Create type "MSDOS_Second" : value*2
yield Bits(self, "second", 5, "Second/2")
yield Bits(self, "minute", 6)
yield Bits(self, "hour", 5)
yield Bits(self, "day", 5)
yield Bits(self, "month", 4)
# TODO: Create type "MSDOS_Year" : value+1980
yield Bits(self, "year", 7, "Number of year after 1980")
def createValue(self):
return datetime(
1980+self["year"].value, self["month"].value, self["day"].value,
self["hour"].value, self["minute"].value, 2*self["second"].value)
def createDisplay(self):
return humanDatetime(self.value)
class DateTimeMSDOS32(TimeDateMSDOS32):
"""
32-bit MS-DOS timestamp (16-bit date, 16-bit time)
"""
def createFields(self):
yield Bits(self, "day", 5)
yield Bits(self, "month", 4)
yield Bits(self, "year", 7, "Number of year after 1980")
yield Bits(self, "second", 5, "Second/2")
yield Bits(self, "minute", 6)
yield Bits(self, "hour", 5)
class TimedeltaWin64(GenericTimestamp):
def __init__(self, parent, name, description=None):
GenericTimestamp.__init__(self, parent, name, 64, description)
def createDisplay(self):
return humanDuration(self.value)
def createValue(self):
value = Bits.createValue(self)
return durationWin64(value)
| gpl-3.0 | 2,667,448,047,627,569,700 | -468,009,066,928,350,600 | 33.197674 | 77 | 0.655559 | false |
asmikhailov/i3pystatus | i3pystatus/zabbix.py | 1 | 2493 | from i3pystatus import IntervalModule
from pyzabbix import ZabbixAPI
class Zabbix(IntervalModule):
"""
Zabbix alerts watcher
Requires: pyzabbix
.. rubric:: Available formatters
* {default} - Full output count alerts like total:a5/a4/a3/a2/a1/a0
* {total} - Total count of alerts
* {aX_count} - Count alerts of X severity
    * {colorX} - Predicted color for X severity. It can be used with a Pango markup hint to show a different colour for each severity.
"""
settings = (
("zabbix_server", "Zabbix Server URL"),
("zabbix_user", "Zabbix API User"),
("zabbix_password", "Zabbix users password"),
("interval", "Update interval"),
"format"
)
required = ("zabbix_server", "zabbix_user", "zabbix_password")
interval = 60
format = "{default}"
def run(self):
alerts_color = ["#DBDBDB", "#D6F6FF", "#FFF6A5", "#FFB689", "#FF9999", "#FF3838"]
zapi = ZabbixAPI(self.zabbix_server)
try:
zapi.login(self.zabbix_user, self.zabbix_password)
triggers = zapi.trigger.get(only_true=1,
skipDependent=1,
monitored=1,
active=1,
min_severity=2,
output=["priority"],
withLastEventUnacknowledged=1,
)
alerts_list = [t['priority'] for t in triggers]
alerts = [0, 0, 0, 0, 0, 0]
cdict = {}
for i in range(0, 6):
alerts[i] = alerts_list.count(str(i))
cdict["a%s_count" % i] = alerts[i]
if alerts[i] == 0:
cdict["color%s" % i] = "#FFFFFF"
else:
cdict["color%s" % i] = alerts_color[i]
cdict["default"] = "{0}:{a[5]}/{a[4]}/{a[3]}/{a[2]}/{a[1]}/{a[0]}".format(sum(alerts), a=alerts)
cdict["total"] = sum(alerts)
if alerts_list:
color = alerts_color[max(map(int, alerts_list))]
else:
color = alerts_color[0]
result = self.format.format(**cdict)
        except Exception:
result = "Zabbix connection error"
color = "#FF0000"
self.output = {
"full_text": result,
"color": color
}
| mit | -4,640,737,535,350,470,000 | 1,786,680,758,423,928,300 | 34.112676 | 130 | 0.478941 | false |
DzinVision/adventofcode-2016 | day_13.py | 1 | 1232 | from queue import Queue
seed = 1362
seen = set()
def is_empty(x, y):
n = x*x + 3*x + 2*x*y + y + y*y + seed
return bin(n).count('1') % 2 == 0
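# Worked check of the formula (hedged): with seed 10, the example seed from
# the puzzle statement, (x, y) = (1, 1) gives n = 1 + 3 + 2 + 1 + 1 + 10 = 18;
# bin(18) == '0b10010' has two set bits, so the cell is open.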
def valid_moves(x, y):
    result = []
    actions = [-1, 1]
    for action in actions:
        new_x = x + action
        # coordinates must stay non-negative; check the candidate cell itself
        if new_x >= 0 and is_empty(new_x, y) and (new_x, y) not in seen:
            result.append((new_x, y))
        new_y = y + action
        if new_y >= 0 and is_empty(x, new_y) and (x, new_y) not in seen:
            result.append((x, new_y))
    return result
state = {
'coords': (1, 1),
'moves': 0
}
que = Queue()
que.put(state)
locations = 0
solved_1 = False
solved_2 = False
while not solved_1 or not solved_2:
current_state = que.get()
moves = current_state['moves']
if current_state['coords'] in seen:
continue
seen.add(current_state['coords'])
if current_state['coords'] == (31, 39):
solved_1 = True
print('#1:', moves)
    # Part 2: count distinct locations reachable in at most 50 steps. BFS
    # pops states in order of distance, so once a state deeper than 50
    # appears, every qualifying location has already been counted.
    if moves <= 50:
        locations += 1
    elif not solved_2:
        solved_2 = True
        print('#2:', locations)
    for move in valid_moves(*current_state['coords']):
        que.put({'coords': move, 'moves': moves + 1})
| gpl-3.0 | -4,086,932,412,024,036,000 | -1,109,550,367,008,188,200 | 19.533333 | 67 | 0.541396 | false |
KitKatXperience/platform_external_chromium_org | third_party/closure_linter/closure_linter/tokenutil.py | 135 | 10976 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token utility functions."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import copy
from closure_linter import javascripttokens
from closure_linter.common import tokens
# Shorthand
JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
Args:
token: Any token in the line.
Returns:
The first token in the same line as token.
"""
while not token.IsFirstInLine():
token = token.previous
return token
def GetFirstTokenInPreviousLine(token):
"""Returns the first token in the previous line as token.
Args:
token: Any token in the line.
Returns:
The first token in the previous line as token, or None if token is on the
first line.
"""
first_in_line = GetFirstTokenInSameLine(token)
if first_in_line.previous:
return GetFirstTokenInSameLine(first_in_line.previous)
return None
def GetLastTokenInSameLine(token):
"""Returns the last token in the same line as token.
Args:
token: Any token in the line.
Returns:
The last token in the same line as token.
"""
while not token.IsLastInLine():
token = token.next
return token
def GetAllTokensInSameLine(token):
"""Returns all tokens in the same line as the given token.
Args:
token: Any token in the line.
Returns:
All tokens on the same line as the given token.
"""
first_token = GetFirstTokenInSameLine(token)
last_token = GetLastTokenInSameLine(token)
tokens_in_line = []
while first_token != last_token:
tokens_in_line.append(first_token)
first_token = first_token.next
tokens_in_line.append(last_token)
return tokens_in_line
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
Args:
start_token: The token to start searching from
func: The function to call to test a token for applicability
end_func: The function to call to test a token to determine whether to abort
the search.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token matching func within distance of this token, or None if no
such token is found.
"""
token = start_token
if reverse:
while token and (distance is None or distance > 0):
previous = token.previous
if previous:
if func(previous):
return previous
if end_func and end_func(previous):
return None
token = previous
if distance is not None:
distance -= 1
else:
while token and (distance is None or distance > 0):
next_token = token.next
if next_token:
if func(next_token):
return next_token
if end_func and end_func(next_token):
return None
token = next_token
if distance is not None:
distance -= 1
return None
def Search(start_token, token_types, distance=None, reverse=False):
"""Returns the first token of type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The allowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
None, distance, reverse)
def SearchExcept(start_token, token_types, distance=None, reverse=False):
"""Returns the first token not of any type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The unallowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
"""
return CustomSearch(start_token,
lambda token: not token.IsAnyType(token_types),
None, distance, reverse)
def SearchUntil(start_token, token_types, end_types, distance=None,
reverse=False):
"""Returns the first token of type in token_types before a token of end_type.
Args:
start_token: The token to start searching from.
token_types: The allowable types of the token being searched for.
end_types: Types of tokens to abort search if we find.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token
before any tokens of type in end_type, or None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
lambda token: token.IsAnyType(end_types),
distance, reverse)
def DeleteToken(token):
"""Deletes the given token from the linked list.
Args:
token: The token to delete
"""
if token.previous:
token.previous.next = token.next
if token.next:
token.next.previous = token.previous
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
def DeleteTokens(token, token_count):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
token_count: The total number of tokens to delete.
"""
  for _ in xrange(1, token_count):
DeleteToken(token.next)
DeleteToken(token)
def InsertTokenAfter(new_token, token):
"""Insert new_token after token.
Args:
new_token: A token to be added to the stream
token: A token already in the stream
"""
new_token.previous = token
new_token.next = token.next
new_token.metadata = copy.copy(token.metadata)
if token.IsCode():
new_token.metadata.last_code = token
if new_token.IsCode():
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = new_token
following_token = following_token.next
token.next = new_token
if new_token.next:
new_token.next.previous = new_token
if new_token.start_index is None:
if new_token.line_number == token.line_number:
new_token.start_index = token.start_index + len(token.string)
else:
new_token.start_index = 0
iterator = new_token.next
while iterator and iterator.line_number == new_token.line_number:
iterator.start_index += len(new_token.string)
iterator = iterator.next
def InsertTokensAfter(new_tokens, token):
"""Insert multiple tokens after token.
Args:
new_tokens: An array of tokens to be added to the stream
token: A token already in the stream
"""
# TODO(user): It would be nicer to have InsertTokenAfter defer to here
# instead of vice-versa.
current_token = token
for new_token in new_tokens:
InsertTokenAfter(new_token, current_token)
current_token = new_token
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
Args:
token: The token to insert a space token after
Returns:
A single space token
"""
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
InsertTokenAfter(space_token, token)
def InsertBlankLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
Returns:
A single space token
"""
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
InsertLineAfter(token, [blank_token])
def InsertLineAfter(token, new_tokens):
"""Inserts a new line consisting of new_tokens after the given token.
Args:
token: The token to insert after.
new_tokens: The tokens that will make up the new line.
"""
insert_location = token
for new_token in new_tokens:
InsertTokenAfter(new_token, insert_location)
insert_location = new_token
# Update all subsequent line numbers.
next_token = new_tokens[-1].next
while next_token:
next_token.line_number += 1
next_token = next_token.next
def SplitToken(token, position):
"""Splits the token into two tokens at position.
Args:
token: The token to split
position: The position to split at. Will be the beginning of second token.
Returns:
The new second token.
"""
new_string = token.string[position:]
token.string = token.string[:position]
new_token = JavaScriptToken(new_string, token.type, token.line,
token.line_number)
InsertTokenAfter(new_token, token)
return new_token
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
Args:
token1: The first token to compare.
token2: The second token to compare.
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.
"""
if token2.line_number != token1.line_number:
return token1.line_number - token2.line_number
else:
return token1.start_index - token2.start_index
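# Minimal usage sketch (hedged): assumes a token stream already produced by
# the linter's tokenizer; ``some_token`` is a placeholder.
#
#   first = GetFirstTokenInSameLine(some_token)
#   space = Search(first, Type.WHITESPACE)
#   if space and Compare(first, space) < 0:
#     InsertBlankLineAfter(space)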
| bsd-3-clause | -1,409,673,733,009,325,300 | 3,142,546,845,443,647,500 | 28.347594 | 80 | 0.690689 | false |
dgzurita/odoo | addons/payment/tests/common.py | 392 | 1822 | # -*- coding: utf-8 -*-
from openerp.tests import common
class PaymentAcquirerCommon(common.TransactionCase):
def setUp(self):
super(PaymentAcquirerCommon, self).setUp()
self.payment_acquirer = self.registry('payment.acquirer')
self.payment_transaction = self.registry('payment.transaction')
self.currency_euro_id = self.registry('res.currency').search(
self.cr, self.uid, [('name', '=', 'EUR')], limit=1)[0]
self.currency_euro = self.registry('res.currency').browse(
self.cr, self.uid, self.currency_euro_id)
self.country_belgium_id = self.registry('res.country').search(
self.cr, self.uid, [('code', 'like', 'BE')], limit=1)[0]
self.country_france_id = self.registry('res.country').search(
self.cr, self.uid, [('code', 'like', 'FR')], limit=1)[0]
# dict partner values
self.buyer_values = {
'name': 'Norbert Buyer',
'lang': 'en_US',
'email': 'norbert.buyer@example.com',
'street': 'Huge Street',
'street2': '2/543',
'phone': '0032 12 34 56 78',
'city': 'Sin City',
'zip': '1000',
'country_id': self.country_belgium_id,
'country_name': 'Belgium',
}
# test partner
self.buyer_id = self.registry('res.partner').create(
self.cr, self.uid, {
'name': 'Norbert Buyer',
'lang': 'en_US',
'email': 'norbert.buyer@example.com',
'street': 'Huge Street',
'street2': '2/543',
'phone': '0032 12 34 56 78',
'city': 'Sin City',
'zip': '1000',
'country_id': self.country_belgium_id,
}
)
| agpl-3.0 | -8,924,184,969,733,959,000 | -8,807,338,273,234,100,000 | 36.183673 | 71 | 0.511526 | false |
asen6/amartyasenguptadotcom | django/contrib/gis/db/backends/base.py | 55 | 10675 | """
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model the backend.
"""
import re
from django.conf import settings
from django.contrib.gis import gdal
class BaseSpatialOperations(object):
"""
This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = {}
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geography type?
geography = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
if isinstance(name, unicode):
name = name.encode('ascii')
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
raise NotImplementedError
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError
def spatial_ref_sys(self):
raise NotImplementedError
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
SpatialRefSys objects to reduce redundnant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception, msg:
pass
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception, msg:
pass
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
            if m:
                return (float(m.group('major')), float(m.group('flattening')))
            else:
                return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
retrive the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __unicode__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return unicode(self.srs)
except:
return unicode(self.wkt)
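# Illustrative use of the classmethod helpers (hedged sketch; ``wkt`` stands
# for any valid WKT string obtained elsewhere):
#
#   units, unit_name = SpatialRefSysMixin.get_units(wkt)
#   spheroid_sql = SpatialRefSysMixin.get_spheroid(wkt)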
| bsd-3-clause | 6,161,618,130,225,390,000 | -3,783,766,191,132,489,000 | 30.770833 | 248 | 0.596721 | false |
rruebner/odoo | addons/stock_dropshipping/__init__.py | 61 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | 7,977,003,438,067,410,000 | -8,218,970,859,746,623,000 | 44.478261 | 78 | 0.611483 | false |
guorendong/iridium-browser-ubuntu | chrome/test/remoting/install_and_launch_app.py | 79 | 6848 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Chromedriver smoke-test that installs and launches a web-app.
Args:
driver_dir: Location of Chromedriver binary on local machine.
profile_dir: A user-data-dir containing login token for the app-user.
app_id: App ID of web-app in Chrome web-store.
app_window_title: The title of the window that should come up on app launch.
TODO(anandc): Reduce the # of parameters required from the command-line.
Maybe read from a JSON file. Also, map appID to expected app window title.
This script navigates to the app-detail page on Chrome Web Store for the
specified app-id. From there, it then installs the app and launches it. It
then checks if the resulting new window has the expected title.
"""
import argparse
import os
import shutil
import tempfile
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
CWS_URL = 'https://chrome.google.com/webstore/detail'
WEBSTORE_BUTTON_LABEL = 'webstore-test-button-label'
FREE_BUTTON_XPATH = (
'//div[contains(@class, \"%s\") and text() = \"Free\"]' %
(WEBSTORE_BUTTON_LABEL))
LAUNCH_BUTTON_XPATH = (
'//div[contains(@class, \"%s\") and text() = \"Launch app\"]' %
(WEBSTORE_BUTTON_LABEL))
WAIT_TIME = 2
def CreateTempProfileDir(source_dir):
"""Creates a temporary profile directory, for use by the test.
This avoids modifying the input user-data-dir by actions that the test
performs.
Args:
source_dir: The directory to copy and place in a temp folder.
Returns:
tmp_dir: Name of the temporary folder that was created.
profile_dir: Name of the profile-dir under the tmp_dir.
"""
tmp_dir = tempfile.mkdtemp()
print 'Created folder %s' % (tmp_dir)
profile_dir = os.path.join(tmp_dir, 'testuser')
# Copy over previous created profile for this execution of Chrome Driver.
shutil.copytree(source_dir, profile_dir)
return tmp_dir, profile_dir
def ParseCmdLineArgs():
"""Parses command line arguments and returns them.
Returns:
args: Parse command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--driver_dir', required=True,
help='path to folder where Chromedriver has been installed.')
parser.add_argument(
'-p', '--profile_dir', required=True,
help='path to user-data-dir with trusted-tester signed in.')
parser.add_argument(
'-a', '--app_id', required=True,
help='app-id of web-store app being tested.')
parser.add_argument(
'-e', '--app_window_title', required=True,
help='Title of the app window that we expect to come up.')
# Use input json file if specified on command line.
args = parser.parse_args()
return args
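# Typical invocation (hedged sketch; the paths, app id and window title are
# placeholders):
#
#   python install_and_launch_app.py \
#       --driver_dir /usr/local/bin \
#       --profile_dir /tmp/testuser-profile \
#       --app_id aaaabbbbccccddddeeeeffffgggghhhh \
#       --app_window_title "Chrome Remote Desktop"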
def GetLinkAndWait(driver, link_to_get):
"""Navigates to the specified link.
Args:
driver: Active window for this Chromedriver instance.
link_to_get: URL of the destination.
"""
driver.get(link_to_get)
# TODO(anandc): Is there any event or state we could wait on? For now,
# we have hard-coded sleeps.
time.sleep(WAIT_TIME)
def ClickAndWait(driver, button_xpath):
"""Clicks button at the specified XPath of the current document.
Args:
driver: Active window for this Chromedriver instance.
button_xpath: XPath in this document to button we want to click.
"""
button = driver.find_element_by_xpath(button_xpath)
button.click()
time.sleep(WAIT_TIME)
def WindowWithTitleExists(driver, title):
"""Verifies if one of the open windows has the specified title.
Args:
driver: Active window for this Chromedriver instance.
title: Title of the window we are looking for.
Returns:
True if an open window in this session with the specified title was found.
False otherwise.
"""
for handle in driver.window_handles:
driver.switch_to_window(handle)
if driver.title == title:
return True
return False
def main():
args = ParseCmdLineArgs()
org_profile_dir = args.profile_dir
print 'Creating temp-dir using profile-dir %s' % org_profile_dir
tmp_dir, profile_dir = CreateTempProfileDir(org_profile_dir)
options = Options()
options.add_argument('--user-data-dir=' + profile_dir)
# Suppress the confirmation dialog that comes up.
# With M39, this flag will no longer work. See https://crbug/357774.
# TODO(anandc): Work with a profile-dir that already has extension downloaded,
# and also add support for loading extension from a local directory.
options.add_argument('--apps-gallery-install-auto-confirm-for-tests=accept')
driver = webdriver.Chrome(args.driver_dir, chrome_options=options)
try:
chrome_apps_link = 'chrome://apps'
cws_app_detail_link = '%s/%s' % (CWS_URL, args.app_id)
# Navigate to chrome:apps first.
# TODO(anandc): Add check to make sure the app we are testing isn't already
# added for this user.
GetLinkAndWait(driver, chrome_apps_link)
# Navigate to the app detail page at the Chrome Web Store.
GetLinkAndWait(driver, cws_app_detail_link)
# Get the page again, to get all controls. This seems to be a bug, either
# in ChromeDriver, or the app-page. Without this additional GET, we don't
# get all controls. Even sleeping for 5 seconds doesn't suffice.
# TODO(anandc): Investigate why the page doesn't work with just 1 call.
GetLinkAndWait(driver, cws_app_detail_link)
# Install the app by clicking the button that says "Free".
ClickAndWait(driver, FREE_BUTTON_XPATH)
# We should now be at a new tab. Get its handle.
current_tab = driver.window_handles[-1]
# And switch to it.
driver.switch_to_window(current_tab)
# From this new tab, go to Chrome Apps
# TODO(anandc): Add check to make sure the app we are testing is now added.
GetLinkAndWait(driver, chrome_apps_link)
# Back to the app detail page.
GetLinkAndWait(driver, cws_app_detail_link)
# Again, do this twice, for reasons noted above.
GetLinkAndWait(driver, cws_app_detail_link)
# Click to launch the newly installed app.
ClickAndWait(driver, LAUNCH_BUTTON_XPATH)
# For now, make sure the "connecting" dialog comes up.
# TODO(anandc): Add more validation; ideally, wait for the separate app
# window to appear.
if WindowWithTitleExists(driver, args.app_window_title):
print 'Web-App %s launched successfully.' % args.app_window_title
else:
print 'Web-app %s did not launch successfully.' % args.app_window_title
  except Exception:
    raise
finally:
# Cleanup.
print 'Deleting %s' % tmp_dir
shutil.rmtree(profile_dir)
os.rmdir(tmp_dir)
driver.quit()
if __name__ == '__main__':
main()
| bsd-3-clause | -2,010,478,717,102,486,500 | 1,066,263,115,963,651,600 | 32.242718 | 80 | 0.702103 | false |
rhololkeolke/apo-website | src/flask/exceptions.py | 63 | 1462 | # -*- coding: utf-8 -*-
"""
flask.exceptions
    ~~~~~~~~~~~~~~~~
Flask specific additions to :class:`~werkzeug.exceptions.HTTPException`
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.exceptions import HTTPException, BadRequest
from .helpers import json
class JSONHTTPException(HTTPException):
"""A base class for HTTP exceptions with ``Content-Type:
application/json``.
    The ``description`` attribute of this class must be set to a string (*not* an
HTML string) which describes the error.
"""
def get_body(self, environ):
"""Overrides :meth:`werkzeug.exceptions.HTTPException.get_body` to
return the description of this error in JSON format instead of HTML.
"""
return json.dumps(dict(description=self.get_description(environ)))
def get_headers(self, environ):
"""Returns a list of headers including ``Content-Type:
application/json``.
"""
return [('Content-Type', 'application/json')]
class JSONBadRequest(JSONHTTPException, BadRequest):
"""Represents an HTTP ``400 Bad Request`` error whose body contains an
error message in JSON format instead of HTML format (as in the superclass).
"""
#: The description of the error which occurred as a string.
description = (
'The browser (or proxy) sent a request that this server could not '
'understand.'
)
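# A minimal usage sketch (illustrative only, not part of this module): a view
# can raise the JSON variant so API clients get a JSON error body. The `app`
# and `request` names are assumed to come from the application using Flask.
#
#   @app.route('/api/item')
#   def get_item():
#       if 'id' not in request.args:
#           raise JSONBadRequest()
#       ...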
| bsd-3-clause | 1,435,887,265,843,819,800 | 3,341,232,005,588,671,000 | 28.836735 | 79 | 0.665527 | false |
ros2/ci | ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/process_utils/impl.py | 1 | 12684 | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from .execute_process_nopty import _execute_process_nopty
try:
from .execute_process_pty import _execute_process_pty
except ImportError:
# pty doesn't work on Windows, it will fail to import
# so fallback to non pty implementation
_execute_process_pty = None
try:
_basestring = basestring # Python 2
except NameError:
_basestring = str # Python 3
def execute_process(cmd, cwd=None, env=None, shell=False, emulate_tty=False):
"""Executes a command with arguments and returns output line by line.
All arguments, except ``emulate_tty``, are passed directly to
:py:class:`subprocess.Popen`.
``execute_process`` returns a generator which yields the output, line by
line, until the subprocess finishes at which point the return code
is yielded.
This is an example of how this function should be used:
.. code-block:: python
from __future__ import print_function
from osrf_pycommon.process_utils import execute_process
cmd = ['ls', '-G']
for line in execute_process(cmd, cwd='/usr'):
if isinstance(line, int):
# This is a return code, the command has exited
print("'{0}' exited with: {1}".format(' '.join(cmd), line))
continue # break would also be appropriate here
# In Python 3, it will be a bytes array which needs to be decoded
if not isinstance(line, str):
line = line.decode('utf-8')
# Then print it to the screen
print(line, end='')
``stdout`` and ``stderr`` are always captured together and returned line
by line through the returned generator.
New line characters are preserved in the output, so if re-printing the data
take care to use ``end=''`` or first ``rstrip`` the output lines.
When ``emulate_tty`` is used on Unix systems, commands will identify that
they are on a tty and should output color to the screen as if you were
running it on the terminal, and therefore there should not be any need to
pass arguments like ``-c color.ui=always`` to commands like ``git``.
    Additionally, programs might also behave differently when
    ``emulate_tty`` is being used, for example, Python will default to
    unbuffered output when it detects a tty.
    ``emulate_tty`` works by using pseudo-terminals on Unix machines, and so
if you are running this command many times in parallel (like hundreds
of times) then you may get one of a few different :py:exc:`OSError`'s.
For example, "OSError: [Errno 24] Too many open files: '/dev/ttyp0'" or
"OSError: out of pty devices".
You should also be aware that you share pty devices with the rest of the
system, so even if you are not using a lot, it is possible to get
this error.
You can catch this error before getting data from the generator, so when
using ``emulate_tty`` you might want to do something like this:
.. code-block:: python
from __future__ import print_function
from osrf_pycommon.process_utils import execute_process
cmd = ['ls', '-G', '/usr']
try:
output = execute_process(cmd, emulate_tty=True)
except OSError:
output = execute_process(cmd, emulate_tty=False)
for line in output:
if isinstance(line, int):
print("'{0}' exited with: {1}".format(' '.join(cmd), line))
continue
# In Python 3, it will be a bytes array which needs to be decoded
if not isinstance(line, str):
line = line.decode('utf-8')
print(line, end='')
This way if a pty cannot be opened in order to emulate the tty then you
can try again without emulation, and any other :py:exc:`OSError` should
raise again with ``emulate_tty`` set to ``False``.
Obviously, you only want to do this if emulating the tty is non-critical
to your processing, like when you are using it to capture color.
Any color information that the command outputs as ANSI escape sequences
is captured by this command.
That way you can print the output to the screen and preserve the color
formatting.
    If you do not want color to be in the output, then try setting
    ``emulate_tty`` to ``False``, but that does not guarantee that there is no
    color in the output; instead it only causes called processes to
    identify that they are not being run in a terminal.
Most well behaved programs will not output color if they detect that
they are not being executed in a terminal, but you shouldn't rely on that.
If you want to ensure there is no color in the output from an executed
process, then use this function:
:py:func:`osrf_pycommon.terminal_color.remove_ansi_escape_senquences`
Exceptions can be raised by functions called by the implementation,
for example, :py:class:`subprocess.Popen` can raise an :py:exc:`OSError`
when the given command is not found.
If you want to check for the existence of an executable on the path,
see: :py:func:`which`.
However, this function itself does not raise any special exceptions.
:param list cmd: list of strings with the first item being a command
and subsequent items being any arguments to that command;
passed directly to :py:class:`subprocess.Popen`.
:param str cwd: path in which to run the command, defaults to None which
means :py:func:`os.getcwd` is used;
passed directly to :py:class:`subprocess.Popen`.
:param dict env: environment dictionary to use for executing the command,
default is None which uses the :py:obj:`os.environ` environment;
passed directly to :py:class:`subprocess.Popen`.
:param bool shell: If True the system shell is used to evaluate the
command, default is False;
passed directly to :py:class:`subprocess.Popen`.
:param bool emulate_tty: If True attempts to use a pty to convince
subprocess's that they are being run in a terminal. Typically this is
useful for capturing colorized output from commands. This does not
work on Windows (no pty's), so it is considered False even when True.
Defaults to False.
:returns: a generator which yields output from the command line by line
:rtype: generator which yields strings
"""
exp_func = _execute_process_nopty
if emulate_tty and _execute_process_pty is not None:
exp_func = _execute_process_pty
for out, err, ret in exp_func(cmd, cwd, env, shell, stderr_to_stdout=True):
if ret is None:
yield out
continue
yield ret
def execute_process_split(
cmd, cwd=None, env=None, shell=False, emulate_tty=False
):
""":py:func:`execute_process`, except ``stderr`` is returned separately.
    Instead of yielding output line by line until yielding a return code, this
    function always yields a triplet of ``stdout``, ``stderr``, and return code.
Each time only one of the three will not be None.
Once you receive a non-None return code (type will be int) there will be no
more ``stdout`` or ``stderr``.
Therefore you can use the command like this:
.. code-block:: python
from __future__ import print_function
import sys
from osrf_pycommon.process_utils import execute_process_split
cmd = ['time', 'ls', '-G']
for out, err, ret in execute_process_split(cmd, cwd='/usr'):
# In Python 3, it will be a bytes array which needs to be decoded
out = out.decode('utf-8') if out is not None else None
err = err.decode('utf-8') if err is not None else None
if ret is not None:
# This is a return code, the command has exited
print("'{0}' exited with: {1}".format(' '.join(cmd), ret))
break
if out is not None:
print(out, end='')
if err is not None:
print(err, end='', file=sys.stderr)
When using this, it is possible that the ``stdout`` and ``stderr`` data can
be returned in a different order than what would happen on the terminal.
This is due to the fact that the subprocess is given different buffers for
``stdout`` and ``stderr`` and so there is a race condition on the
subprocess writing to the different buffers and this command reading the
buffers.
This can be avoided in most scenarios by using ``emulate_tty``, because of
the use of ``pty``'s, though the ordering can still not be guaranteed and
the number of ``pty``'s is finite as explained in the documentation for
:py:func:`execute_process`.
For situations where output ordering between ``stdout`` and ``stderr`` are
critical, they should not be returned separately and instead should share
one buffer, and so :py:func:`execute_process` should be used.
For all other parameters and documentation see: :py:func:`execute_process`
"""
exp_func = _execute_process_nopty
if emulate_tty and _execute_process_pty is not None:
exp_func = _execute_process_pty
return exp_func(cmd, cwd, env, shell, stderr_to_stdout=False)
try:
from shutil import which as _which
except ImportError:
_which = None
def _which_backport(cmd, mode=os.F_OK | os.X_OK, path=None):
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative
# to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for directory in path:
normdir = os.path.normcase(directory)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(directory, thefile)
if _access_check(name, mode):
return name
return None
def which(cmd, mode=os.F_OK | os.X_OK, path=None, **kwargs):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to ``os.F_OK | os.X_OK``. `path` defaults to the result
of ``os.environ.get("PATH")``, or can be overridden with a custom search
path.
Backported from :py:func:`shutil.which`
(`<https://docs.python.org/3.3/library/shutil.html#shutil.which>`_),
available in Python 3.3.
"""
kwargs.update({'mode': mode, 'path': path})
global _which
if _which is not None:
return _which(cmd, **kwargs)
return _which_backport(cmd, **kwargs)
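if __name__ == '__main__':
    # Illustrative demo only; this guard is not part of the original module.
    # Locate an executable on the PATH, then stream its output line by line.
    echo = which('echo')
    if echo is not None:
        for line in execute_process([echo, 'hello'], emulate_tty=False):
            if isinstance(line, int):
                sys.stdout.write('exit code: %d\n' % line)
            elif line is not None:
                if not isinstance(line, str):
                    line = line.decode('utf-8')
                sys.stdout.write(line)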
| apache-2.0 | 252,805,071,646,248,400 | -2,693,042,920,911,606,000 | 41.99661 | 79 | 0.660123 | false |
huoxudong125/poedit | deps/boost/tools/build/test/core_at_file.py | 45 | 1341 | #!/usr/bin/python
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(["-ffile.jam"], pass_toolset=0)
t.write("file.jam", """\
name = n1 n2 ;
contents = M1 M2 ;
EXIT file: "@(o$(name) .txt:E= test -D$(contents))" : 0 ;
""")
t.run_build_system()
t.expect_output_lines("file: on1 on2 .txt");
t.expect_addition("on1 on2 .txt")
t.expect_content("on1 on2 .txt", " test -DM1 -DM2", True)
t.rm(".")
t.write("file.jam", """\
name = n1 n2 ;
contents = M1 M2 ;
actions run { echo file: "@(o$(name) .txt:E= test -D$(contents))" }
run all ;
""")
t.run_build_system(["-d2"])
t.expect_output_lines(' echo file: "on1 on2 .txt" ');
t.expect_addition("on1 on2 .txt")
t.expect_content("on1 on2 .txt", " test -DM1 -DM2", True)
t.rm(".")
t.write("file.jam", """\
name = n1 n2 ;
contents = M1 M2 ;
file = "@($(STDOUT):E= test -D$(contents)\n)" ;
actions run { $(file) }
run all ;
""")
t.run_build_system(["-d1"])
t.expect_output_lines(" test -DM1 -DM2")
t.rm(".")
t.write("file.jam", """\
name = n1 n2 ;
contents = M1 M2 ;
actions run { @($(STDOUT):E= test -D$(contents)\n) }
run all ;
""")
t.run_build_system(["-d1"])
t.expect_output_lines(" test -DM1 -DM2")
t.cleanup()
| mit | -4,062,252,777,468,054,500 | -981,803,556,178,173,200 | 20.285714 | 67 | 0.614467 | false |
fjbatresv/odoo | addons/fetchmail/__openerp__.py | 260 | 2887 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# mga@openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Email Gateway',
'version' : '1.0',
'depends' : ['mail'],
'author' : 'OpenERP SA',
'category': 'Tools',
'description': """
Retrieve incoming email on POP/IMAP servers.
============================================
Enter the parameters of your POP/IMAP account(s), and any incoming emails on
these accounts will be automatically downloaded into your OpenERP system. All
POP3/IMAP-compatible servers are supported, included those that require an
encrypted SSL/TLS connection.
This can be used to easily create email-based workflows for many email-enabled OpenERP documents, such as:
----------------------------------------------------------------------------------------------------------
* CRM Leads/Opportunities
* CRM Claims
* Project Issues
* Project Tasks
* Human Resource Recruitments (Applicants)
Just install the relevant application, and you can assign any of these document
types (Leads, Project Issues) to your incoming email accounts. New emails will
automatically spawn new documents of the chosen type, so it's a snap to create a
mailbox-to-OpenERP integration. Even better: these documents directly act as mini
conversations synchronized by email. You can reply from within OpenERP, and the
answers will automatically be collected when they come back, and attached to the
same *conversation* document.
For more specific needs, you may also assign custom-defined actions
(technically: Server Actions) to be triggered for each incoming mail.
""",
'website': 'https://www.odoo.com/page/mailing',
'data': [
'fetchmail_data.xml',
'fetchmail_view.xml',
'security/ir.model.access.csv',
'fetchmail_installer_view.xml'
],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,540,997,438,184,560,600 | -3,980,356,959,429,835,300 | 40.84058 | 106 | 0.642536 | false |
bunnyitvn/webptn | tests/regressiontests/model_inheritance_regress/models.py | 60 | 4929 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __str__(self):
return "%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, parent_link=True)
class Supplier(models.Model):
restaurant = models.ForeignKey(Restaurant)
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier,related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __str__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
@python_2_unicode_compatible
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __str__(self):
return self.base_name
@python_2_unicode_compatible
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __str__(self):
return "PK = %d, base_name = %s, derived_name = %s" \
% (self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = 'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __str__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField()
class TrainStation(Station):
zone = models.IntegerField()
class User(models.Model):
username = models.CharField(max_length=30, unique=True)
class Profile(User):
profile_id = models.AutoField(primary_key=True)
extra = models.CharField(max_length=30, blank=True)
| bsd-3-clause | -1,846,333,800,414,375,200 | 314,498,908,115,006,400 | 25.788043 | 76 | 0.703591 | false |
xydinesh/flask-restful | jinja2/testsuite/regression.py | 90 | 7583 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.regression
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests corner cases and bugs.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Template, Environment, DictLoader, TemplateSyntaxError, \
TemplateNotFound, PrefixLoader
env = Environment()
class CornerTestCase(JinjaTestCase):
def test_assigned_scoping(self):
t = env.from_string('''
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{{- item -}}
''')
assert t.render(item=42) == '[1][2][3][4]42'
t = env.from_string('''
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{%- set item = 42 %}
{{- item -}}
''')
assert t.render() == '[1][2][3][4]42'
t = env.from_string('''
{%- set item = 42 %}
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{{- item -}}
''')
assert t.render() == '[1][2][3][4]42'
def test_closure_scoping(self):
t = env.from_string('''
{%- set wrapper = "<FOO>" %}
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{{- wrapper -}}
''')
assert t.render() == '[1][2][3][4]<FOO>'
t = env.from_string('''
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{%- set wrapper = "<FOO>" %}
{{- wrapper -}}
''')
assert t.render() == '[1][2][3][4]<FOO>'
t = env.from_string('''
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{{- wrapper -}}
''')
assert t.render(wrapper=23) == '[1][2][3][4]23'
class BugTestCase(JinjaTestCase):
def test_keyword_folding(self):
env = Environment()
env.filters['testing'] = lambda value, some: value + some
assert env.from_string("{{ 'test'|testing(some='stuff') }}") \
.render() == 'teststuff'
def test_extends_output_bugs(self):
env = Environment(loader=DictLoader({
'parent.html': '(({% block title %}{% endblock %}))'
}))
t = env.from_string('{% if expr %}{% extends "parent.html" %}{% endif %}'
'[[{% block title %}title{% endblock %}]]'
'{% for item in [1, 2, 3] %}({{ item }}){% endfor %}')
assert t.render(expr=False) == '[[title]](1)(2)(3)'
assert t.render(expr=True) == '((title))'
def test_urlize_filter_escaping(self):
tmpl = env.from_string('{{ "http://www.example.org/<foo"|urlize }}')
assert tmpl.render() == '<a href="http://www.example.org/<foo">http://www.example.org/<foo</a>'
def test_loop_call_loop(self):
tmpl = env.from_string('''
{% macro test() %}
{{ caller() }}
{% endmacro %}
{% for num1 in range(5) %}
{% call test() %}
{% for num2 in range(10) %}
{{ loop.index }}
{% endfor %}
{% endcall %}
{% endfor %}
''')
assert tmpl.render().split() == map(unicode, range(1, 11)) * 5
def test_weird_inline_comment(self):
env = Environment(line_statement_prefix='%')
self.assert_raises(TemplateSyntaxError, env.from_string,
'% for item in seq {# missing #}\n...% endfor')
def test_old_macro_loop_scoping_bug(self):
tmpl = env.from_string('{% for i in (1, 2) %}{{ i }}{% endfor %}'
'{% macro i() %}3{% endmacro %}{{ i() }}')
assert tmpl.render() == '123'
def test_partial_conditional_assignments(self):
tmpl = env.from_string('{% if b %}{% set a = 42 %}{% endif %}{{ a }}')
assert tmpl.render(a=23) == '23'
assert tmpl.render(b=True) == '42'
def test_stacked_locals_scoping_bug(self):
env = Environment(line_statement_prefix='#')
t = env.from_string('''\
# for j in [1, 2]:
# set x = 1
# for i in [1, 2]:
# print x
# if i % 2 == 0:
# set x = x + 1
# endif
# endfor
# endfor
# if a
# print 'A'
# elif b
# print 'B'
# elif c == d
# print 'C'
# else
# print 'D'
# endif
''')
assert t.render(a=0, b=False, c=42, d=42.0) == '1111C'
def test_stacked_locals_scoping_bug_twoframe(self):
t = Template('''
{% set x = 1 %}
{% for item in foo %}
{% if item == 1 %}
{% set x = 2 %}
{% endif %}
{% endfor %}
{{ x }}
''')
rv = t.render(foo=[1]).strip()
assert rv == u'1'
def test_call_with_args(self):
t = Template("""{% macro dump_users(users) -%}
<ul>
{%- for user in users -%}
<li><p>{{ user.username|e }}</p>{{ caller(user) }}</li>
{%- endfor -%}
</ul>
{%- endmacro -%}
{% call(user) dump_users(list_of_user) -%}
<dl>
<dl>Realname</dl>
<dd>{{ user.realname|e }}</dd>
<dl>Description</dl>
<dd>{{ user.description }}</dd>
</dl>
{% endcall %}""")
assert [x.strip() for x in t.render(list_of_user=[{
'username':'apo',
'realname':'something else',
'description':'test'
}]).splitlines()] == [
u'<ul><li><p>apo</p><dl>',
u'<dl>Realname</dl>',
u'<dd>something else</dd>',
u'<dl>Description</dl>',
u'<dd>test</dd>',
u'</dl>',
u'</li></ul>'
]
def test_empty_if_condition_fails(self):
self.assert_raises(TemplateSyntaxError, Template, '{% if %}....{% endif %}')
self.assert_raises(TemplateSyntaxError, Template, '{% if foo %}...{% elif %}...{% endif %}')
self.assert_raises(TemplateSyntaxError, Template, '{% for x in %}..{% endfor %}')
def test_recursive_loop_bug(self):
tpl1 = Template("""
{% for p in foo recursive%}
{{p.bar}}
{% for f in p.fields recursive%}
{{f.baz}}
{{p.bar}}
{% if f.rec %}
{{ loop(f.sub) }}
{% endif %}
{% endfor %}
{% endfor %}
""")
tpl2 = Template("""
{% for p in foo%}
{{p.bar}}
{% for f in p.fields recursive%}
{{f.baz}}
{{p.bar}}
{% if f.rec %}
{{ loop(f.sub) }}
{% endif %}
{% endfor %}
{% endfor %}
""")
def test_correct_prefix_loader_name(self):
env = Environment(loader=PrefixLoader({
'foo': DictLoader({})
}))
try:
env.get_template('foo/bar.html')
except TemplateNotFound, e:
assert e.name == 'foo/bar.html'
else:
assert False, 'expected error here'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CornerTestCase))
suite.addTest(unittest.makeSuite(BugTestCase))
return suite
| apache-2.0 | -6,643,126,065,331,361,000 | 3,088,094,246,879,449,600 | 28.737255 | 109 | 0.44191 | false |
pigeonflight/strider-plone | docker/appengine/lib/django-1.2/django/contrib/localflavor/jp/jp_prefectures.py | 543 | 2089 | from django.utils.translation import ugettext_lazy
JP_PREFECTURES = (
('hokkaido', ugettext_lazy('Hokkaido'),),
('aomori', ugettext_lazy('Aomori'),),
('iwate', ugettext_lazy('Iwate'),),
('miyagi', ugettext_lazy('Miyagi'),),
('akita', ugettext_lazy('Akita'),),
('yamagata', ugettext_lazy('Yamagata'),),
('fukushima', ugettext_lazy('Fukushima'),),
('ibaraki', ugettext_lazy('Ibaraki'),),
('tochigi', ugettext_lazy('Tochigi'),),
('gunma', ugettext_lazy('Gunma'),),
('saitama', ugettext_lazy('Saitama'),),
('chiba', ugettext_lazy('Chiba'),),
('tokyo', ugettext_lazy('Tokyo'),),
('kanagawa', ugettext_lazy('Kanagawa'),),
('yamanashi', ugettext_lazy('Yamanashi'),),
('nagano', ugettext_lazy('Nagano'),),
('niigata', ugettext_lazy('Niigata'),),
('toyama', ugettext_lazy('Toyama'),),
('ishikawa', ugettext_lazy('Ishikawa'),),
('fukui', ugettext_lazy('Fukui'),),
('gifu', ugettext_lazy('Gifu'),),
('shizuoka', ugettext_lazy('Shizuoka'),),
('aichi', ugettext_lazy('Aichi'),),
('mie', ugettext_lazy('Mie'),),
('shiga', ugettext_lazy('Shiga'),),
('kyoto', ugettext_lazy('Kyoto'),),
('osaka', ugettext_lazy('Osaka'),),
('hyogo', ugettext_lazy('Hyogo'),),
('nara', ugettext_lazy('Nara'),),
('wakayama', ugettext_lazy('Wakayama'),),
('tottori', ugettext_lazy('Tottori'),),
('shimane', ugettext_lazy('Shimane'),),
('okayama', ugettext_lazy('Okayama'),),
('hiroshima', ugettext_lazy('Hiroshima'),),
('yamaguchi', ugettext_lazy('Yamaguchi'),),
('tokushima', ugettext_lazy('Tokushima'),),
('kagawa', ugettext_lazy('Kagawa'),),
('ehime', ugettext_lazy('Ehime'),),
('kochi', ugettext_lazy('Kochi'),),
('fukuoka', ugettext_lazy('Fukuoka'),),
('saga', ugettext_lazy('Saga'),),
('nagasaki', ugettext_lazy('Nagasaki'),),
('kumamoto', ugettext_lazy('Kumamoto'),),
('oita', ugettext_lazy('Oita'),),
('miyazaki', ugettext_lazy('Miyazaki'),),
('kagoshima', ugettext_lazy('Kagoshima'),),
('okinawa', ugettext_lazy('Okinawa'),),
)
| mit | -9,070,141,821,371,749,000 | -1,045,339,996,097,357,200 | 39.960784 | 50 | 0.587362 | false |
devanshdalal/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 64 | 3706 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#         Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause | 7,524,857,791,127,874,000 | -2,962,990,622,768,513,500 | 31.79646 | 79 | 0.62952 | false |
pmidford/arachadmin | models/db.py | 1 | 17623 | import os
from ConfigParser import SafeConfigParser
defaults = dict(
host="localhost",
user="user",
password="userpass",
dbname="arachadmin")
conf = SafeConfigParser(defaults)
user = password = dbname = host = ''
if os.path.isfile("applications/%s/private/localconfig" % request.application):
conf.read("applications/%s/private/localconfig" % request.application)
host = conf.get("db", "host")
user = conf.get("db", "user")
password = conf.get("db", "password")
dbname = conf.get("db", "dbname")
else:
conf.read("applications/%s/private/config" % request.application)
host = conf.get("db", "host")
user = conf.get("db", "user")
password = conf.get("db", "password")
dbname = conf.get("db", "dbname")
db = DAL("mysql://%s:%s@%s/%s" % (user, password, host, dbname), migrate=False)
# table of source and generated id's - this makes sure that generated id's are
# unique across appropriate tables (publications, individuals, participants, etc.)
db.define_table(
'uidset',
Field('source_id', 'string', length=256),
Field('generated_id', 'string', length=64, unique=True),
Field('ref_id', 'string', length=64, unique=True),
migrate=False)
# table of curation status (steps in curation process)
db.define_table(
'publication_curation',
Field('status', 'string', writable=False, length=31),
format='%(status)s',
migrate=False)
# minimal implementation of authorship - primarily for display and query
db.define_table(
'author',
Field('last_name', 'string', writable=False, length=63),
Field('given_names', 'string', writable=False, length=63),
Field('assigned_id', 'string'),
Field('generated_id', 'string', writable=False),
Field('merge_set', 'reference author_merge', ondelete='NO ACTION'),
format='%(last_name)s',
migrate=False)
# used for joining different representations of one author
db.define_table(
'author_merge',
Field('preferred', 'reference author', ondelete='NO ACTION'),
format='%(id)s',
migrate=False)
def render_citation(p):
"""
    generates a (hopefully unique) citation string for a publication
that will fit in a dropdown
"""
import publication_tools
return publication_tools.make_citation(p.author_list,p.publication_year)
# main table for publications - reflects a spreadsheet used previously
db.define_table(
'publication',
Field('publication_type', 'string', length=31),
Field('dispensation', 'string', length=31),
Field('downloaded', 'date'),
Field('reviewed', 'date'),
Field('title', 'text', length=255),
Field('alternate_title', 'text', length=255),
Field('author_list', 'text'),
Field('editor_list', 'text'),
Field('source_publication', 'string'),
Field('volume', 'integer'),
Field('issue', 'string'),
Field('serial_identifier', 'string'),
Field('page_range', 'string'),
Field('publication_date', 'string'),
Field('publication_year', 'string'),
Field('doi', 'string'),
Field('generated_id', 'string', writable=False),
Field(
'curation_status',
'reference publication_curation',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'publication_curation.id',
'%(status)s'))),
Field('curation_update', 'datetime'),
Field('uidset','reference uidset',
requires=IS_EMPTY_OR(IS_IN_DB(db,'uidset.id','%(id)s'))),
format=render_citation,
migrate=False)
# allows ordering of authors on a publication
db.define_table('authorship',
Field('publication',
'reference publication',
requires=IS_IN_DB(db,
'publication.id',
'%(author_list)s'),
ondelete='CASCADE'),
Field('author',
'reference author',
requires=IS_IN_DB(db,
'author.id',
'%(last_name)s, %(first_name)s'),
ondelete='CASCADE'),
Field('position', 'integer'),
format='%(publication)s',
migrate=False)
# should capture synonyms of a term
db.define_table('synonym',
Field('text', 'string', length=512),
Field('term', 'reference term'),
migrate=False)
# represents an individual organism, body part, substrate, etc.
db.define_table('individual',
Field('source_id', 'string', length=512),
Field('generated_id', 'string', length=512, writable=False),
Field('label', 'string', length=64),
Field('term', 'reference term'),
Field('uidset', 'reference uidset'),
migrate=False)
def render_narrative(n):
"""
generates a printable representation for a narrative
"""
if n.label:
return n.label
return 'unlabelled narrative'
db.define_table('narrative',
Field('publication', 'reference publication', ondelete='NO ACTION'),
Field('label', 'string', length=64),
Field('description', 'string', length=512),
Field('generated_id', 'string', length=512, writable=False),
Field('uidset', 'reference uidset'),
format='%(label)s',
migrate=False)
# individuals are necessarily associated with at least one narrative
db.define_table('individual2narrative',
Field('individual', 'reference individual'),
Field('narrative', 'reference narrative'),
migrate=False)
# list of named subsets of concept space (taxonomy, chemistry, etc.)
db.define_table('domain',
Field('name', 'string'),
format='%(name)s',
migrate=False)
# people or groups responsible for source ontologies
db.define_table('authority',
Field('name', 'string'),
Field('uri', 'string'),
Field('domain', 'reference domain', ondelete='NO ACTION'),
format='%(name)s',
migrate=False)
# OWL (object?) properties from source ontologies
db.define_table('property',
Field('source_id', 'string', length=256),
Field('authority',
'reference authority',
ondelete='NO ACTION'),
Field('label', 'string', length=64),
Field('generated_id', # bogus
'string',
length=64,
writable=False),
Field('comment','string', length=512),
format='%(label)s',
migrate=False)
# util; probably should be somewhere else
def get_property(uri):
return db(db.property.source_id == uri).select().first().id
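# Usage sketch (illustrative; the URI below is the common OBO "part of"
# property and may not exist in a given database):
#   part_of = get_property('http://purl.obolibrary.org/obo/BFO_0000050')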
# owl classes from source ontologies
db.define_table('term',
Field('source_id', 'string'),
Field('authority',
'reference authority',
ondelete='NO ACTION'),
Field('domain',
'reference domain',
ondelete='NO ACTION'),
Field('label',
'string'),
Field('generated_id', # bogus
'string',
writable=False),
Field('comment', 'string'),
Field('uidset', 'reference uidset'),
format='%(label)s',
migrate=False)
# set of domains used for defining filters
behavior_domain_id = db(db.domain.name == 'behavior').select().first().id
behavior_domain = db(db.term.domain == behavior_domain_id)
anatomy_domain_id = db(db.domain.name == 'anatomy').select().first().id
anatomy_domain = db(db.term.domain == anatomy_domain_id) # need to fix this
taxonomy_domain_id = db(db.domain.name == 'taxonomy').select().first().id
taxon_domain = db(db.term.domain == taxonomy_domain_id)
evidence_domain_id = db(db.domain.name == 'evidence').select().first().id
evidence_domain = db(db.term.domain == evidence_domain_id)
environment_domain_id = db(db.domain.name == 'environment').select().first().id
# still incomplete: substrate terms may span more domains than environment
substrate_domains = db(db.term.domain == environment_domain_id)
# participant may be individuals or one of several class expression types
# this table holds the options
db.define_table('participant_type',
Field('label', 'string', length=20),
migrate=False)
# holds a class or individual (see pelement2term, pelement2individual)
db.define_table('participant_element',
Field('type',
'reference participant_type',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'participant_type.id',
'%(label)s'))),
Field('participant',
'reference participant',
ondelete='NO ACTION'),
migrate=False)
# links parent to child participant_elements - chains may branch though none do
# yet
db.define_table('participant_link',
Field('child', 'reference participant_element', ondelete='SET NULL'),
Field('parent', 'reference participant_element', ondelete='SET NULL'),
Field('property', 'reference property', ondelete='NO ACTION'),
migrate=False)
# associates a pelement with a term
db.define_table('pelement2term',
Field('element',
'reference participant_element',
ondelete='NO ACTION'),
Field('term',
'reference term',
ondelete='NO ACTION'),
migrate=False)
# associates a pelement with an individual
db.define_table('pelement2individual',
Field('element',
'reference participant_element',
ondelete='NO ACTION'),
Field('individual',
'reference individual',
ondelete='NO ACTION'),
migrate=False)
# this is used to capture taxa that aren't in NCBI yet
db.define_table('taxon',
Field('name', 'string', length=512),
Field('author', 'string', length=512),
Field('year', 'string', length=512),
Field('external_id', 'string', length=64),
Field('authority',
'reference authority',
ondelete='NO ACTION'),
Field('parent',
'reference taxon',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'taxon.id',
'%(name)s'))),
Field('generated_id',
'string',
length=512,
writable=False),
Field('parent_term', 'reference term'),
Field('merged', 'boolean', writable=False),
Field('merge_status', 'string', length=64),
Field('uidset', 'reference uidset'),
format='%(name)s',
migrate=False)
db.taxon.parent_term.requires = IS_EMPTY_OR(IS_IN_DB(taxon_domain,
'term.id',
'%(label)s'))
db.define_table('taxonomy_authority',
Field('name', 'string', length=512),
format='%(name)s',
migrate=False)
db.define_table('evidence_code',
Field('long_name', 'string', length=512),
Field('obo_id', 'string', length=512),
Field('code', 'string', length=512),
migrate=False)
def render_participant(r):
"""
generates pidgin functional owl syntax for a participant
"""
if r.label:
return r.label
if r.quantification == 'some':
quan = 'some'
else:
quan = ''
if r.anatomy and r.taxon:
head = "%s of %s" % (db.term(r.anatomy).label, db.term(r.taxon).label)
elif r.taxon:
head = str(db.term(r.taxon).label)
elif r.substrate:
head = str(db.term(r.substrate).label)
else:
head = "Undefined participant"
return "%s %s" % (quan, head)
VALID_QUANTIFICATIONS = ["some", "individual"]
# although not empty, the taxon, anatomy, and substrate fields are obsolete
db.define_table('participant',
Field('taxon', 'reference term'),
Field('anatomy', 'reference term'),
Field('substrate',
'reference term',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'term.id',
'%(label)s'))),
Field('quantification',
'string',
length=16,
requires=IS_NULL_OR(IS_IN_SET(VALID_QUANTIFICATIONS))),
Field('label', 'string'),
Field('publication_taxon', 'string'),
Field('publication_anatomy', 'string'),
Field('publication_substrate', 'string'),
Field('generated_id', 'string', writable=False),
Field('publication_text', 'string', length=512),
##TODO: remove
Field('participation_property', 'reference property'),
Field('head_element',
'reference participant_element',
writable=False),
Field('uidset', 'reference uidset'),
format=render_participant,
migrate=False)
db.participant.taxon.requires = IS_EMPTY_OR(IS_IN_DB(taxon_domain,
'term.id',
'%(label)s'))
db.participant.anatomy.requires = IS_EMPTY_OR(IS_IN_DB(anatomy_domain,
'term.id',
'%(label)s'))
# substrate_domains is the correct set here, if it were correct
db.participant.substrate.requires = IS_EMPTY_OR(IS_IN_DB(substrate_domains,
'term.id',
'%(label)s'))
db.define_table('claim',
Field('publication', db.publication),
Field('narrative',
'reference narrative',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'narrative.id',
render_narrative))),
Field('publication_behavior', 'string'),
Field('behavior_term', 'reference term', notnull=True),
Field('primary_participant', # remove?
'reference participant',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'participant.id',
render_participant))),
Field('evidence', 'reference evidence_code'),
Field('generated_id', 'string', writable=False),
Field('uidset', 'reference uidset'),
format='Claim: %(generated_id)s',
migrate=False)
db.claim.behavior_term.requires = IS_IN_DB(behavior_domain,
'term.id',
'%(label)s')
db.define_table('participant2claim',
Field('claim', 'reference claim'),
Field('participant', 'reference participant'),
Field('property', 'reference property'),
migrate=False)
# defines the source of a supporting ontology
# name - human friendly name of the ontology
# source_url - cannonical location for loading the ontology
# (e.g., a purl that redirects)
# processing - specifies a set of rules for processing the ontology file
# last_update - timestamp on the file in the cannonical location
# last time it was checked
# authority - generally the maintainer of the ontology
# domain - semantic domain (e.g., taxonomy, behavior, etc.)
# covered by the ontology
db.define_table('ontology_source',
Field('name', 'string', length=512),
Field('source_url', 'string', length=512),
Field('processing',
'reference ontology_processing',
requires=IS_EMPTY_OR(IS_IN_DB(db,
'ontology_processing.id',
'%(type_name)s'))),
Field('last_update', 'datetime', writable=False),
Field('authority', 'reference authority'),
Field('domain', 'reference domain', ondelete='NO ACTION'),
format='Ontology: %(name)',
migrate=False)
db.define_table('ontology_processing',
Field('type_name', 'string', length=512),
format='Ontology processing: %(type_name)',
migrate=False)
| mit | 8,249,227,578,436,438,000 | -229,882,773,585,969,380 | 38.425056 | 86 | 0.520399 | false |
jalilag/apspir | objedit/gnosis/util/convert/txt2dw.py | 2 | 13950 | #!/usr/bin/python
__oneliner__="Convert ASCII source files for XML presentation"
__longdoc__="""
This program is not yet particularly smart, and will produce
undefined output (or even traceback) if the source file does
not meet expected format. With time, it may get better about
this.
Usage: python txt2dw.py < MyArticle.txt > MyArticle.xml
------------------------------------------------------------------------
Expected input format for [SMART_ASCII]
#--- Paragraph rules: ---#
- Title occurs on first line of document, unindented and in
all caps.
- Subtitle occurs on second line, unindented and in mixed
case.
- Name, affiliation, date occur, unindented and in mixed
case, on lines 4-6.
- Section headings are preceded by two blank lines,
unindented, in all caps, followed by one line of 72
dashes and one blank line.
- Regular text paragraphs are block style, and are indented
two spaces.
- Block quotations are indented four spaces, rather than
the two of original text.
- Code samples are indented six spaces (with internal
indentation of code lines in the proper relative
position).
- Code samples may begin with a line indicating a title for
that block. If present, this title is indented the same
six spaces as the rest of the block, and begins and ends
with a pound sign ('#'). Dashes are used to fill space
      within the title for ASCII aesthetics.
-
#--- Character rules: ---#
- All character markup has the pattern:
whitespace-symbol-words(s)-symbol-whitespace
Examples are given, and this can be searched for
programmatically. The use of character markup applies
*only* to text paragraphs, *not* to code samples!
- Asterisks are used for an inflectional emphasis. For
example, "All good boys *deserve* fudge." This would
typically be indicated typographically with boldface or
italics.
- Underscores are used for book/journal citation. For
example, "Knuth's _Art of Computer Programming_ is
essential." This would typically be indicated
typographically with italics or underline.
- Single-stroke is used to indicate filenames and function
names. For example, "Every C program has a 'main()'
function." This might be indicated typographically by a
fixed font, by boldface, or simply by single-quotes.
- Braces are used to indicate a module, package or library.
For example, "The [cre] module will replace [re] in
Python 1.6." This will probably be indicated
typographically as a fixed font.
- Double-stroke is used as either inline quotation or scare
quotes. For example, "It may not be as "easy" as
suggested." In either case, typographic quotes are
probably the best format; italics would make some sense
also.
- Parenthesis are used, and should be preserved as is.
- Angle brackets and curly brackets have no special meaning
yet. I may choose to use those if there is something I
think the above forms do not capture.
- Em-dashes, diacritics, ligatures, and typographic
quotations are not available, and standard ASCII
approximations are used.
-
#--- Miscellany: ---#
- URL's are automatically transformed into a hotlink.
Basically, anything that starts with 'http://', 'ftp://',
'file://' or 'gopher://' looks like a URL to the program.
"""
__doc__=__oneliner__+__longdoc__
__comments__="""
This script utilizes the services of the Marc-Andre Lemburg's Python
Highlighter for HTML (v0.5+) [py2html]. [py2html] in turn relies on
Just van Rossum's [PyFontify] (v.0.3.1+) If these are not present,
Txt2HTML hopes to degrade gracefully, but will not provide syntax
highlighting for Python source code.
"""
__author__=["David Mertz (mertz@gnosis.cx)",]
__copyright__="""
This file is released to the public domain. I (dqm) would
appreciate it if you choose to keep derived works under terms
that promote freedom, but obviously am giving up any rights
to compel such.
"""
__version__="version 0.1 (August 2001)"
#-- import stuff, or at least try
import sys, re, string, time
from urllib import urlopen
from cStringIO import *
try:
import dw_colorize
py_formatter = 1
except:
py_formatter = 0
#-- Define some XML boilerplate
code_block = """
<heading refname="code1" type="code" toc="yes">%s</heading>
<code type="section">%s</code>"""
#-- End of boilerplate
def main():
#-- Setup the regex list
blankln = re.compile("^$")
headln = re.compile("\S") # no indent
textln = re.compile(" ? ?\S") # 1-3 spaces indent
quoteln = re.compile(" \S") # 4 spaces indent
codeln = re.compile("^ ") # 6+ spaces indent
re_list = (blankln, headln, textln, quoteln, codeln)
#-- Process as needed for input type
blocks = Make_Blocks(sys.stdin, re_list)
Process_Blocks(blocks)
def Make_Blocks(fhin, re_list):
#-- Initialize the globals
global state, blocks, bl_num, newblock
state = "HEADER"
blocks = [""]
bl_num = 0
newblock = 1
#-- Unpack the regex list
blankln, headln, textln, quoteln, codeln = re_list
#-- Break the file into relevant chunks
for line in fhin.readlines():
line = string.rstrip(line)+'\n' # Need to normalize line endings!
if state == "HEADER": # blank line means new block of ??
if blankln.match(line): newblock = 1
elif textln.match(line): startText(line)
elif quoteln.match(line): startQuote(line)
elif codeln.match(line): startCode(line)
else:
if newblock: startHead(line)
else: blocks[bl_num] += line
elif state == "TEXT": # blank line means new block of ??
if blankln.match(line): newblock = 1
elif headln.match(line): startHead(line)
elif quoteln.match(line): startQuote(line)
elif codeln.match(line): startCode(line)
else:
if newblock: startText(line)
else: blocks[bl_num] += line
elif state == "QUOTE": # blank line means new block of ??
if blankln.match(line): newblock = 1
elif headln.match(line): startHead(line)
elif textln.match(line): startText(line)
# do not transition quote->code without a blank line
# elif codeln.match(line): startCode(line)
else:
if newblock: startQuote(line)
else: blocks[bl_num] += line
elif state == "CODE": # blank line does not change state
if blankln.match(line): blocks[bl_num] = blocks[bl_num] + line
elif headln.match(line): startHead(line)
elif textln.match(line): startText(line)
else: blocks[bl_num] += line
else:
raise ValueError, "unexpected input block state: "+state
return blocks
def Process_Blocks(blocks):
# Process all blocks, then write out headers and body
print '<?xml version="1.0" encoding="UTF-8"?>'
print '<?xml-stylesheet'
print ' href="http://gnosis.cx/publish/programming/dW.css"'
print ' type="text/css"?>'
print '<article ratings="auto" toc="auto">'
# Title elements
head = Detag(blocks[1].replace('[HEAD]',''))
maintitle, subtitle = head.split('\n')[:2]
series, paper = maintitle.split(':')
#--
print ' <seriestitle>%s</seriestitle>' % series
print ' <papertitle>%s</papertitle>' % Typography(paper)
print ' <subtitle>%s</subtitle>' % Typography(subtitle)
# Author and category elements
author = Detag(blocks[2].replace('[HEAD]',''))
name, status, date = author.split('\n')[:3]
jobtitle, company = status.split(',',1)
month, year = date.split()
bio = blocks.pop().replace('[TEXT]','').split('\n')
# Expect: {Picture of Author: http://gnosis.cx/cgi-bin/img_dqm.cgi}
biopic = bio[0].split(':',1)[1].strip()[:-1]
biotxt = Detag(' '+'\n '.join(bio[1:-1]))
#--
print ' <author company="%s"' % company
print ' jobtitle="%s"' % jobtitle
print ' name="%s">' % name
print ' <img src="%s" />' % biopic
print '%s' % URLify(Typography(Detag(biotxt)))
print ' </author>'
print ' <date month="%s" year="%s" />' % (month,year)
print ' <zone name="xml" />'
print ' <meta name="KEYWORDS" content="Mertz" />'
# The abstract goes here
block = blocks[3]
fixabstract(block[6:])
# Massage each block as needed
for block in blocks[4:]:
if block[:6]=='[CODE]': fixcode(block[6:])
elif block[:6]=='[QUOT]': fixquote(block[6:])
elif block[:6]=='[TEXT]': fixtext(block[6:])
elif block[:6]=='[HEAD]': fixhead(block[6:])
else: raise ValueError, "unexpected block marker: "+block[:6]
print '</article>'
#-- Functions for start of block-type state
def startHead(line):
global state, blocks, bl_num, newblock
state = "HEADER"
bl_num = bl_num+1
blocks.append('[HEAD]'+line)
newblock = 0
def startText(line):
global state, blocks, bl_num, newblock
state = "TEXT"
bl_num = bl_num+1
blocks.append('[TEXT]'+line)
newblock = 0
def startQuote(line):
global state, blocks, bl_num, newblock
state = "QUOTE"
bl_num = bl_num+1
blocks.append('[QUOT]'+line)
newblock = 0
def startCode(line):
global state, blocks, bl_num, newblock
state = "CODE"
bl_num = bl_num+1
blocks.append('[CODE]'+line)
newblock = 0
def fixcode(block, doctype='UNKNOWN'):
# Some XML preparation
block = LeftMargin(block)
# Pull out title if available
re_title = re.compile('^#\-+ (.+) \-+#$', re.M)
if_title = re_title.match(block)
if if_title:
title = if_title.group(1)
block = re_title.sub('', block) # take title out of code
else: title = ''
# Process the code block with dw_colorize (if possible and appropriate)
if py_formatter and (string.count(title,'.py') or
string.count(title,'Python') or
string.count(title,'python') or
string.count(title,'py_') or
doctype == 'PYTHON'):
print ('<p><heading refname="code1" type="code" toc="yes">%s</heading>'
% Typography(title))
print '<code type="section">',
dw_colorize.Parser(block.rstrip()).toXML()
print '</code></p>'
# elif the-will-and-the-way-is-there-to-format-language-X:
# elif the-will-and-the-way-is-there-to-format-language-Y:
else:
block = Detag(block)
print code_block % (Typography(title), block.strip())
def fixquote(block):
print '<blockquote>\n%s</blockquote>' % URLify(Typography(Detag(block)))
def fixabstract(block):
print '<abstract>\n%s</abstract>' % URLify(Typography(Detag(block)))
def fixtext(block):
print '<p>\n%s</p>' % URLify(Typography(NoRule(Detag(block))))
def fixhead(block):
print '\n%s' % Typography(AdjustCaps(NoRule(Detag(block)))+' ')
#-- Utility functions for text transformation
def AdjustCaps(txt):
# Bare header is block in ALLCAPS (excluding [module] names)
non_lit = re.sub("['[].*?[]']", '', txt)
if non_lit == string.upper(non_lit):
return ('<heading refname="h1" type="major" toc="yes">%s</heading>\n'
% capwords(txt))
else:
return ('<heading refname="h2" type="minor" toc="yes">%s</heading>\n'
% URLify(txt))
def capwords(txt):
"""string.capwords does'nt do what we want for 'quoted' stuff"""
words = string.split(txt)
for n in range(len(words)):
if not words[n][0] in "'[": words[n] = string.capwords(words[n])
return string.join(words, ' ')
def LeftMargin(txt):
"""Remove as many leading spaces as possible from whole block"""
for l in range(12,-1,-1):
re_lead = '(?sm)'+' '*l+'\S'
if re.match(re_lead, txt): break
txt = re.sub('(?sm)^'+' '*l, '', txt)
return txt
def Detag(txt):
txt = string.replace(txt, '&', '&')
txt = string.replace(txt, '<', '<')
txt = string.replace(txt, '>', '>')
return txt
def URLify(txt):
txt0 = txt
# Convert special IMG URL's, e.g. {Alt Text: http://site.org/img.png}
# (don't actually try quite as hard to validate URL though)
txt = re.sub('(?sm){(.*?):\s*(http://.*)}', '<img alt="\\1" src="\\2" />', txt)
# Convert regular URL's
txt = re.sub('(?:[^="])((?:http|ftp|gopher|file)://(?:[^ \n\r<\)]+))(\s)',
' <a href="\\1">\\1</a>\\2', txt)
return txt
def NoRule(txt):
return re.compile('^-+$', re.M).sub('', txt)
def Typography(txt):
# [module] names
r = re.compile(r"""([\(\s'/">]|^)\[(.*?)\]([<\s\.\),:;'"?!/-])""", re.M | re.S)
txt = r.sub('\\1<code type="inline"><b>\\2</b></code>\\3',txt)
# *strongly emphasize* words
r = re.compile(r"""([\(\s'/"]|^)\*(.*?)\*([\s\.\),:;'"?!/-])""", re.M | re.S)
txt = r.sub('\\1<b>\\2</b>\\3', txt)
# -emphasize- words
r = re.compile(r"""([\(\s'/"]|^)-(.*?)-([\s\.\),:;'"?!/])""", re.M | re.S)
txt = r.sub('\\1<i>\\2</i>\\3', txt)
# _Book Title_ citations
r = re.compile(r"""([\(\s'/"]|^)_(.*?)_([\s\.\),:;'"?!/-])""", re.M | re.S)
txt = r.sub('\\1<attribution>\\2</attribution>\\3', txt)
# 'Function()' names
r = re.compile(r"""([\(\s/"]|^)'(.*?)'([\s\.\),:;"?!/-])""", re.M | re.S)
txt = r.sub('\\1<code type="inline">\\2</code>\\3', txt)
return txt
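# Illustrative examples, not from the original source, of the inline markup
# conventions Typography() rewrites (shown as input -> output):
#   [re]              -> <code type="inline"><b>re</b></code>
#   *really*          -> <b>really</b>
#   -gently-          -> <i>gently</i>
#   _Design Patterns_ -> <attribution>Design Patterns</attribution>
#   'foo()'           -> <code type="inline">foo()</code>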
#-- The module level code
if __name__ == '__main__':
main()
| lgpl-2.1 | 6,663,745,170,754,736,000 | 5,877,217,547,062,799,000 | 37.96648 | 83 | 0.586953 | false |
Instagram/cassandra | pylib/cqlshlib/test/winpty.py | 62 | 1757 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from cStringIO import StringIO
from Queue import Queue, Empty
class WinPty:
def __init__(self, stdin):
self._s = stdin
self._q = Queue()
def _read_next_char(stdin, queue):
while True:
char = stdin.read(1) # potentially blocking read
if char:
queue.put(char)
else:
break
self._t = Thread(target=_read_next_char, args=(self._s, self._q))
self._t.daemon = True
self._t.start() # read characters asynchronously from stdin
def read(self, blksize=-1, timeout=1):
buf = StringIO()
count = 0
try:
while count < blksize or blksize == -1:
next = self._q.get(block=timeout is not None, timeout=timeout)
buf.write(next)
count = count + 1
except Empty:
pass
return buf.getvalue()
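if __name__ == '__main__':
    # Illustrative demo, not part of the original module: poll stdin for up
    # to half a second per read without blocking the caller indefinitely.
    import sys
    pty = WinPty(sys.stdin)
    print 'read: %r' % pty.read(blksize=80, timeout=0.5)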
| apache-2.0 | -7,487,369,980,012,846,000 | 620,767,944,153,615,700 | 34.14 | 78 | 0.631759 | false |
Mahi/EzPyGame | ezpygame/scene.py | 1 | 5091 | class Scene:
"""An isolated scene which can be ran by an application.
Create your own scene by subclassing and overriding any methods.
The hosting :class:`.Application` instance is accessible
through the :attr:`application` property.
Example usage with two scenes interacting:
.. code-block:: python
class Menu(Scene):
def __init__(self):
self.font = pygame.font.Font(...)
def on_enter(self, previous_scene):
self.application.title = 'Main Menu'
self.application.resolution = (640, 480)
self.application.update_rate = 30
def draw(self, screen):
pygame.draw.rect(...)
text = self.font.render(...)
screen.blit(text, ...)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
game_size = self._get_game_size(event.pos)
self.change_scene(Game(game_size))
def _get_game_size(self, mouse_pos_upon_click):
...
class Game(ezpygame.Scene):
title = 'The Game!'
resolution = (1280, 720)
update_rate = 60
def __init__(self, size):
super().__init__()
self.size = size
self.player = ...
...
def on_enter(self, previous_scene):
super().on_enter(previous_scene)
self.previous_scene = previous_scene
def draw(self, screen):
self.player.draw(screen)
for enemy in self.enemies:
...
def update(self, dt):
self.player.move(dt)
...
if self.player.is_dead():
self.application.change_scene(self.previous_scene)
elif self.player_won():
self.application.change_scene(...)
def handle_event(self, event):
... # Player movement etc.
The above two classes use different approaches for changing
the application's settings when the scene is entered:
1. Manually set them in :meth:`on_enter`, as seen in ``Menu``
2. Use class variables, as I did with ``Game``
When using class variables (2), you can leave out any setting
(defaults to ``None``) to not override that particular setting.
If you override :meth:`on_enter` in the subclass, you must call
``super().on_enter(previous_scene)`` to use the class variables.
These settings can further be overridden in individual instances:
.. code-block:: python
my_scene0 = MyScene()
my_scene0.resolution = (1280, 720)
my_scene1 = MyScene(title='My Second Awesome Scene')
"""
title = None
resolution = None
update_rate = None
def __init__(self, title=None, resolution=None, update_rate=None):
self._application = None
if title is not None:
self.title = title
if resolution is not None:
self.resolution = resolution
if update_rate is not None:
self.update_rate = update_rate
@property
def application(self):
"""The host application that's currently running the scene."""
return self._application
def draw(self, screen):
"""Override this with the scene drawing.
:param pygame.Surface screen: screen to draw the scene on
"""
def update(self, dt):
"""Override this with the scene update tick.
:param int dt: time in milliseconds since the last update
"""
def handle_event(self, event):
"""Override this to handle an event in the scene.
All of :mod:`pygame`'s events are sent here, so filtering
should be applied manually in the subclass.
:param pygame.event.Event event: event to handle
"""
def on_enter(self, previous_scene):
"""Override this to initialize upon scene entering.
The :attr:`application` property is initialized at this point,
so you are free to access it through ``self.application``.
Stuff like changing resolution etc. should be done here.
If you override this method and want to use class variables
to change the application's settings, you must call
``super().on_enter(previous_scene)`` in the subclass.
:param Scene|None previous_scene: previous scene to run
"""
for attr in ('title', 'resolution', 'update_rate'):
value = getattr(self, attr)
if value is not None:
setattr(self.application, attr.lower(), value)
def on_exit(self, next_scene):
"""Override this to deinitialize upon scene exiting.
The :attr:`application` property is still initialized at this
point. Feel free to do saving, settings reset, etc. here.
:param Scene|None next_scene: next scene to run
"""
| mit | -1,083,339,437,766,593,300 | -4,568,558,808,469,725,700 | 32.715232 | 70 | 0.573758 | false |
adlius/osf.io | osf_tests/test_management_commands.py | 2 | 14643 | # -*- coding: utf-8 -*-
import mock
import pytest
import time
from collections import OrderedDict
from django.utils import timezone
from addons.osfstorage import settings as osfstorage_settings
from api_tests.utils import create_test_file
from framework.auth import Auth
from osf.management.commands.update_institution_project_counts import update_institution_project_counts
from osf.models import QuickFilesNode, RegistrationSchema
from osf.metrics import InstitutionProjectCounts, UserInstitutionProjectCounts
from osf_tests.factories import (
AuthUserFactory,
InstitutionFactory,
PreprintFactory,
ProjectFactory,
RegionFactory,
UserFactory,
DraftRegistrationFactory,
)
from tests.base import DbTestCase
from osf.management.commands.data_storage_usage import (
process_usages,
)
# Using powers of two so that any combination of file sizes will give a unique total
# If a summary value is incorrect, subtract out the values that are correct and convert
# to binary. Each of the 1s will correspond something that wasn't handled properly.
def next_file_size():
size = 1
while True:
yield size
size *= 2
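# Illustrative sanity check, not part of the original tests: if the expected
# total were 11 bytes and a summary reported only 8, the difference 3 is
# 0b11, which pins the mishandled files to exactly those of sizes 1 and 2.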
class TestDataStorageUsage(DbTestCase):
def setUp(self):
super(TestDataStorageUsage, self).setUp()
self.region_us = RegionFactory(_id='US', name='United States')
@staticmethod
def add_file_version(file_to_version, user, size, version=1):
file_to_version.create_version(user, {
'object': '06d80e' + str(version),
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': size,
'contentType': 'img/png'
}).save()
@pytest.fixture()
def project(self, creator, is_public=True, is_deleted=False, region=None, parent=None):
if region is None:
region = self.region_us
project = ProjectFactory(creator=creator, is_public=is_public, is_deleted=is_deleted)
addon = project.get_addon('osfstorage')
addon.region = region
addon.save()
return project
@pytest.fixture()
def registration(self, project, creator, withdrawn=False):
schema = RegistrationSchema.objects.first()
draft_reg = DraftRegistrationFactory(branched_from=project)
registration = project.register_node(schema, Auth(user=creator), draft_reg)
registration.is_public = True
registration.save()
if withdrawn:
registration.retract_registration(creator)
withdrawal = registration.retraction
token = list(withdrawal.approval_state.values())[0]['approval_token']
with mock.patch('osf.models.AbstractNode.update_search'):
withdrawal.approve_retraction(creator, token)
withdrawal.save()
return registration
@pytest.fixture()
def component(self, parent, user):
return ProjectFactory(creator=user, parent=parent)
@pytest.fixture()
def project_deleted(self, user):
return ProjectFactory(creator=user, is_deleted=True)
@mock.patch('website.settings.ENABLE_ARCHIVER', False)
@pytest.mark.enable_quickfiles_creation
def test_data_storage_usage_command(self):
import logging
logger = logging.getLogger(__name__)
expected_summary_data = OrderedDict([
('date', None),
('total', 0),
('deleted', 0),
('registrations', 0),
('nd_quick_files', 0),
('nd_public_nodes', 0),
('nd_private_nodes', 0),
('nd_preprints', 0),
('nd_supp_nodes', 0),
('canada_montreal', 0),
('australia_sydney', 0),
('germany_frankfurt', 0),
('united_states', 0),
])
user = UserFactory()
user_addon = user.get_addon('osfstorage')
user_addon.default_region_id = self.region_us
region_ca = RegionFactory(_id='CA-1', name=u'Canada - Montréal')
region_de = RegionFactory(_id='DE-1', name='Germany - Frankfurt')
region_au = RegionFactory(_id='AU-1', name='Australia - Sydney')
project_public_us = self.project(creator=user, is_public=True)
small_size = next_file_size()
file_size = next(small_size)
project_public_us_test_file = create_test_file(
target=project_public_us,
user=user,
size=file_size
)
logger.debug(u'Public project, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_public_nodes'] += file_size
expected_summary_data['united_states'] += file_size
file_size = next(small_size)
self.add_file_version(
project_public_us_test_file,
user=user,
size=file_size,
)
logger.debug(u'Public project file version, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_public_nodes'] += file_size
expected_summary_data['united_states'] += file_size
project_private_au = self.project(creator=user, is_public=False, region=region_au)
file_size = next(small_size)
create_test_file(
target=project_private_au,
user=user,
size=file_size
)
logger.debug(u'Private project, AU: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_private_nodes'] += file_size
expected_summary_data['australia_sydney'] += file_size
component_private_small_deleted_de = self.project(
creator=user,
is_public=False,
region=region_de,
parent=project_public_us
)
file_size = next(small_size)
deleted_file = create_test_file(
target=component_private_small_deleted_de,
user=user,
size=file_size,
)
logger.debug('Before deletion: {}'.format(deleted_file.target.title))
deleted_file.delete(user=user, save=True)
logger.debug(u'Deleted project, DE: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['deleted'] += file_size
expected_summary_data['germany_frankfurt'] += file_size
logger.debug('After deletion: {}'.format(deleted_file.target.title))
file_size = next(small_size)
PreprintFactory(creator=user, file_size=file_size) # preprint_us
logger.debug(u'Preprint, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_preprints'] += file_size
expected_summary_data['united_states'] += file_size
user_addon.default_region_id = region_ca
user_addon.save()
file_size = next(small_size)
preprint_with_supplement_ca = PreprintFactory(creator=user, file_size=file_size)
logger.debug(u'Preprint, CA: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_preprints'] += file_size
expected_summary_data['canada_montreal'] += file_size
user_addon.default_region_id = self.region_us
user_addon.save()
supplementary_node_public_au = self.project(creator=user, is_public=True, region=region_au)
preprint_with_supplement_ca.node = supplementary_node_public_au
preprint_with_supplement_ca.save()
file_size = next(small_size)
create_test_file(
target=supplementary_node_public_au,
user=user,
size=file_size
)
logger.debug(u'Public supplemental project of Canadian preprint, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_supp_nodes'] += file_size
expected_summary_data['nd_public_nodes'] += file_size
expected_summary_data['australia_sydney'] += file_size
file_size = next(small_size)
withdrawn_preprint_us = PreprintFactory(creator=user, file_size=file_size)
withdrawn_preprint_us.date_withdrawn = timezone.now()
withdrawn_preprint_us.save()
logger.debug(u'Withdrawn preprint, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_preprints'] += file_size
expected_summary_data['united_states'] += file_size
quickfiles_node_us = QuickFilesNode.objects.get(creator=user)
file_size = next(small_size)
create_test_file(target=quickfiles_node_us, user=user, size=file_size)
logger.debug(u'Quickfile, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['nd_quick_files'] += file_size
expected_summary_data['united_states'] += file_size
file_size = next(small_size)
quickfile_deleted = create_test_file(
filename='deleted_test_file',
target=quickfiles_node_us,
user=user,
size=file_size
)
quickfile_deleted.delete(user=user, save=True)
logger.debug(u'Deleted quickfile, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['deleted'] += file_size
expected_summary_data['united_states'] += file_size
project_to_register_us = self.project(creator=user, is_public=True, region=self.region_us)
registration = self.registration(project=project_to_register_us, creator=user)
file_size = next(small_size)
create_test_file(
target=registration,
user=user,
size=file_size
)
assert registration.get_addon('osfstorage').region == self.region_us
logger.debug(u'Registration, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['united_states'] += file_size
expected_summary_data['registrations'] += file_size
withdrawal = self.registration(project=project_to_register_us, creator=user, withdrawn=True)
file_size = next(small_size)
create_test_file(
target=withdrawal,
user=user,
size=file_size
)
logger.debug(u'Withdrawn registration, US: {}'.format(file_size))
expected_summary_data['total'] += file_size
expected_summary_data['united_states'] += file_size
expected_summary_data['registrations'] += file_size
actual_summary_data = process_usages(dry_run=True, page_size=2)
actual_keys = actual_summary_data.keys()
for key in actual_summary_data:
logger.info('Actual field: {}'.format(key))
expected_keys = expected_summary_data.keys()
for key in expected_summary_data:
logger.info('Expected field: {}'.format(key))
assert actual_keys == expected_keys
assert len(actual_keys) != 0
for key in actual_keys:
if key != 'date':
assert (key, expected_summary_data[key]) == (key, actual_summary_data[key])
@pytest.mark.es
@pytest.mark.django_db
class TestInstitutionMetricsUpdate:
@pytest.fixture()
def institution(self):
# Private: 14, Public: 4
return InstitutionFactory()
@pytest.fixture()
def user1(self, institution):
# Private: 4, Public: 4 (+1 from user2 fixture)
user = AuthUserFactory()
institution.osfuser_set.add(user)
institution.save()
for i in range(5):
project = ProjectFactory(creator=user, is_public=False)
project.affiliated_institutions.add(institution)
project.save()
project.delete()
for i in range(3):
project = ProjectFactory(creator=user, is_public=True)
project.affiliated_institutions.add(institution)
project.save()
ProjectFactory(creator=user, is_public=True)
ProjectFactory(creator=user, is_public=False)
return user
@pytest.fixture()
def user2(self, institution, user1):
# Private: 10, Public: 1
user = AuthUserFactory()
institution.osfuser_set.add(user)
institution.save()
for i in range(10):
project = ProjectFactory(creator=user, is_public=False)
project.affiliated_institutions.add(institution)
project.save()
for i in range(1):
project = ProjectFactory(creator=user, is_public=True)
project.add_contributor(user1)
project.affiliated_institutions.add(institution)
project.save()
return user
@pytest.fixture()
def user3(self, institution):
# Private: 0, Public: 0
user = AuthUserFactory()
institution.osfuser_set.add(user)
institution.save()
return user
@pytest.fixture()
def user4(self):
# Projects should not be included in results
user = AuthUserFactory()
for i in range(3):
project = ProjectFactory(creator=user, is_public=False)
project.save()
for i in range(6):
project = ProjectFactory(creator=user, is_public=True)
project.save()
return user
def test_update_institution_counts(self, app, institution, user1, user2, user3, user4):
update_institution_project_counts()
time.sleep(2)
user_search = UserInstitutionProjectCounts.get_current_user_metrics(institution)
user_results = user_search.execute()
sorted_results = sorted(user_results, key=lambda x: x['private_project_count'])
user3_record = sorted_results[0]
user1_record = sorted_results[1]
user2_record = sorted_results[2]
assert user1_record['user_id'] == user1._id
assert user1_record['public_project_count'] == 4
assert user1_record['private_project_count'] == 4
assert user2_record['user_id'] == user2._id
assert user2_record['public_project_count'] == 1
assert user2_record['private_project_count'] == 10
assert user3_record['user_id'] == user3._id
assert user3_record['public_project_count'] == 0
assert user3_record['private_project_count'] == 0
institution_results = InstitutionProjectCounts.get_latest_institution_project_document(institution)
assert institution_results['public_project_count'] == 4
assert institution_results['private_project_count'] == 14
| apache-2.0 | -3,789,926,878,020,260,400 | -4,660,154,802,416,204,000 | 35.974747 | 107 | 0.625256 | false |
dnlm92/chokoretto | main/lib/unidecode/x07b.py | 252 | 4669 | data = (
'Mang ', # 0x00
'Zhu ', # 0x01
'Utsubo ', # 0x02
'Du ', # 0x03
'Ji ', # 0x04
'Xiao ', # 0x05
'Ba ', # 0x06
'Suan ', # 0x07
'Ji ', # 0x08
'Zhen ', # 0x09
'Zhao ', # 0x0a
'Sun ', # 0x0b
'Ya ', # 0x0c
'Zhui ', # 0x0d
'Yuan ', # 0x0e
'Hu ', # 0x0f
'Gang ', # 0x10
'Xiao ', # 0x11
'Cen ', # 0x12
'Pi ', # 0x13
'Bi ', # 0x14
'Jian ', # 0x15
'Yi ', # 0x16
'Dong ', # 0x17
'Shan ', # 0x18
'Sheng ', # 0x19
'Xia ', # 0x1a
'Di ', # 0x1b
'Zhu ', # 0x1c
'Na ', # 0x1d
'Chi ', # 0x1e
'Gu ', # 0x1f
'Li ', # 0x20
'Qie ', # 0x21
'Min ', # 0x22
'Bao ', # 0x23
'Tiao ', # 0x24
'Si ', # 0x25
'Fu ', # 0x26
'Ce ', # 0x27
'Ben ', # 0x28
'Pei ', # 0x29
'Da ', # 0x2a
'Zi ', # 0x2b
'Di ', # 0x2c
'Ling ', # 0x2d
'Ze ', # 0x2e
'Nu ', # 0x2f
'Fu ', # 0x30
'Gou ', # 0x31
'Fan ', # 0x32
'Jia ', # 0x33
'Ge ', # 0x34
'Fan ', # 0x35
'Shi ', # 0x36
'Mao ', # 0x37
'Po ', # 0x38
'Sey ', # 0x39
'Jian ', # 0x3a
'Qiong ', # 0x3b
'Long ', # 0x3c
'Souke ', # 0x3d
'Bian ', # 0x3e
'Luo ', # 0x3f
'Gui ', # 0x40
'Qu ', # 0x41
'Chi ', # 0x42
'Yin ', # 0x43
'Yao ', # 0x44
'Xian ', # 0x45
'Bi ', # 0x46
'Qiong ', # 0x47
'Gua ', # 0x48
'Deng ', # 0x49
'Jiao ', # 0x4a
'Jin ', # 0x4b
'Quan ', # 0x4c
'Sun ', # 0x4d
'Ru ', # 0x4e
'Fa ', # 0x4f
'Kuang ', # 0x50
'Zhu ', # 0x51
'Tong ', # 0x52
'Ji ', # 0x53
'Da ', # 0x54
'Xing ', # 0x55
'Ce ', # 0x56
'Zhong ', # 0x57
'Kou ', # 0x58
'Lai ', # 0x59
'Bi ', # 0x5a
'Shai ', # 0x5b
'Dang ', # 0x5c
'Zheng ', # 0x5d
'Ce ', # 0x5e
'Fu ', # 0x5f
'Yun ', # 0x60
'Tu ', # 0x61
'Pa ', # 0x62
'Li ', # 0x63
'Lang ', # 0x64
'Ju ', # 0x65
'Guan ', # 0x66
'Jian ', # 0x67
'Han ', # 0x68
'Tong ', # 0x69
'Xia ', # 0x6a
'Zhi ', # 0x6b
'Cheng ', # 0x6c
'Suan ', # 0x6d
'Shi ', # 0x6e
'Zhu ', # 0x6f
'Zuo ', # 0x70
'Xiao ', # 0x71
'Shao ', # 0x72
'Ting ', # 0x73
'Ce ', # 0x74
'Yan ', # 0x75
'Gao ', # 0x76
'Kuai ', # 0x77
'Gan ', # 0x78
'Chou ', # 0x79
'Kago ', # 0x7a
'Gang ', # 0x7b
'Yun ', # 0x7c
'O ', # 0x7d
'Qian ', # 0x7e
'Xiao ', # 0x7f
'Jian ', # 0x80
'Pu ', # 0x81
'Lai ', # 0x82
'Zou ', # 0x83
'Bi ', # 0x84
'Bi ', # 0x85
'Bi ', # 0x86
'Ge ', # 0x87
'Chi ', # 0x88
'Guai ', # 0x89
'Yu ', # 0x8a
'Jian ', # 0x8b
'Zhao ', # 0x8c
'Gu ', # 0x8d
'Chi ', # 0x8e
'Zheng ', # 0x8f
'Jing ', # 0x90
'Sha ', # 0x91
'Zhou ', # 0x92
'Lu ', # 0x93
'Bo ', # 0x94
'Ji ', # 0x95
'Lin ', # 0x96
'Suan ', # 0x97
'Jun ', # 0x98
'Fu ', # 0x99
'Zha ', # 0x9a
'Gu ', # 0x9b
'Kong ', # 0x9c
'Qian ', # 0x9d
'Quan ', # 0x9e
'Jun ', # 0x9f
'Chui ', # 0xa0
'Guan ', # 0xa1
'Yuan ', # 0xa2
'Ce ', # 0xa3
'Ju ', # 0xa4
'Bo ', # 0xa5
'Ze ', # 0xa6
'Qie ', # 0xa7
'Tuo ', # 0xa8
'Luo ', # 0xa9
'Dan ', # 0xaa
'Xiao ', # 0xab
'Ruo ', # 0xac
'Jian ', # 0xad
'Xuan ', # 0xae
'Bian ', # 0xaf
'Sun ', # 0xb0
'Xiang ', # 0xb1
'Xian ', # 0xb2
'Ping ', # 0xb3
'Zhen ', # 0xb4
'Sheng ', # 0xb5
'Hu ', # 0xb6
'Shi ', # 0xb7
'Zhu ', # 0xb8
'Yue ', # 0xb9
'Chun ', # 0xba
'Lu ', # 0xbb
'Wu ', # 0xbc
'Dong ', # 0xbd
'Xiao ', # 0xbe
'Ji ', # 0xbf
'Jie ', # 0xc0
'Huang ', # 0xc1
'Xing ', # 0xc2
'Mei ', # 0xc3
'Fan ', # 0xc4
'Chui ', # 0xc5
'Zhuan ', # 0xc6
'Pian ', # 0xc7
'Feng ', # 0xc8
'Zhu ', # 0xc9
'Hong ', # 0xca
'Qie ', # 0xcb
'Hou ', # 0xcc
'Qiu ', # 0xcd
'Miao ', # 0xce
'Qian ', # 0xcf
'[?] ', # 0xd0
'Kui ', # 0xd1
'Sik ', # 0xd2
'Lou ', # 0xd3
'Yun ', # 0xd4
'He ', # 0xd5
'Tang ', # 0xd6
'Yue ', # 0xd7
'Chou ', # 0xd8
'Gao ', # 0xd9
'Fei ', # 0xda
'Ruo ', # 0xdb
'Zheng ', # 0xdc
'Gou ', # 0xdd
'Nie ', # 0xde
'Qian ', # 0xdf
'Xiao ', # 0xe0
'Cuan ', # 0xe1
'Gong ', # 0xe2
'Pang ', # 0xe3
'Du ', # 0xe4
'Li ', # 0xe5
'Bi ', # 0xe6
'Zhuo ', # 0xe7
'Chu ', # 0xe8
'Shai ', # 0xe9
'Chi ', # 0xea
'Zhu ', # 0xeb
'Qiang ', # 0xec
'Long ', # 0xed
'Lan ', # 0xee
'Jian ', # 0xef
'Bu ', # 0xf0
'Li ', # 0xf1
'Hui ', # 0xf2
'Bi ', # 0xf3
'Di ', # 0xf4
'Cong ', # 0xf5
'Yan ', # 0xf6
'Peng ', # 0xf7
'Sen ', # 0xf8
'Zhuan ', # 0xf9
'Pai ', # 0xfa
'Piao ', # 0xfb
'Dou ', # 0xfc
'Yu ', # 0xfd
'Mie ', # 0xfe
'Zhuan ', # 0xff
)
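# Illustrative lookup, not part of the generated table: unidecode resolves a
# codepoint in the U+7Bxx block by indexing this tuple with its low byte,
# e.g. data[0x26] gives 'Fu ' for U+7B26.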
| mit | 6,357,760,261,601,001,000 | -4,077,760,779,618,174,500 | 17.096899 | 20 | 0.394731 | false |
tadebayo/myedge | myvenv/Lib/site-packages/django/conf/locale/sl/formats.py | 504 | 2118 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
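# Illustrative, not part of the original file: with the separators above and
# USE_THOUSAND_SEPARATOR enabled, 1234567.89 renders as "1.234.567,89".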
| mit | 8,884,551,348,142,008,000 | 7,542,625,970,913,619,000 | 41.36 | 77 | 0.446648 | false |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_dist_lookup_sparse_table_fuse_ops.py | 2 | 5894 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle
paddle.enable_static()
@unittest.skip("do not need currently")
class TestLookupTableFuseOp(unittest.TestCase):
def test_fuse(self):
places = [core.CPUPlace()]
# currently only support CPU
for place in places:
self.check_with_place(place)
def check_with_place(self, place):
scope = fluid.global_scope()
scope.var("LearningRate").get_tensor().set([0.01], place)
scope.var("Ids").get_tensor().set([i for i in range(100)], place)
init_program = fluid.Program()
lr = init_program.global_block().create_var(
name="LearningRate",
persistable=True,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[1],
dtype="float32")
ids = init_program.global_block().create_var(
name="Ids",
persistable=True,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[100],
dtype="int64")
output = init_program.global_block().create_var(
name="output",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[100, 8],
dtype="float32")
metas = []
metas.append(
"embedding_1.block0:Param,Moment1,Moment2:8,8,8:0:embedding_1@GRAD.block0:embedding_1.block0,embedding_1_moment1_0,embedding_1_moment2_0,kSparseIDs@embedding_1.block0:uniform_random&0&-0.5&0.5,fill_constant&0.0,fill_constant&0.0:none"
)
metas.append(
"embedding_2.block0:Param:8:0:embedding_2@GRAD.block0:embedding_2.block0,kSparseIDs@embedding_2.block0:uniform_random&0&-0.5&0.5:none"
)
init_program.global_block().append_op(
type="lookup_sparse_table_init",
inputs=None,
outputs=None,
attrs={"large_scale_metas": metas})
init_program.global_block().append_op(
type="lookup_sparse_table_read",
inputs={"Ids": ids},
outputs={"Out": output},
attrs={
"tablename": "embedding_1.block0",
"init": True,
"value_names": ["Param"],
})
init_program.global_block().append_op(
type="lookup_sparse_table_read",
inputs={"Ids": ids},
outputs={"Out": output},
attrs={
"tablename": "embedding_2.block0",
"init": True,
"value_names": ["Param"],
})
executor = fluid.Executor(place)
executor.run(init_program)
training_program = fluid.Program()
scope.var('Beta1Pow').get_tensor().set(
np.array([0]).astype("float32"), place)
scope.var('Beta2Pow').get_tensor().set(
np.array([0]).astype("float32"), place)
rows = [0, 1, 2, 3, 4, 5, 6]
row_numel = 8
w_selected_rows = scope.var('Grad').get_selected_rows()
w_selected_rows.set_height(len(rows))
w_selected_rows.set_rows(rows)
w_array = np.ones((len(rows), row_numel)).astype("float32")
for i in range(len(rows)):
w_array[i] *= i
w_tensor = w_selected_rows.get_tensor()
w_tensor.set(w_array, place)
lr = training_program.global_block().create_var(
name="LearningRate",
persistable=True,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[1],
dtype="float32")
grads = training_program.global_block().create_var(
name="Grad",
persistable=True,
type=fluid.core.VarDesc.VarType.SELECTED_ROWS,
shape=[100, 8],
dtype="float32")
beta1 = training_program.global_block().create_var(
name="Beta1Pow",
persistable=True,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[1],
dtype="float32")
beta2 = training_program.global_block().create_var(
name="Beta2Pow",
persistable=True,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[1],
dtype="float32")
training_program.global_block().append_op(
type="lookup_sparse_table_fuse_adam",
inputs={
"Grad": grads,
"LearningRate": lr,
"Beta1Pow": beta1,
"Beta2Pow": beta2,
},
outputs={"Beta1PowOut": beta1,
"Beta2PowOut": beta2},
attrs={
"is_entry": False,
"tablename": "embedding_1.block0",
"value_names": ["Param", "Moment1", "Moment2"],
})
training_program.global_block().append_op(
type="lookup_sparse_table_fuse_sgd",
inputs={"Grad": grads,
"LearningRate": lr},
attrs={
"is_entry": False,
"tablename": "embedding_2.block0",
"value_names": ["Param"],
})
executor.run(training_program)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -1,889,615,705,763,119,000 | -2,868,101,992,419,912,700 | 32.68 | 246 | 0.55548 | false |
DavidAntliff/AwaLWM2M | tools/tests/python/test_awa_client_server_interaction.py | 2 | 17796 | #/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
# Tests related to server-client interaction
import unittest
import subprocess
import time
import overlord
import common
from collections import namedtuple
import tools_common
from tools_common import CustomObject
from tools_common import CustomResource
from test_awa_client_define import client_define
from test_awa_client_get import client_get
from test_awa_client_set import client_set
from test_awa_client_delete import client_delete
from test_awa_client_subscribe import client_subscribe
from test_awa_server_define import server_define
from test_awa_server_delete import server_delete
from test_awa_server_execute import server_execute
from test_awa_server_execute import server_execute_stdin
from test_awa_server_list_clients import server_list_clients
from test_awa_server_observe import server_observe
from test_awa_server_read import server_read
from test_awa_server_write import server_write
class TestClientServer(tools_common.AwaTest):
def test_server_write_client_get_single_resource(self):
# test that a single resource can be written on the server and retrieved on the client
manufacturer = "ACME Corp."
expectedStdout = "Object1000[/1000/0]:\n Resource100[/1000/0/100]: %s\n" % (manufacturer,)
expectedStderr = ""
expectedCode = 0
result = server_write(self.config, "/1000/0/100=\"%s\"" % (manufacturer,))
self.assertEqual("", result.stderr)
self.assertEqual("", result.stdout)
self.assertEqual(0, result.code)
result = client_get(self.config, "/1000/0/100")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
def test_server_write_client_get_multiple_resources_same_instance(self):
# test that multiple resources from the same instance can be set on
# the server and retrieved on the client with single commands
timezone = "ACME Corp."
currentTime = 123456789
expectedStdout = \
"""Device[/3/0]:
Timezone[/3/0/15]: %s
CurrentTime[/3/0/13]: %d
""" % (timezone, currentTime)
expectedStderr = ""
expectedCode = 0
server_write(self.config,
"/3/0/15=\"%s\"" % (timezone,),
"/3/0/13=%d" % (currentTime,))
result = client_get(self.config, "/3/0/15", "/3/0/13")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("Multiple writes in a single request are not supported")
def test_server_write_client_get_multiple_resources_different_instances_single_write(self):
# test that multiple resources from the different instances can be set on
# the server and retrieved on the client with single commands
timezone = "ACME Corp."
modelNumber = "1234567890"
expectedStdout = \
"""Device[/3/0]:
Timezone[/3/0/15]: %s
Object1000[/1000/0]:
Resource100[/1000/0/100]: %s
""" % (timezone, modelNumber)
expectedStderr = ""
expectedCode = 0
result = server_write(self.config, "/3/0/15=\"%s\"" % (timezone,), "/1000/0/100=\"%s\"" % (modelNumber,))
self.assertEqual(0, result.code)
result = client_get(self.config, "/3/0/15", "/1000/0/100")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
def test_server_write_client_get_multiple_resources_different_instances(self):
        # test that multiple resources from different instances can be set on
# the server and retrieved on the client with single commands
timezone = "ACME Corp."
modelNumber = "1234567890"
expectedStdout = \
"""Device[/3/0]:
Timezone[/3/0/15]: %s
Object1000[/1000/0]:
Resource100[/1000/0/100]: %s
""" % (timezone, modelNumber)
expectedStderr = ""
expectedCode = 0
result = server_write(self.config, "/3/0/15=\"%s\"" % (timezone,))
self.assertEqual(0, result.code)
result = server_write(self.config, "/1000/0/100=\"%s\"" % (modelNumber,))
self.assertEqual(0, result.code)
result = client_get(self.config, "/3/0/15", "/1000/0/100")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
def test_client_set_server_read_single_resource(self):
# test that a single resource can be written on the client and retrieved on the server
manufacturer = "ACME Corp."
expectedStdout = "Object1000[/1000/0]:\n Resource100[/1000/0/100]: %s\n" % (manufacturer,)
expectedStderr = ""
expectedCode = 0
result = client_set(self.config, "/1000/0/100=\"%s\"" % (manufacturer,))
self.assertEqual(0, result.code)
result = server_read(self.config, "/1000/0/100")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("Reading multiple resources in single request is currently unsupported")
def test_client_set_server_read_multiple_resources_same_instance(self):
# test that multiple resources from the same instance can be set on
# the client and retrieved on the server with single commands
manufacturer = "ACME Corp."
memoryFree = 55
temperature = 24.6
        expectedStdout = \
"""    Resource100[1000/0/100]: %s
    Resource101[1000/0/101]: %d
    Resource102[1000/0/102]: %f
""" % (manufacturer, memoryFree, temperature)
expectedStderr = ""
expectedCode = 0
result = client_set(self.config, "/1000/0/100=\"%s\"" % (manufacturer,),
"/1000/0/101=\"%d\"" % (memoryFree,),
"/1000/0/102=%f" % (temperature,))
result = server_read(self.config, "/1000/0/100", "/1000/0/101", "/1000/0/102")
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedCode, result.code)
@unittest.skip("Multiple reads in single operation currently unsupported")
def test_client_set_server_read_multiple_resources_different_instances_single_read_operation(self):
        # test that multiple resources from different instances can be set on
# the client and retrieved on the server with single commands
manufacturer = "ACME Corp."
modelNumber = "1234567890"
expectedStdout = \
""" Manufacturer[3/0/0]: %s
Resource100[1000/0/100]: %s
""" % (manufacturer, modelNumber)
expectedStderr = ""
expectedCode = 0
result = client_set(self.config, "/3/0/0=\"%s\"" % (manufacturer,), "/1000/0/100=\"%s\"" % (modelNumber,))
self.assertEqual("", result.stdout)
self.assertEqual("", result.stderr)
self.assertEqual(0, result.code)
result = server_read(self.config, "/3/0/0", "/1000/0/100")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
def test_client_subscribe_resource_server_execute(self):
# test that the client can subscribe to an executable resource and receive a notification
# when that resource is executed by the server
# open client subscribe subprocess. Only wait for a single execute of the resource
port = self.config.clientIpcPort
subscribeProcess = tools_common.run_non_blocking(tools_common.CLIENT_SUBSCRIBE,
"--verbose --ipcPort %i --waitCount 1 /3/0/4" % (port,))
# wait for subscribe process to start up
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Session IPC configured for UDP: address 127.0.0.1, port %d" % (port,))
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Session connected")
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Subscribe /3/0/4 Execute\n")
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Waiting for 1 notifications:\n")
# test we can execute a resource, specifying no payload
expectedStdout = "Target /3/0/4 executed successfully\n"
expectedStderr = ""
expectedCode = 0
result = server_execute(self.config, "/3/0/4")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
# read subscribe output
expectedStdout = "Execute 1:\nNO DATA\nSession disconnected\n"
expectedStderr = ""
expectedCode = 0
result = tools_common.non_blocking_get_run_result(subscribeProcess)
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
def test_client_subscribe_resource_server_execute_with_payload(self):
# test that the client can subscribe to an executable resource and receive a notification
# when that resource is executed by the server. Payload should be printed on the client
# open client subscribe subprocess. Only wait for a single execute of the resource
port = self.config.clientIpcPort
subscribeProcess = tools_common.run_non_blocking(tools_common.CLIENT_SUBSCRIBE,
"--verbose --ipcPort %i --waitCount 1 /3/0/4" % (port,))
# wait for subscribe process to start up
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Session IPC configured for UDP: address 127.0.0.1, port %d" % (port,))
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Session connected")
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Subscribe /3/0/4 Execute\n")
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Waiting for 1 notifications:\n")
# test we can execute a resource, specifying a payload of data
expectedStdout = "Target /3/0/4 executed successfully\n"
expectedStderr = ""
expectedCode = 0
inputText = "QmFzZTY0IGlzIGEgZ2VuZXJpYyB0ZXJtIGZvciB"
result = server_execute_stdin(self.config, inputText, "/3/0/4")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
hexBytes = ""
for c in inputText:
hexBytes += c.encode("hex") + " "
# read subscribe output
expectedStdout = "Execute 1:\nDATA: length 39, payload: [" + hexBytes +"]\nSession disconnected\n"
expectedStderr = ""
expectedCode = 0
result = tools_common.non_blocking_get_run_result(subscribeProcess)
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
@unittest.skip("Multiple executions in a single request is currently supported")
def test_client_subscribe_resource_server_multiple_execute_with_payload(self):
self.assertTrue(False)
def test_client_subscribe_resource_server_write(self):
# test that the client can subscribe to a resource and receive a notification
# when that resource is changed by the server through the write function
# open client subscribe subprocess. Only wait for a single execute of the resource
port = self.config.clientIpcPort
subscribeProcess = tools_common.run_non_blocking(tools_common.CLIENT_SUBSCRIBE,
"--verbose --ipcPort %i --waitCount 1 /3/0/15" % (port,))
# wait for subscribe process to start up
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Session IPC configured for UDP: address 127.0.0.1, port %d" % (port,))
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Session connected")
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Subscribe /3/0/15 Change\n")
self.assertEqual(tools_common.strip_prefix(subscribeProcess.stdout.readline()), "Waiting for 1 notifications:\n")
# do write command
expectedStdout = ""
expectedStderr = ""
expectedCode = 0
result = server_write(self.config, "/3/0/15=abc")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
# read subscribe output
expectedStdout = "Notify 1:\nChanged: /3/0/15 Resource Modified:\nDevice[/3/0]:\n Timezone[/3/0/15]: abc\nSession disconnected\n"
expectedStderr = ""
expectedCode = 0
result = tools_common.non_blocking_get_run_result(subscribeProcess)
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
def test_server_observe_resource_client_set(self):
# test that the server can observe changes to a resource and receive a notification
# when that resource is changed by the client through the set function
# open client subscribe subprocess. Only wait for a single execute of the resource
port = self.config.serverIpcPort
clientEndpointName = self.config.clientEndpointName
observeProcess = tools_common.run_non_blocking(tools_common.SERVER_OBSERVE,
"--verbose --ipcPort %i --clientID %s --waitCount 1 /3/0/1" % (port, clientEndpointName))
# wait for observe process to start up
self.assertEqual(tools_common.strip_prefix(observeProcess.stdout.readline()), "Session IPC configured for UDP: address 127.0.0.1, port %d" % (port,))
self.assertEqual(tools_common.strip_prefix(observeProcess.stdout.readline()), "Session connected")
self.assertEqual(tools_common.strip_prefix(observeProcess.stdout.readline()), "Observe /3/0/1\n")
self.assertEqual(tools_common.strip_prefix(observeProcess.stdout.readline()), "Waiting for 1 notifications:\n")
self.assertEqual(tools_common.strip_prefix(observeProcess.stdout.readline()), "Notify 0 from clientID TestClient:\n")
self.assertEqual(tools_common.strip_prefix(observeProcess.stdout.readline()), "Changed: /3/0/1 Resource Modified:\n")
# do set command
expectedStdout = ""
expectedStderr = ""
expectedCode = 0
result = client_set(self.config, "/3/0/1=abc")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
# read subscribe output
expectedStdout = "Device[/3/0]:\n ModelNumber[/3/0/1]: Awa Client\nNotify 1 from clientID TestClient:\nChanged: /3/0/1 Resource Modified:\nDevice[/3/0]:\n ModelNumber[/3/0/1]: abc\nSession disconnected\n"
expectedStderr = ""
expectedCode = 0
result = tools_common.non_blocking_get_run_result(observeProcess)
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
| bsd-3-clause | -7,709,224,375,576,309,000 | -1,970,213,149,635,990,500 | 49.129577 | 218 | 0.670432 | false |
dmitry-sobolev/ansible | test/units/module_utils/basic/test_no_log.py | 66 | 5263 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
import sys
import syslog
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils import basic
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.module_utils.basic import return_values, remove_values
class TestReturnValues(unittest.TestCase):
dataset = (
('string', frozenset(['string'])),
('', frozenset()),
(1, frozenset(['1'])),
(1.0, frozenset(['1.0'])),
(False, frozenset()),
(['1', '2', '3'], frozenset(['1', '2', '3'])),
(('1', '2', '3'), frozenset(['1', '2', '3'])),
({'one': 1, 'two': 'dos'}, frozenset(['1', 'dos'])),
({'one': 1, 'two': 'dos',
'three': ['amigos', 'musketeers', None,
{'ping': 'pong', 'base': ('balls', 'raquets')}]},
frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets'])),
(u'Toshio くらとみ', frozenset(['Toshio くらとみ'])),
('Toshio くらとみ', frozenset(['Toshio くらとみ'])),
)
def test_return_values(self):
for data, expected in self.dataset:
self.assertEquals(frozenset(return_values(data)), expected)
def test_unknown_type(self):
self.assertRaises(TypeError, frozenset, return_values(object()))
class TestRemoveValues(unittest.TestCase):
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
dataset_no_remove = (
('string', frozenset(['nope'])),
(1234, frozenset(['4321'])),
(False, frozenset(['4321'])),
(1.0, frozenset(['4321'])),
(['string', 'strang', 'strung'], frozenset(['nope'])),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['nope'])),
({'one': 1, 'two': 'dos',
'three': ['amigos', 'musketeers', None,
{'ping': 'pong', 'base': ['balls', 'raquets']}]},
frozenset(['nope'])),
('Toshio くら', frozenset(['とみ'])),
(u'Toshio くら', frozenset(['とみ'])),
)
dataset_remove = (
('string', frozenset(['string']), OMIT),
(1234, frozenset(['1234']), OMIT),
(1234, frozenset(['23']), OMIT),
(1.0, frozenset(['1.0']), OMIT),
(['string', 'strang', 'strung'], frozenset(['strang']), ['string', OMIT, 'strung']),
(['string', 'strang', 'strung'], frozenset(['strang', 'string', 'strung']), [OMIT, OMIT, OMIT]),
(('string', 'strang', 'strung'), frozenset(['string', 'strung']), [OMIT, 'strang', OMIT]),
((1234567890, 345678, 987654321), frozenset(['1234567890']), [OMIT, 345678, 987654321]),
((1234567890, 345678, 987654321), frozenset(['345678']), [OMIT, OMIT, 987654321]),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key']),
{'one': 1, 'two': 'dos', 'secret': OMIT}),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']),
{'one': OMIT, 'two': OMIT, 'secret': OMIT}),
({'one': 1, 'two': 'dos', 'three': ['amigos', 'musketeers', None,
{'ping': 'pong', 'base': ['balls', 'raquets']}]},
frozenset(['balls', 'base', 'pong', 'amigos']),
{'one': 1, 'two': 'dos', 'three': [OMIT, 'musketeers',
None, {'ping': OMIT, 'base': [OMIT, 'raquets']}]}),
('This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery',
frozenset(['enigma', 'mystery', 'secret']),
'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'),
('Toshio くらとみ', frozenset(['くらとみ']), 'Toshio ********'),
(u'Toshio くらとみ', frozenset(['くらとみ']), u'Toshio ********'),
)
def test_no_removal(self):
for value, no_log_strings in self.dataset_no_remove:
self.assertEquals(remove_values(value, no_log_strings), value)
def test_strings_to_remove(self):
for value, no_log_strings, expected in self.dataset_remove:
self.assertEquals(remove_values(value, no_log_strings), expected)
def test_unknown_type(self):
self.assertRaises(TypeError, remove_values, object(), frozenset())
| gpl-3.0 | -3,724,892,255,778,731,000 | -5,339,922,670,299,858,000 | 43.681034 | 107 | 0.558364 | false |
airbnb/airflow | tests/providers/google/cloud/sensors/test_gcs.py | 7 | 11744 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta, timezone
from unittest import TestCase, mock
import pendulum
from airflow.exceptions import AirflowSensorTimeout
from airflow.models.dag import DAG, AirflowException
from airflow.providers.google.cloud.sensors.gcs import (
GCSObjectExistenceSensor,
GCSObjectsWtihPrefixExistenceSensor,
GCSObjectUpdateSensor,
GCSUploadSessionCompleteSensor,
ts_function,
)
TEST_BUCKET = "TEST_BUCKET"
TEST_OBJECT = "TEST_OBJECT"
TEST_DELEGATE_TO = "TEST_DELEGATE_TO"
TEST_GCP_CONN_ID = 'TEST_GCP_CONN_ID'
TEST_IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
TEST_PREFIX = "TEST_PREFIX"
TEST_DAG_ID = 'unit_tests_gcs_sensor'
DEFAULT_DATE = datetime(2015, 1, 1)
MOCK_DATE_ARRAY = [datetime(2019, 2, 24, 12, 0, 0) - i * timedelta(seconds=10) for i in range(25)]
def next_time_side_effect():
"""
    Each time this is called, mock a time 10 seconds later
    than the one returned by the previous call.
"""
return MOCK_DATE_ARRAY.pop()
mock_time = mock.Mock(side_effect=next_time_side_effect)
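# MOCK_DATE_ARRAY is built newest-first, so pop() returns the oldest
# timestamp first and each subsequent call is 10 seconds later.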
class TestGoogleCloudStorageObjectSensor(TestCase):
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_argument_to_hook(self, mock_hook):
task = GCSObjectExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
object=TEST_OBJECT,
google_cloud_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.exists.return_value = True
result = task.poke(mock.MagicMock())
self.assertEqual(True, result)
mock_hook.assert_called_once_with(
delegate_to=TEST_DELEGATE_TO,
google_cloud_storage_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.exists.assert_called_once_with(TEST_BUCKET, TEST_OBJECT)
class TestTsFunction(TestCase):
def test_should_support_datetime(self):
context = {
'dag': DAG(dag_id=TEST_DAG_ID, schedule_interval=timedelta(days=5)),
'execution_date': datetime(2019, 2, 14, 0, 0),
}
result = ts_function(context)
self.assertEqual(datetime(2019, 2, 19, 0, 0, tzinfo=timezone.utc), result)
def test_should_support_cron(self):
dag = DAG(dag_id=TEST_DAG_ID, start_date=datetime(2019, 2, 19, 0, 0), schedule_interval='@weekly')
context = {
'dag': dag,
'execution_date': datetime(2019, 2, 19),
}
result = ts_function(context)
self.assertEqual(pendulum.instance(datetime(2019, 2, 24)).isoformat(), result.isoformat())
class TestGoogleCloudStorageObjectUpdatedSensor(TestCase):
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_argument_to_hook(self, mock_hook):
task = GCSObjectUpdateSensor(
task_id="task-id",
bucket=TEST_BUCKET,
object=TEST_OBJECT,
google_cloud_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.is_updated_after.return_value = True
result = task.poke(mock.MagicMock())
mock_hook.assert_called_once_with(
delegate_to=TEST_DELEGATE_TO,
google_cloud_storage_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.is_updated_after.assert_called_once_with(TEST_BUCKET, TEST_OBJECT, mock.ANY)
self.assertEqual(True, result)
class TestGoogleCloudStoragePrefixSensor(TestCase):
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_arguments_to_hook(self, mock_hook):
task = GCSObjectsWtihPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list.return_value = ["NOT_EMPTY_LIST"]
result = task.poke(mock.MagicMock)
mock_hook.assert_called_once_with(
delegate_to=TEST_DELEGATE_TO,
google_cloud_storage_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix=TEST_PREFIX)
self.assertEqual(True, result)
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_return_false_on_empty_list(self, mock_hook):
task = GCSObjectsWtihPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
)
mock_hook.return_value.list.return_value = []
result = task.poke(mock.MagicMock)
self.assertEqual(False, result)
@mock.patch('airflow.providers.google.cloud.sensors.gcs.GCSHook')
def test_execute(self, mock_hook):
task = GCSObjectsWtihPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
poke_interval=0,
)
generated_messages = ['test-prefix/obj%s' % i for i in range(5)]
mock_hook.return_value.list.return_value = generated_messages
response = task.execute(None)
mock_hook.assert_called_once_with(
delegate_to=TEST_DELEGATE_TO,
google_cloud_storage_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix=TEST_PREFIX)
self.assertEqual(response, generated_messages)
@mock.patch('airflow.providers.google.cloud.sensors.gcs.GCSHook')
def test_execute_timeout(self, mock_hook):
task = GCSObjectsWtihPrefixExistenceSensor(
task_id="task-id", bucket=TEST_BUCKET, prefix=TEST_PREFIX, poke_interval=0, timeout=1
)
mock_hook.return_value.list.return_value = []
with self.assertRaises(AirflowSensorTimeout):
task.execute(mock.MagicMock)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix=TEST_PREFIX)
class TestGCSUploadSessionCompleteSensor(TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
}
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once', default_args=args)
dag.schedule_interval = '@once'
self.dag = dag
self.sensor = GCSUploadSessionCompleteSensor(
task_id='sensor_1',
bucket='test-bucket',
prefix='test-prefix/path',
inactivity_period=12,
poke_interval=10,
min_objects=1,
allow_delete=False,
google_cloud_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
dag=self.dag,
)
self.last_mocked_date = datetime(2019, 4, 24, 0, 0, 0)
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_get_gcs_hook(self, mock_hook):
self.sensor._get_gcs_hook()
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
delegate_to=TEST_DELEGATE_TO,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
self.assertEqual(mock_hook.return_value, self.sensor.hook)
@mock.patch('airflow.providers.google.cloud.sensors.gcs.get_time', mock_time)
def test_files_deleted_between_pokes_throw_error(self):
self.sensor.is_bucket_updated({'a', 'b'})
with self.assertRaises(AirflowException):
self.sensor.is_bucket_updated({'a'})
@mock.patch('airflow.providers.google.cloud.sensors.gcs.get_time', mock_time)
def test_files_deleted_between_pokes_allow_delete(self):
self.sensor = GCSUploadSessionCompleteSensor(
task_id='sensor_2',
bucket='test-bucket',
prefix='test-prefix/path',
inactivity_period=12,
poke_interval=10,
min_objects=1,
allow_delete=True,
dag=self.dag,
)
self.sensor.is_bucket_updated({'a', 'b'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a'})
self.assertEqual(len(self.sensor.previous_objects), 1)
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a', 'c'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a', 'd'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a', 'd'})
self.assertEqual(self.sensor.inactivity_seconds, 10)
self.assertTrue(self.sensor.is_bucket_updated({'a', 'd'}))
@mock.patch('airflow.providers.google.cloud.sensors.gcs.get_time', mock_time)
def test_incoming_data(self):
self.sensor.is_bucket_updated({'a'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a', 'b'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a', 'b', 'c'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
@mock.patch('airflow.providers.google.cloud.sensors.gcs.get_time', mock_time)
def test_no_new_data(self):
self.sensor.is_bucket_updated({'a'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a'})
self.assertEqual(self.sensor.inactivity_seconds, 10)
@mock.patch('airflow.providers.google.cloud.sensors.gcs.get_time', mock_time)
def test_no_new_data_success_criteria(self):
self.sensor.is_bucket_updated({'a'})
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated({'a'})
self.assertEqual(self.sensor.inactivity_seconds, 10)
self.assertTrue(self.sensor.is_bucket_updated({'a'}))
@mock.patch('airflow.providers.google.cloud.sensors.gcs.get_time', mock_time)
def test_not_enough_objects(self):
self.sensor.is_bucket_updated(set())
self.assertEqual(self.sensor.inactivity_seconds, 0)
self.sensor.is_bucket_updated(set())
self.assertEqual(self.sensor.inactivity_seconds, 10)
self.assertFalse(self.sensor.is_bucket_updated(set()))
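# Added sketch (not part of the original suite): one way the ``mock_time``
# patched in the tests above could be built. The name and the 10-second step
# are assumptions chosen to match the ``inactivity_seconds`` arithmetic in
# these assertions.
def _make_fake_clock(start, step_seconds=10):
    """Return a zero-argument callable that advances step_seconds per call."""
    from datetime import timedelta
    state = {'now': start}
    def _now():
        current = state['now']
        state['now'] = current + timedelta(seconds=step_seconds)
        return current
    return _now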
| apache-2.0 | -6,028,902,926,742,881,000 | -1,399,277,268,901,096,200 | 38.409396 | 107 | 0.652418 | false |
h0nIg/ansible-modules-extras | network/snmp_facts.py | 14 | 13252 | #!/usr/bin/python
# This file is part of Networklore's snmp library for Ansible
#
# The module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: snmp_facts
version_added: "1.9"
author: "Patrick Ogenstad (@ogenstad)"
short_description: Retrieve facts for a device using SNMP.
description:
    - Retrieve facts for a device using SNMP; the facts will be
      inserted into the ansible_facts key.
requirements:
- pysnmp
options:
host:
description:
- Set to target snmp server (normally {{inventory_hostname}})
required: true
version:
description:
- SNMP Version to use, v2/v2c or v3
choices: [ 'v2', 'v2c', 'v3' ]
required: true
community:
description:
- The SNMP community string, required if version is v2/v2c
required: false
level:
description:
- Authentication level, required if version is v3
choices: [ 'authPriv', 'authNoPriv' ]
required: false
username:
description:
- Username for SNMPv3, required if version is v3
required: false
integrity:
description:
      - Hashing algorithm, required if version is v3
choices: [ 'md5', 'sha' ]
required: false
authkey:
description:
- Authentication key, required if version is v3
required: false
privacy:
description:
      - Encryption algorithm, required if level is authPriv
choices: [ 'des', 'aes' ]
required: false
privkey:
description:
      - Encryption key, required if level is authPriv
required: false
'''
EXAMPLES = '''
# Gather facts with SNMP version 2
- snmp_facts: host={{ inventory_hostname }} version=v2c community=public
connection: local
# Gather facts using SNMP version 3
- snmp_facts:
host={{ inventory_hostname }}
version=v3
level=authPriv
integrity=sha
privacy=aes
username=snmp-user
authkey=abc12345
privkey=def6789
delegate_to: localhost
'''
from ansible.module_utils.basic import *
from collections import defaultdict
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
has_pysnmp = True
except ImportError:
has_pysnmp = False
class DefineOid(object):
def __init__(self,dotprefix=False):
if dotprefix:
dp = "."
else:
dp = ""
# From SNMPv2-MIB
self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
self.sysName = dp + "1.3.6.1.2.1.1.5.0"
self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
# From IF-MIB
self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
# From IP-MIB
self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
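# Added illustration (never called by the module): the dotted form is used
# when polling, while the prefix-less form is matched against returned OIDs.
def _demo_define_oid():
    assert DefineOid(dotprefix=True).sysName == ".1.3.6.1.2.1.1.5.0"
    assert DefineOid(dotprefix=False).sysName == "1.3.6.1.2.1.1.5.0"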
def decode_hex(hexstring):
if len(hexstring) < 3:
return hexstring
if hexstring[:2] == "0x":
return hexstring[2:].decode("hex")
else:
return hexstring
def decode_mac(hexstring):
if len(hexstring) != 14:
return hexstring
if hexstring[:2] == "0x":
return hexstring[2:]
else:
return hexstring
def lookup_adminstatus(int_adminstatus):
adminstatus_options = {
1: 'up',
2: 'down',
3: 'testing'
}
if int_adminstatus in adminstatus_options.keys():
return adminstatus_options[int_adminstatus]
else:
return ""
def lookup_operstatus(int_operstatus):
operstatus_options = {
1: 'up',
2: 'down',
3: 'testing',
4: 'unknown',
5: 'dormant',
6: 'notPresent',
7: 'lowerLayerDown'
}
if int_operstatus in operstatus_options.keys():
return operstatus_options[int_operstatus]
else:
return ""
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True),
version=dict(required=True, choices=['v2', 'v2c', 'v3']),
community=dict(required=False, default=False),
username=dict(required=False),
level=dict(required=False, choices=['authNoPriv', 'authPriv']),
integrity=dict(required=False, choices=['md5', 'sha']),
privacy=dict(required=False, choices=['des', 'aes']),
authkey=dict(required=False),
privkey=dict(required=False),
removeplaceholder=dict(required=False)),
required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
supports_check_mode=False)
m_args = module.params
if not has_pysnmp:
module.fail_json(msg='Missing required pysnmp module (check docs)')
cmdGen = cmdgen.CommandGenerator()
# Verify that we receive a community when using snmp v2
if m_args['version'] == "v2" or m_args['version'] == "v2c":
if m_args['community'] == False:
module.fail_json(msg='Community not set when using snmp version 2')
if m_args['version'] == "v3":
        if m_args['username'] is None:
module.fail_json(msg='Username not set when using snmp version 3')
if m_args['level'] == "authPriv" and m_args['privacy'] == None:
module.fail_json(msg='Privacy algorithm not set when using authPriv')
if m_args['integrity'] == "sha":
integrity_proto = cmdgen.usmHMACSHAAuthProtocol
elif m_args['integrity'] == "md5":
integrity_proto = cmdgen.usmHMACMD5AuthProtocol
if m_args['privacy'] == "aes":
privacy_proto = cmdgen.usmAesCfb128Protocol
elif m_args['privacy'] == "des":
privacy_proto = cmdgen.usmDESPrivProtocol
# Use SNMP Version 2
if m_args['version'] == "v2" or m_args['version'] == "v2c":
snmp_auth = cmdgen.CommunityData(m_args['community'])
# Use SNMP Version 3 with authNoPriv
elif m_args['level'] == "authNoPriv":
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
# Use SNMP Version 3 with authPriv
else:
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
# Use p to prefix OIDs with a dot for polling
p = DefineOid(dotprefix=True)
# Use v without a prefix to use with return values
v = DefineOid(dotprefix=False)
Tree = lambda: defaultdict(Tree)
results = Tree()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
snmp_auth,
cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.sysDescr,),
cmdgen.MibVariable(p.sysObjectId,),
cmdgen.MibVariable(p.sysUpTime,),
cmdgen.MibVariable(p.sysContact,),
cmdgen.MibVariable(p.sysName,),
cmdgen.MibVariable(p.sysLocation,),
lookupMib=False
)
if errorIndication:
module.fail_json(msg=str(errorIndication))
for oid, val in varBinds:
current_oid = oid.prettyPrint()
current_val = val.prettyPrint()
if current_oid == v.sysDescr:
results['ansible_sysdescr'] = decode_hex(current_val)
elif current_oid == v.sysObjectId:
results['ansible_sysobjectid'] = current_val
elif current_oid == v.sysUpTime:
results['ansible_sysuptime'] = current_val
elif current_oid == v.sysContact:
results['ansible_syscontact'] = current_val
elif current_oid == v.sysName:
results['ansible_sysname'] = current_val
elif current_oid == v.sysLocation:
results['ansible_syslocation'] = current_val
errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
snmp_auth,
cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.ifIndex,),
cmdgen.MibVariable(p.ifDescr,),
cmdgen.MibVariable(p.ifMtu,),
cmdgen.MibVariable(p.ifSpeed,),
cmdgen.MibVariable(p.ifPhysAddress,),
cmdgen.MibVariable(p.ifAdminStatus,),
cmdgen.MibVariable(p.ifOperStatus,),
cmdgen.MibVariable(p.ipAdEntAddr,),
cmdgen.MibVariable(p.ipAdEntIfIndex,),
cmdgen.MibVariable(p.ipAdEntNetMask,),
cmdgen.MibVariable(p.ifAlias,),
lookupMib=False
)
if errorIndication:
module.fail_json(msg=str(errorIndication))
interface_indexes = []
all_ipv4_addresses = []
ipv4_networks = Tree()
for varBinds in varTable:
for oid, val in varBinds:
current_oid = oid.prettyPrint()
current_val = val.prettyPrint()
if v.ifIndex in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
interface_indexes.append(ifIndex)
if v.ifDescr in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['name'] = current_val
if v.ifMtu in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['mtu'] = current_val
            if v.ifSpeed in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['speed'] = current_val
if v.ifPhysAddress in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
if v.ifAdminStatus in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
if v.ifOperStatus in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
if v.ipAdEntAddr in current_oid:
curIPList = current_oid.rsplit('.', 4)[-4:]
curIP = ".".join(curIPList)
ipv4_networks[curIP]['address'] = current_val
all_ipv4_addresses.append(current_val)
if v.ipAdEntIfIndex in current_oid:
curIPList = current_oid.rsplit('.', 4)[-4:]
curIP = ".".join(curIPList)
ipv4_networks[curIP]['interface'] = current_val
if v.ipAdEntNetMask in current_oid:
curIPList = current_oid.rsplit('.', 4)[-4:]
curIP = ".".join(curIPList)
ipv4_networks[curIP]['netmask'] = current_val
if v.ifAlias in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['description'] = current_val
interface_to_ipv4 = {}
for ipv4_network in ipv4_networks:
current_interface = ipv4_networks[ipv4_network]['interface']
current_network = {
'address': ipv4_networks[ipv4_network]['address'],
'netmask': ipv4_networks[ipv4_network]['netmask']
}
        if current_interface not in interface_to_ipv4:
            interface_to_ipv4[current_interface] = []
        interface_to_ipv4[current_interface].append(current_network)
for interface in interface_to_ipv4:
results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
module.exit_json(ansible_facts=results)
main()
| gpl-3.0 | 8,758,536,831,230,179,000 | 9,127,333,431,426,151,000 | 35.108992 | 170 | 0.584666 | false |
w1ll1am23/home-assistant | tests/components/rest/test_notify.py | 6 | 1522 | """The tests for the rest.notify platform."""
from os import path
from unittest.mock import patch
import respx
from homeassistant import config as hass_config
import homeassistant.components.notify as notify
from homeassistant.components.rest import DOMAIN
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
@respx.mock
async def test_reload_notify(hass):
"""Verify we can reload the notify service."""
respx.get("http://localhost") % 200
assert await async_setup_component(
hass,
notify.DOMAIN,
{
notify.DOMAIN: [
{
"name": DOMAIN,
"platform": DOMAIN,
"resource": "http://127.0.0.1/off",
},
]
},
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, DOMAIN)
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"rest/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert not hass.services.has_service(notify.DOMAIN, DOMAIN)
assert hass.services.has_service(notify.DOMAIN, "rest_reloaded")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
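# For orientation only (an assumption, not the shipped fixture): the reload
# test above implies that ``rest/configuration.yaml`` defines a notify
# platform named ``rest_reloaded``, along the lines of:
#
#   notify:
#     - name: rest_reloaded
#       platform: rest
#       resource: "http://127.0.0.1/off"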
| apache-2.0 | -364,211,292,973,179,800 | 1,989,604,212,694,511,900 | 26.672727 | 68 | 0.612352 | false |
dharmabumstead/ansible | lib/ansible/plugins/action/win_copy.py | 7 | 23402 | # This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import os.path
import shutil
import tempfile
import traceback
import zipfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
"""
Walk a filesystem tree returning enough information to copy the files.
This is similar to the _walk_dirs function in ``copy.py`` but returns
a dict instead of a tuple for each entry and includes the checksum of
a local file if wanted.
:arg topdir: The directory that the filesystem tree is rooted at
:arg loader: The self._loader object from ActionBase
:kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault
:kwarg base_path: The initial directory structure to strip off of the
files for the destination directory. If this is None (the default),
the base_path is set to ``top_dir``.
    :kwarg local_follow: Whether to follow symlinks on the source. When set
        to False (the default here), no symlinks are dereferenced. When set
        to True, the code will dereference most symlinks. However, symlinks
        can still be present if needed to break a circular link.
:kwarg trailing_slash_detector: Function to determine if a path has
a trailing directory separator. Only needed when dealing with paths on
a remote machine (in which case, pass in a function that is aware of the
directory separator conventions on the remote machine).
    :kwarg checksum_check: Whether to get the checksum of the local file and add it to the dict
:returns: dictionary of dictionaries. All of the path elements in the structure are text string.
This separates all the files, directories, and symlinks along with
        important information about each::
{
            'files': [{
                src: '/absolute/path/to/copy/from',
                dest: 'relative/path/to/copy/to',
                checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
            }, ...],
            'directories': [{
                src: '/absolute/path/to/copy/from',
                dest: 'relative/path/to/copy/to'
            }, ...],
            'symlinks': [{
                src: '/symlink/target/path',
                dest: 'relative/path/to/copy/to'
            }, ...],
}
The ``symlinks`` field is only populated if ``local_follow`` is set to False
*or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
to None if checksum_check=False.
"""
# Convert the path segments into byte strings
r_files = {'files': [], 'directories': [], 'symlinks': []}
def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
"""
        This is a closure (a function utilizing variables from its parent
function's scope) so that we only need one copy of all the containers.
Note that this function uses side effects (See the Variables used from
outer scope).
:arg topdir: The directory we are walking for files
:arg rel_offset: Integer defining how many characters to strip off of
the beginning of a path
:arg parent_dirs: Directories that we're copying that this directory is in.
:kwarg rel_base: String to prepend to the path after ``rel_offset`` is
applied to form the relative path.
Variables used from the outer scope
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:r_files: Dictionary of files in the hierarchy. See the return value
for :func:`walk` for the structure of this dictionary.
:local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
"""
for base_path, sub_folders, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
if os.path.islink(filepath):
                # Dereference the symlink
real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt)
if local_follow and os.path.isfile(real_file):
# Add the file pointed to by the symlink
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
else:
# Mark this file as a symlink to copy
r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
else:
# Just a normal file
real_file = loader.get_real_file(filepath, decrypt=decrypt)
r_files['files'].append(
{
"src": real_file,
"dest": dest_filepath,
"checksum": _get_local_checksum(checksum_check, real_file)
}
)
for dirname in sub_folders:
dirpath = os.path.join(base_path, dirname)
dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
real_dir = os.path.realpath(dirpath)
dir_stats = os.stat(real_dir)
if os.path.islink(dirpath):
if local_follow:
if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
# Just insert the symlink if the target directory
# exists inside of the copy already
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the dirpath to find all parent directories.
new_parents = set()
parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
for parent in range(len(parent_dir_list), 0, -1):
parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
# Reached the point at which the directory
# tree is already known. Don't add any
# more or we might go to an ancestor that
# isn't being copied.
break
new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
                            # This was a circular symlink, so add it as
                            # a symlink
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Walk the directory pointed to by the symlink
r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
offset = len(real_dir) + 1
_recurse(real_dir, offset, parent_dirs.union(new_parents),
rel_base=dest_dirpath,
checksum_check=checksum_check)
else:
# Add the symlink to the destination
r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
else:
# Just a normal directory
r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
# Check if the source ends with a "/" so that we know which directory
# level to work at (similar to rsync)
source_trailing_slash = False
if trailing_slash_detector:
source_trailing_slash = trailing_slash_detector(topdir)
else:
source_trailing_slash = topdir.endswith(os.path.sep)
# Calculate the offset needed to strip the base_path to make relative
# paths
if base_path is None:
base_path = topdir
if not source_trailing_slash:
base_path = os.path.dirname(base_path)
if topdir.startswith(base_path):
offset = len(base_path)
# Make sure we're making the new paths relative
if trailing_slash_detector and not trailing_slash_detector(base_path):
offset += 1
elif not base_path.endswith(os.path.sep):
offset += 1
if os.path.islink(topdir) and not local_follow:
r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
return r_files
dir_stats = os.stat(topdir)
parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
# Actually walk the directory hierarchy
_recurse(topdir, offset, parents, checksum_check=checksum_check)
return r_files
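# Shape illustration (added; the paths are hypothetical): for a tree holding
# src/a.txt and src/sub/b.txt, walked with checksum_check=False, the function
# returns something like:
#   {'files': [{'src': '/tmp/src/a.txt', 'dest': 'src/a.txt', 'checksum': None},
#              {'src': '/tmp/src/sub/b.txt', 'dest': 'src/sub/b.txt', 'checksum': None}],
#    'directories': [{'src': '/tmp/src/sub', 'dest': 'src/sub'}],
#    'symlinks': []}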
def _get_local_checksum(get_checksum, local_path):
if get_checksum:
return checksum(local_path)
else:
return None
class ActionModule(ActionBase):
WIN_PATH_SEPARATOR = "\\"
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _create_zip_tempfile(self, files, directories):
tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
zip_file_path = os.path.join(tmpdir, "win_copy.zip")
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True)
# encoding the file/dir name with base64 so Windows can unzip a unicode
# filename and get the right name, Windows doesn't handle unicode names
# very well
for directory in directories:
directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
for file in files:
file_path = to_bytes(file['src'], errors='surrogate_or_strict')
archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
return zip_file_path
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp):
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
# copy the file across to the server
tmp_src = self._connection._shell.join_path(tmp, 'source')
self._transfer_file(local_file, tmp_src)
copy_args = self._task.args.copy()
copy_args.update(
dict(
dest=dest,
src=tmp_src,
original_basename=source_rel,
_copy_mode="single"
)
)
copy_args.pop('content', None)
copy_result = self._execute_module(module_name="copy",
module_args=copy_args,
task_vars=task_vars)
return copy_result
def _copy_zip_file(self, dest, files, directories, task_vars, tmp):
# create local zip file containing all the files and directories that
# need to be copied to the server
if self._play_context.check_mode:
module_return = dict(changed=True)
return module_return
try:
zip_file = self._create_zip_tempfile(files, directories)
except Exception as e:
module_return = dict(
changed=False,
failed=True,
msg="failed to create tmp zip file: %s" % to_text(e),
exception=traceback.format_exc()
)
return module_return
zip_path = self._loader.get_real_file(zip_file)
# send zip file to remote, file must end in .zip so
# Com Shell.Application works
tmp_src = self._connection._shell.join_path(tmp, 'source.zip')
self._transfer_file(zip_path, tmp_src)
# run the explode operation of win_copy on remote
copy_args = self._task.args.copy()
copy_args.update(
dict(
src=tmp_src,
dest=dest,
_copy_mode="explode"
)
)
copy_args.pop('content', None)
module_return = self._execute_module(module_name='copy',
module_args=copy_args,
task_vars=task_vars)
shutil.rmtree(os.path.dirname(zip_path))
return module_return
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
force = boolean(self._task.args.get('force', True), strict=False)
decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
result['src'] = source
result['dest'] = dest
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and (
dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# If content is defined make a temp file and write the content into it
content_tempfile = None
if content is not None:
try:
# if content comes to us as a dict it should be decoded json.
# We need to encode it back into a string and write it out
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content tmp file: %s" % to_native(err)
return result
# all actions should occur on the remote server, run win_copy module
elif remote_src:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
_copy_mode="remote",
dest=dest,
src=source,
force=force
)
)
new_module_args.pop('content', None)
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
return result
# find_needle returns a path that may not have a trailing slash on a
# directory so we need to find that out first and append at the end
else:
trailing_slash = source.endswith(os.path.sep)
try:
# find in expected paths
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
result['exception'] = traceback.format_exc()
return result
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
source = source[:-1]
else:
source = source + os.path.sep
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = {'files': [], 'directories': [], 'symlinks': []}
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
result['operation'] = 'folder_copy'
# Get a list of the files we want to replicate on the remote side
source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow,
trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
checksum_check=force)
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - win_copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
check_dest = dest
# Source is a file, add details to source_files dict
else:
result['operation'] = 'file_copy'
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
return result
original_basename = os.path.basename(source)
result['original_basename'] = original_basename
# check if dest ends with / or \ and append source filename to dest
if self._connection._shell.path_has_trailing_slash(dest):
check_dest = dest
filename = original_basename
result['dest'] = self._connection._shell.join_path(dest, filename)
else:
# replace \\ with / so we can use os.path to get the filename or dirname
unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
filename = os.path.basename(unix_path)
check_dest = os.path.dirname(unix_path)
file_checksum = _get_local_checksum(force, source_full)
source_files['files'].append(
dict(
src=source_full,
dest=filename,
checksum=file_checksum
)
)
result['checksum'] = file_checksum
result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
# find out the files/directories/symlinks that we need to copy to the server
query_args = self._task.args.copy()
query_args.update(
dict(
_copy_mode="query",
dest=check_dest,
force=force,
files=source_files['files'],
directories=source_files['directories'],
symlinks=source_files['symlinks']
)
)
        # src is not required for query; it would fail path validation if src has unix-allowed chars
query_args.pop('src', None)
query_args.pop('content', None)
query_return = self._execute_module(module_args=query_args,
task_vars=task_vars)
if query_return.get('failed') is True:
result.update(query_return)
return result
        if (len(query_return['files']) > 0 or len(query_return['directories']) > 0) and self._connection._shell.tmpdir is None:
self._connection._shell.tmpdir = self._make_tmp_path()
if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
# we only need to copy 1 file, don't mess around with zips
file_src = query_return['files'][0]['src']
file_dest = query_return['files'][0]['dest']
copy_result = self._copy_single_file(file_src, dest, file_dest,
task_vars, self._connection._shell.tmpdir)
result['changed'] = True
if copy_result.get('failed') is True:
result['failed'] = True
result['msg'] = "failed to copy file %s: %s" % (file_src, copy_result['msg'])
elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
# either multiple files or directories need to be copied, compress
# to a zip and 'explode' the zip on the server
# TODO: handle symlinks
result.update(self._copy_zip_file(dest, source_files['files'],
source_files['directories'],
task_vars, self._connection._shell.tmpdir))
result['changed'] = True
else:
# no operations need to occur
result['failed'] = False
result['changed'] = False
# remove the content tmp file and remote tmp file if it was created
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| gpl-3.0 | 8,645,823,289,667,851,000 | 5,619,780,546,042,264,000 | 44.090559 | 133 | 0.556235 | false |
rx2130/Leetcode | python/322 Coin Change.py | 1 | 1430 | class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount < 1:
return 0
return self.helper(coins, amount, [0] * amount)
def helper(self, coins, target, cache):
        # target: remaining amount after the last step
# cache[target]: minimum number of coins to sum up to target
if target < 0:
return -1
if target == 0:
return 0
if cache[target - 1]:
return cache[target - 1]
min_ = float('inf')
for coin in coins:
res = self.helper(coins, target - coin, cache)
if 0 <= res < min_:
min_ = 1 + res
cache[target - 1] = min_ if min_ != float('inf') else -1
return cache[target - 1]
# DP O(n*amount) time O(amount) space
def coinChange2(self, coins, amount):
dp = [float('inf')] * (amount + 1)
dp[0] = 0
for coin in coins:
for i in range(coin, amount + 1):
dp[i] = min(dp[i], dp[i - coin] + 1)
return -1 if dp[amount] > amount else dp[amount]
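# Added sanity check (illustration only, not part of the original solution):
# both implementations agree on the classic example.
def _demo_coin_change():
    s = Solution()
    assert s.coinChange([1, 2, 5], 11) == 3  # 5 + 5 + 1
    assert s.coinChange2([1, 2, 5], 11) == 3
    assert s.coinChange2([2], 3) == -1  # 3 is unreachable with only 2s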
def main():
    # stress-test input (immediately overridden by the small case below)
    coins = [302, 500, 481, 399, 489, 285, 148, 51, 433, 197]
    amount = 4462
    # small sanity case: the answer is 3 (5 + 5 + 1)
    coins = [1, 2, 5]
    amount = 11
test = Solution()
# test.coinChange(coins, amount)
    print(test.coinChange2(coins, amount))  # expect 3
if __name__ == '__main__':
    main()
| apache-2.0 | -6,110,821,404,553,474,000 | -3,963,302,816,952,809,000 | 28.791667 | 68 | 0.511888 | false |
ovnicraft/odoo | addons/membership/__openerp__.py | 197 | 2207 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Membership Management',
'version': '0.1',
'category': 'Association',
'description': """
    This module allows you to manage all operations related to memberships.
=========================================================================
    It supports different kinds of members:
--------------------------------------
* Free member
* Associated member (e.g.: a group subscribes to a membership for all subsidiaries)
* Paid members
* Special member prices
It is integrated with sales and accounting to allow you to automatically
invoice and send propositions for membership renewal.
""",
'author': 'OpenERP SA',
'depends': ['base', 'product', 'account'],
'data': [
'security/ir.model.access.csv',
'wizard/membership_invoice_view.xml',
'membership_data.xml',
'membership_view.xml',
'report/report_membership_view.xml',
],
'demo': [
'membership_demo.xml',
'membership_demo.yml'
],
'website': 'https://www.odoo.com/page/community-builder',
'test': ['test/test_membership.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,650,683,714,754,859,000 | -1,492,038,001,219,239,200 | 36.40678 | 87 | 0.5913 | false |
yokose-ks/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_exporter.py | 5 | 10839 | """
Methods for exporting course data to XML
"""
import logging
import lxml.etree
from xblock.fields import Scope
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import Location
from xmodule.modulestore.inheritance import own_metadata
from fs.osfs import OSFS
from json import dumps
import json
import datetime
import os
from path import path
import shutil
DRAFT_DIR = "drafts"
PUBLISHED_DIR = "published"
EXPORT_VERSION_FILE = "format.json"
EXPORT_VERSION_KEY = "export_format"
DEFAULT_CONTENT_FIELDS = ['metadata', 'data']
class EdxJSONEncoder(json.JSONEncoder):
"""
Custom JSONEncoder that handles `Location` and `datetime.datetime` objects.
`Location`s are encoded as their url string form, and `datetime`s as
ISO date strings
"""
def default(self, obj):
if isinstance(obj, Location):
return obj.url()
elif isinstance(obj, datetime.datetime):
if obj.tzinfo is not None:
if obj.utcoffset() is None:
return obj.isoformat() + 'Z'
else:
return obj.isoformat()
else:
return obj.isoformat()
else:
return super(EdxJSONEncoder, self).default(obj)
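# Minimal usage sketch (added for illustration; not used by the exporter):
def _demo_edx_json_encoder():
    # Naive datetimes serialize to bare ISO strings; everything the stock
    # encoder understands is left untouched.
    assert dumps(datetime.datetime(2014, 1, 2, 3, 4, 5), cls=EdxJSONEncoder) == '"2014-01-02T03:04:05"'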
def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir, draft_modulestore=None):
"""
Export all modules from `modulestore` and content from `contentstore` as xml to `root_dir`.
`modulestore`: A `ModuleStore` object that is the source of the modules to export
`contentstore`: A `ContentStore` object that is the source of the content to export, can be None
`course_location`: The `Location` of the `CourseModuleDescriptor` to export
`root_dir`: The directory to write the exported xml to
`course_dir`: The name of the directory inside `root_dir` to write the course content to
`draft_modulestore`: An optional `DraftModuleStore` that contains draft content, which will be exported
alongside the public content in the course.
"""
course_id = course_location.course_id
course = modulestore.get_course(course_id)
fs = OSFS(root_dir)
export_fs = course.runtime.export_fs = fs.makeopendir(course_dir)
root = lxml.etree.Element('unknown')
course.add_xml_to_node(root)
with export_fs.open('course.xml', 'w') as course_xml:
lxml.etree.ElementTree(root).write(course_xml)
# export the static assets
policies_dir = export_fs.makeopendir('policies')
if contentstore:
contentstore.export_all_for_course(
course_location,
root_dir + '/' + course_dir + '/static/',
root_dir + '/' + course_dir + '/policies/assets.json',
)
# If we are using the default course image, export it to the
# legacy location to support backwards compatibility.
if course.course_image == course.fields['course_image'].default:
try:
course_image = contentstore.find(
StaticContent.compute_location(
course.location.org,
course.location.course,
course.course_image
),
)
except NotFoundError:
pass
else:
output_dir = root_dir + '/' + course_dir + '/static/images/'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
with OSFS(output_dir).open('course_image.jpg', 'wb') as course_image_file:
course_image_file.write(course_image.data)
# export the static tabs
export_extra_content(export_fs, modulestore, course_id, course_location, 'static_tab', 'tabs', '.html')
# export the custom tags
export_extra_content(export_fs, modulestore, course_id, course_location, 'custom_tag_template', 'custom_tags')
# export the course updates
export_extra_content(export_fs, modulestore, course_id, course_location, 'course_info', 'info', '.html')
# export the 'about' data (e.g. overview, etc.)
export_extra_content(export_fs, modulestore, course_id, course_location, 'about', 'about', '.html')
# export the grading policy
course_run_policy_dir = policies_dir.makeopendir(course.location.name)
with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy:
grading_policy.write(dumps(course.grading_policy, cls=EdxJSONEncoder))
# export all of the course metadata in policy.json
with course_run_policy_dir.open('policy.json', 'w') as course_policy:
policy = {'course/' + course.location.name: own_metadata(course)}
course_policy.write(dumps(policy, cls=EdxJSONEncoder))
# export draft content
    # NOTE: this code assumes that verticals are the topmost draftable
    # containers; should we change the application, this assumption will
    # no longer be valid
if draft_modulestore is not None:
draft_verticals = draft_modulestore.get_items([None, course_location.org, course_location.course,
'vertical', None, 'draft'])
if len(draft_verticals) > 0:
draft_course_dir = export_fs.makeopendir(DRAFT_DIR)
for draft_vertical in draft_verticals:
parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location, course.location.course_id)
# Don't try to export orphaned items.
if len(parent_locs) > 0:
logging.debug('parent_locs = {0}'.format(parent_locs))
draft_vertical.xml_attributes['parent_sequential_url'] = Location(parent_locs[0]).url()
sequential = modulestore.get_item(Location(parent_locs[0]))
index = sequential.children.index(draft_vertical.location.url())
draft_vertical.xml_attributes['index_in_children_list'] = str(index)
draft_vertical.runtime.export_fs = draft_course_dir
node = lxml.etree.Element('unknown')
draft_vertical.add_xml_to_node(node)
def _export_field_content(xblock_item, item_dir):
"""
Export all fields related to 'xblock_item' other than 'metadata' and 'data' to json file in provided directory
"""
module_data = xblock_item.get_explicitly_set_fields_by_scope(Scope.content)
if isinstance(module_data, dict):
for field_name in module_data:
if field_name not in DEFAULT_CONTENT_FIELDS:
# filename format: {dirname}.{field_name}.json
with item_dir.open('{0}.{1}.{2}'.format(xblock_item.location.name, field_name, 'json'),
'w') as field_content_file:
field_content_file.write(dumps(module_data.get(field_name, {}), cls=EdxJSONEncoder))
def export_extra_content(export_fs, modulestore, course_id, course_location, category_type, dirname, file_suffix=''):
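    """
    Export all modules of `category_type` (e.g. 'static_tab' or 'about') found
    in the course into files under `dirname`, writing each module's data plus,
    via _export_field_content, any other explicitly set content fields.
    """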
query_loc = Location('i4x', course_location.org, course_location.course, category_type, None)
items = modulestore.get_items(query_loc, course_id)
if len(items) > 0:
item_dir = export_fs.makeopendir(dirname)
for item in items:
with item_dir.open(item.location.name + file_suffix, 'w') as item_file:
item_file.write(item.data.encode('utf8'))
# export content fields other then metadata and data in json format in current directory
_export_field_content(item, item_dir)
def convert_between_versions(source_dir, target_dir):
"""
Converts a version 0 export format to version 1, and vice versa.
@param source_dir: the directory structure with the course export that should be converted.
The contents of source_dir will not be altered.
@param target_dir: the directory where the converted export should be written.
@return: the version number of the converted export.
"""
def convert_to_version_1():
""" Convert a version 0 archive to version 0 """
os.mkdir(copy_root)
with open(copy_root / EXPORT_VERSION_FILE, 'w') as f:
f.write('{{"{export_key}": 1}}\n'.format(export_key=EXPORT_VERSION_KEY))
# If a drafts folder exists, copy it over.
copy_drafts()
# Now copy everything into the published directory
published_dir = copy_root / PUBLISHED_DIR
shutil.copytree(path(source_dir) / course_name, published_dir)
# And delete the nested drafts directory, if it exists.
nested_drafts_dir = published_dir / DRAFT_DIR
if nested_drafts_dir.isdir():
shutil.rmtree(nested_drafts_dir)
def convert_to_version_0():
""" Convert a version 1 archive to version 0 """
# Copy everything in "published" up to the top level.
published_dir = path(source_dir) / course_name / PUBLISHED_DIR
if not published_dir.isdir():
raise ValueError("a version 1 archive must contain a published branch")
shutil.copytree(published_dir, copy_root)
# If there is a "draft" branch, copy it. All other branches are ignored.
copy_drafts()
def copy_drafts():
"""
Copy drafts directory from the old archive structure to the new.
"""
draft_dir = path(source_dir) / course_name / DRAFT_DIR
if draft_dir.isdir():
shutil.copytree(draft_dir, copy_root / DRAFT_DIR)
root = os.listdir(source_dir)
if len(root) != 1 or (path(source_dir) / root[0]).isfile():
raise ValueError("source archive does not have single course directory at top level")
course_name = root[0]
# For this version of the script, we simply convert back and forth between version 0 and 1.
original_version = get_version(path(source_dir) / course_name)
if original_version not in [0, 1]:
raise ValueError("unknown version: " + str(original_version))
    desired_version = 1 if original_version == 0 else 0
copy_root = path(target_dir) / course_name
if desired_version == 1:
convert_to_version_1()
else:
convert_to_version_0()
return desired_version
def get_version(course_path):
"""
Return the export format version number for the given
archive directory structure (represented as a path instance).
If the archived file does not correspond to a known export
format, None will be returned.
"""
format_file = course_path / EXPORT_VERSION_FILE
if not format_file.isfile():
return 0
with open(format_file, "r") as f:
data = json.load(f)
if EXPORT_VERSION_KEY in data:
return data[EXPORT_VERSION_KEY]
return None
| agpl-3.0 | 7,734,683,756,432,319,000 | 644,367,801,386,522,900 | 40.528736 | 120 | 0.639635 | false |
nuncjo/odoo | openerp/addons/base/module/wizard/base_import_language.py | 337 | 2644 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from tempfile import TemporaryFile
from openerp import tools
from openerp.osv import osv, fields
class base_language_import(osv.osv_memory):
""" Language Import """
_name = "base.language.import"
_description = "Language Import"
_columns = {
'name': fields.char('Language Name', required=True),
'code': fields.char('ISO Code', size=5, help="ISO Language and Country code, e.g. en_US", required=True),
'data': fields.binary('File', required=True),
'overwrite': fields.boolean('Overwrite Existing Terms',
help="If you enable this option, existing translations (including custom ones) "
"will be overwritten and replaced by those in this file"),
}
def import_lang(self, cr, uid, ids, context=None):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0])
if this.overwrite:
context = dict(context, overwrite=True)
fileobj = TemporaryFile('w+')
try:
fileobj.write(base64.decodestring(this.data))
# now we determine the file format
fileobj.seek(0)
first_line = fileobj.readline().strip().replace('"', '').replace(' ', '')
fileformat = first_line.endswith("type,name,res_id,src,value") and 'csv' or 'po'
fileobj.seek(0)
tools.trans_load_data(cr, fileobj, fileformat, this.code, lang_name=this.name, context=context)
finally:
fileobj.close()
return True
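# Note on the sniffing above (added for clarity): a first line that collapses
# to "type,name,res_id,src,value" after stripping quotes and spaces is treated
# as an OpenERP translation CSV export; any other first line is assumed to be
# a gettext .po file.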
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,780,522,520,567,533,000 | -2,391,506,323,711,975,000 | 40.3125 | 116 | 0.596445 | false |
oliverlee/sympy | sympy/physics/unitsystems/tests/test_units.py | 92 | 3071 | # -*- coding: utf-8 -*-
from __future__ import division
from sympy.physics.unitsystems.units import Unit
from sympy.physics.unitsystems.systems.mks import length, time
from sympy.physics.unitsystems.prefixes import PREFIXES
from sympy.utilities.pytest import raises
k = PREFIXES['k']
def test_definition():
u = Unit(length, factor=10, abbrev="dm")
assert u.dim == length
assert u._factor == 10
assert u._abbrev == "dm"
assert u.prefix is None
km = Unit(length, prefix=k)
assert km.prefix == k
v = Unit(u, factor=5)
assert v.dim == length
assert v._factor == 5 * 10
def test_error_definition():
raises(TypeError, lambda: Unit("m"))
def test_factor():
u = Unit(length, factor=10, abbrev="dm")
assert u.factor == 10
u = Unit(length, factor=5, prefix=k)
assert u.factor == 5000
def test_abbrev():
u = Unit(length)
assert u.abbrev == ""
u = Unit(length, abbrev="m")
assert u.abbrev == "m"
u = Unit(length, abbrev="m", prefix=k)
assert u.abbrev == "km"
def test_abbrev_dim():
u = Unit(length, factor=10)
assert u.abbrev_dim == "(10 L)"
def test_str():
u = Unit(length, factor=10)
assert str(u) == u.abbrev_dim
u = Unit(length, factor=10, abbrev="m")
assert str(u) == "m"
def test_repr():
u = Unit(length, factor=10, abbrev="m")
assert repr(u) == u.abbrev_dim
def test_eq():
u = Unit(length, factor=10, abbrev="dm")
v = Unit(length, factor=10)
assert (u == v) is True
v = Unit(time, factor=10, abbrev="ds")
assert (u == v) is False
v = Unit(length, factor=1, abbrev="dm")
assert (u == v) is False
def test_add_sub():
u = Unit(length, factor=10)
v = Unit(length, factor=5)
w = Unit(time, factor=2)
assert u.add(v) == Unit(length, factor=15)
assert u.sub(v) == Unit(length, factor=5)
raises(ValueError, lambda: u.add(w))
raises(ValueError, lambda: u.sub(w))
raises(TypeError, lambda: u.add(1))
raises(TypeError, lambda: u.sub(1))
def test_pow():
u = Unit(length, factor=10)
assert u.pow(0) == 1
assert u.pow(1) == u
assert u.pow(2) == Unit(length.pow(2), factor=100)
assert u.pow(-1) == Unit(length.pow(-1), factor=0.1)
def test_mul():
u = Unit(length, factor=10)
assert u.mul(1) == u
assert u.mul(Unit(time, factor=2)) == Unit(length.mul(time), factor=20)
assert u.mul(Unit(length.pow(-1), factor=2)) == 20
def test_div():
u = Unit(length, factor=10)
assert u.rdiv(1) == u.pow(-1)
assert u.div(1) == u
assert u.div(Unit(time, factor=2)) == Unit(length.div(time), factor=5)
assert u.div(Unit(length, factor=2)) == 5
def test_is_compatible():
u = Unit(length, factor=10)
assert u.is_compatible(Unit(length)) is True
assert u.is_compatible(Unit(time)) is False
assert u.is_compatible(2) is False
def test_as_quantity():
from sympy.physics.unitsystems.quantities import Quantity
u = Unit(length, factor=10)
q = Quantity(10, Unit(length))
assert u.as_quantity == q
| bsd-3-clause | 6,843,733,158,110,877,000 | -3,990,669,586,883,523,600 | 21.093525 | 75 | 0.61283 | false |
jfietkau/Streets4MPI | persistence.py | 1 | 1926 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# persistence.py
# Copyright 2012 Julian Fietkau <http://www.julian-fietkau.de/>
#
# This file is part of Streets4MPI.
#
# Streets4MPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Streets4MPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Streets4MPI. If not, see <http://www.gnu.org/licenses/>.
#
import cPickle
import zlib
import array
# This function serializes and compresses an object
def persist_serialize(data, compress = True):
if compress:
return zlib.compress(cPickle.dumps(data))
else:
return cPickle.dumps(data)
# This function deserializes and decompresses an object
def persist_deserialize(data, compressed = True):
if compressed:
return cPickle.loads(zlib.decompress(data))
else:
return cPickle.loads(data)
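# Round-trip sketch (added for illustration): the two functions above are
# inverses for any picklable object, with or without compression.
def _demo_roundtrip():
    data = {"edges": [1, 2, 3], "label": u"test"}
    assert persist_deserialize(persist_serialize(data)) == data
    assert persist_deserialize(persist_serialize(data, False), False) == data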
# This function saves a data structure to a file
def persist_write(filename, data, compress = True, is_array = False):
file = open(filename, "w")
if is_array:
data = zlib.compress(data.tostring())
else:
data = persist_serialize(data, compress)
    file.write(data)
    file.close()
# This function reads a data structure from a file
def persist_read(filename, compressed = True, is_array = False):
file = open(filename, "r")
data = file.read()
if is_array:
result = array.array("I")
result.fromstring(zlib.decompress(data))
else:
result = persist_deserialize(data, compressed)
return result
| gpl-3.0 | -5,996,315,703,277,914,000 | -6,889,285,371,386,586,000 | 31.1 | 70 | 0.708204 | false |
eldabbagh/gae-boilerplate | bp_includes/external/pytz/reference.py | 839 | 3649 | '''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''
from datetime import tzinfo, timedelta, datetime
from pytz import utc, UTC, HOUR, ZERO
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
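# Usage sketch (added; illustration only): offsets are minutes east of UTC,
# so e.g. India Standard Time is 330 minutes.
def _demo_fixed_offset():
    ist = FixedOffset(330, "IST")
    assert ist.utcoffset(None) == timedelta(minutes=330)
    assert ist.tzname(None) == "IST"
    assert ist.dst(None) == ZERO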
# A class capturing the platform's idea of local time.
import time as _time
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
# A complete implementation of current DST rules for major US time zones.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
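# Worked example (added): April 1st 1987 was a Wednesday, so the first Sunday
# on or after it is April 5th.
def _demo_first_sunday():
    assert first_sunday_on_or_after(datetime(1987, 4, 1)) == datetime(1987, 4, 5)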
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April & the last in October.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
| lgpl-3.0 | -4,838,737,443,765,407,000 | -9,117,788,438,405,882,000 | 27.732283 | 76 | 0.619622 | false |
lordkman/burnman | examples/example_geotherms.py | 4 | 4049 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_geotherms
-----------------
This example shows each of the geotherms currently possible with BurnMan.
These are:
1. Brown and Shankland, 1981 :cite:`Brown1981`
2. Anderson, 1982 :cite:`anderson1982earth`
3. Watson and Baxter, 2007 :cite:`Watson2007`
4. linear extrapolation
5. Read in from file from user
6. Adiabatic from potential temperature and choice of mineral
*Uses:*
* :func:`burnman.geotherm.brown_shankland`
* :func:`burnman.geotherm.anderson`
* input geotherm file *input_geotherm/example_geotherm.txt* (optional)
* :class:`burnman.composite.Composite` for adiabat
*Demonstrates:*
* the available geotherms
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
# we want to evaluate several geotherms at these values
pressures = np.arange(9.0e9, 128e9, 3e9)
seismic_model = burnman.seismic.PREM()
depths = seismic_model.depth(pressures)
# load two builtin geotherms and evaluate the temperatures at all pressures
temperature1 = burnman.geotherm.brown_shankland(depths)
temperature2 = burnman.geotherm.anderson(depths)
# a geotherm is actually just a function that returns a list of temperatures given pressures in Pa
# so we can just write our own function
my_geotherm_function = lambda p: [1500 + (2500 - 1500) * x / 128e9 for x in p]
temperature3 = my_geotherm_function(pressures)
# what about a geotherm defined from datapoints given in a file (our
# inline)?
table = [[1e9, 1600], [30e9, 1700], [130e9, 2700]]
# this could also be loaded from a file, just uncomment this
# table = burnman.tools.read_table("input_geotherm/example_geotherm.txt")
table_pressure = np.array(table)[:, 0]
table_temperature = np.array(table)[:, 1]
my_geotherm_interpolate = lambda p: [np.interp(x, table_pressure,
table_temperature) for x in p]
temperature4 = my_geotherm_interpolate(pressures)
# finally, we can also calculate a self consistent
# geotherm for an assemblage of minerals
# based on self compression of the composite rock.
# First we need to define an assemblage
amount_perovskite = 0.8
fe_pv = 0.05
fe_pc = 0.2
pv = minerals.SLB_2011.mg_fe_perovskite()
pc = minerals.SLB_2011.ferropericlase()
pv.set_composition([1. - fe_pv, fe_pv, 0.])
pc.set_composition([1. - fe_pc, fe_pc])
example_rock = burnman.Composite(
[pv, pc], [amount_perovskite, 1.0 - amount_perovskite])
# next, define an anchor temperature at which we are starting.
# Perhaps 1500 K for the upper mantle
T0 = 1500.
# then generate temperature values using the self consistent function.
# This takes more time than the above methods
temperature5 = burnman.geotherm.adiabatic(pressures, T0, example_rock)
# you can also look at burnman/geotherm.py to see how the geotherms are
# implemented
plt.plot(pressures / 1e9, temperature1, '-r', label="Brown, Shankland")
plt.plot(pressures / 1e9, temperature2, '-c', label="Anderson")
plt.plot(pressures / 1e9, temperature3, '-b', label="handwritten linear")
plt.plot(pressures / 1e9, temperature4,
'-k', label="handwritten from table")
plt.plot(pressures / 1e9, temperature5, '-m',
label="Adiabat with pv (70%) and fp(30%)")
plt.legend(loc='lower right')
plt.xlim([8.5, 130])
plt.xlabel('Pressure/GPa')
plt.ylabel('Temperature')
plt.savefig("output_figures/example_geotherm.png")
plt.show()
| gpl-2.0 | 1,669,251,744,062,720,000 | 7,055,329,350,013,807,000 | 35.809091 | 111 | 0.690047 | false |
gauribhoite/personfinder | env/google_appengine/google/appengine/api/channel/channel.py | 12 | 6716 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Channel API.
This module allows App Engine apps to push messages to a client.
Functions defined in this module:
create_channel: Creates a channel to send messages to.
send_message: Send a message to any clients listening on the given channel.
"""
import os
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.channel import channel_service_pb
from google.appengine.runtime import apiproxy_errors
MAXIMUM_CLIENT_ID_LENGTH = 256
MAXIMUM_TOKEN_DURATION_MINUTES = 24 * 60
MAXIMUM_MESSAGE_LENGTH = 32767
class Error(Exception):
"""Base error class for this module."""
class InvalidChannelClientIdError(Error):
"""Error that indicates a bad client id."""
class InvalidChannelTokenDurationError(Error):
"""Error that indicates the requested duration is invalid."""
class InvalidMessageError(Error):
"""Error that indicates a message is malformed."""
class AppIdAliasRequired(Error):
"""Error that indicates you must assign an application alias to your app."""
def _ToChannelError(error):
"""Translate an application error to a channel Error, if possible.
Args:
error: An ApplicationError to translate.
Returns:
The appropriate channel service error, if a match is found, or the original
ApplicationError.
"""
error_map = {
channel_service_pb.ChannelServiceError.INVALID_CHANNEL_KEY:
InvalidChannelClientIdError,
channel_service_pb.ChannelServiceError.BAD_MESSAGE:
InvalidMessageError,
channel_service_pb.ChannelServiceError.APPID_ALIAS_REQUIRED:
AppIdAliasRequired
}
if error.application_error in error_map:
return error_map[error.application_error](error.error_detail)
else:
return error
def _GetService():
"""Gets the service name to use, based on if we're on the dev server."""
server_software = os.environ.get('SERVER_SOFTWARE', '')
if (server_software.startswith('Devel') or
server_software.startswith('test')):
return 'channel'
else:
return 'xmpp'
def _ValidateClientId(client_id):
"""Validates a client id.
Args:
client_id: The client id provided by the application.
Returns:
If the client id is of type str, returns the original client id.
If the client id is of type unicode, returns the id encoded to utf-8.
Raises:
InvalidChannelClientIdError: if client id is not an instance of str or
      unicode, or if the (utf-8 encoded) string is longer than 256 characters.
"""
if not isinstance(client_id, basestring):
raise InvalidChannelClientIdError('"%s" is not a string.' % client_id)
if isinstance(client_id, unicode):
client_id = client_id.encode('utf-8')
if len(client_id) > MAXIMUM_CLIENT_ID_LENGTH:
msg = 'Client id length %d is greater than max length %d' % (
len(client_id), MAXIMUM_CLIENT_ID_LENGTH)
raise InvalidChannelClientIdError(msg)
return client_id
def create_channel(client_id, duration_minutes=None):
"""Create a channel.
Args:
client_id: A string to identify this channel on the server side.
duration_minutes: An int specifying the number of minutes for which the
returned token should be valid.
Returns:
A token that the client can use to connect to the channel.
Raises:
    InvalidChannelClientIdError: if client_id is not an instance of str or
      unicode, or if the (utf-8 encoded) string is longer than 256 characters.
InvalidChannelTokenDurationError: if duration_minutes is not a number, less
than 1, or greater than 1440 (the number of minutes in a day).
Other errors returned by _ToChannelError
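  Example (an illustrative sketch; the client id below is arbitrary and a
  deployed App Engine environment is assumed):
    token = create_channel('client-123', duration_minutes=60)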
"""
client_id = _ValidateClientId(client_id)
if not duration_minutes is None:
if not isinstance(duration_minutes, (int, long)):
raise InvalidChannelTokenDurationError(
'Argument duration_minutes must be integral')
elif duration_minutes < 1:
raise InvalidChannelTokenDurationError(
'Argument duration_minutes must not be less than 1')
elif duration_minutes > MAXIMUM_TOKEN_DURATION_MINUTES:
msg = ('Argument duration_minutes must be less than %d'
% (MAXIMUM_TOKEN_DURATION_MINUTES + 1))
raise InvalidChannelTokenDurationError(msg)
request = channel_service_pb.CreateChannelRequest()
response = channel_service_pb.CreateChannelResponse()
request.set_application_key(client_id)
if not duration_minutes is None:
request.set_duration_minutes(duration_minutes)
try:
apiproxy_stub_map.MakeSyncCall(_GetService(),
'CreateChannel',
request,
response)
except apiproxy_errors.ApplicationError, e:
raise _ToChannelError(e)
return response.token()
def send_message(client_id, message):
"""Send a message to a channel.
Args:
client_id: The client id passed to create_channel.
message: A string representing the message to send.
Raises:
InvalidChannelClientIdError: if client_id is not an instance of str or
    unicode, or if the (utf-8 encoded) string is longer than 256 characters.
InvalidMessageError: if the message isn't a string or is too long.
Errors returned by _ToChannelError
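  Example (an illustrative sketch; assumes a channel was created for this
  client id with create_channel):
    send_message('client-123', 'hello over the channel')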
"""
client_id = _ValidateClientId(client_id)
if isinstance(message, unicode):
message = message.encode('utf-8')
elif not isinstance(message, str):
raise InvalidMessageError('Message must be a string')
if len(message) > MAXIMUM_MESSAGE_LENGTH:
raise InvalidMessageError(
'Message must be no longer than %d chars' % MAXIMUM_MESSAGE_LENGTH)
request = channel_service_pb.SendMessageRequest()
response = api_base_pb.VoidProto()
request.set_application_key(client_id)
request.set_message(message)
try:
apiproxy_stub_map.MakeSyncCall(_GetService(),
'SendChannelMessage',
request,
response)
except apiproxy_errors.ApplicationError, e:
raise _ToChannelError(e)
| apache-2.0 | -6,721,909,459,026,204,000 | -5,702,897,137,277,521,000 | 26.86722 | 79 | 0.701757 | false |
austinzheng/swift | utils/swift_build_support/tests/products/test_llvm.py | 39 | 5436 | # tests/products/test_llvm.py -----------------------------------*- python -*-
#
# This source file is part of the LLVM.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the LLVM project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of LLVM project authors
# ----------------------------------------------------------------------------
import argparse
import os
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
from swift_build_support.products import LLVM
from swift_build_support.toolchain import host_toolchain
from swift_build_support.workspace import Workspace
class LLVMTestCase(unittest.TestCase):
def setUp(self):
# Setup workspace
tmpdir1 = os.path.realpath(tempfile.mkdtemp())
tmpdir2 = os.path.realpath(tempfile.mkdtemp())
os.makedirs(os.path.join(tmpdir1, 'llvm'))
self.workspace = Workspace(source_root=tmpdir1,
build_root=tmpdir2)
# Setup toolchain
self.toolchain = host_toolchain()
self.toolchain.cc = '/path/to/cc'
self.toolchain.cxx = '/path/to/cxx'
# Setup args
self.args = argparse.Namespace(
llvm_targets_to_build='X86;ARM;AArch64;PowerPC;SystemZ',
llvm_assertions='true',
compiler_vendor='none',
clang_compiler_version=None,
clang_user_visible_version=None,
darwin_deployment_version_osx='10.9')
# Setup shell
shell.dry_run = True
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
shutil.rmtree(self.workspace.build_root)
shutil.rmtree(self.workspace.source_root)
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
shell.dry_run = False
self.workspace = None
self.toolchain = None
self.args = None
def test_llvm_targets_to_build(self):
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
expected_targets = 'X86;ARM;AArch64;PowerPC;SystemZ'
expected_arg = '-DLLVM_TARGETS_TO_BUILD=%s' % expected_targets
self.assertIn(expected_arg, llvm.cmake_options)
def test_llvm_enable_assertions(self):
self.args.llvm_assertions = True
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertIn('-DLLVM_ENABLE_ASSERTIONS=TRUE', llvm.cmake_options)
self.args.llvm_assertions = False
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertIn('-DLLVM_ENABLE_ASSERTIONS=FALSE', llvm.cmake_options)
def test_compiler_vendor_flags(self):
self.args.compiler_vendor = "none"
self.args.clang_user_visible_version = "1.2.3"
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertNotIn('-DCLANG_VENDOR=Apple', llvm.cmake_options)
self.assertNotIn(
'-DCLANG_VENDOR_UTI=com.apple.compilers.llvm.clang',
llvm.cmake_options
)
self.assertNotIn('-DPACKAGE_VERSION=1.2.3', llvm.cmake_options)
self.args.compiler_vendor = "apple"
self.args.clang_user_visible_version = "2.2.3"
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertIn('-DCLANG_VENDOR=Apple', llvm.cmake_options)
self.assertIn(
'-DCLANG_VENDOR_UTI=com.apple.compilers.llvm.clang',
llvm.cmake_options
)
self.assertIn('-DPACKAGE_VERSION=2.2.3', llvm.cmake_options)
self.args.compiler_vendor = "unknown"
with self.assertRaises(RuntimeError):
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
def test_version_flags(self):
self.args.clang_compiler_version = None
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertListEqual(
[],
[x for x in llvm.cmake_options if 'CLANG_REPOSITORY_STRING' in x]
)
self.args.clang_compiler_version = "2.2.3"
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertIn(
'-DCLANG_REPOSITORY_STRING=clang-2.2.3',
llvm.cmake_options
)
| apache-2.0 | 6,787,762,606,772,073,000 | 6,168,343,842,579,036,000 | 32.975 | 78 | 0.582965 | false |
kiran/bart-sign | venv/lib/python2.7/site-packages/numpy/polynomial/laguerre.py | 75 | 55335 | """
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [0, 1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``L_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagvander2d` -- Vandermonde-like matrix for 2D Laguerre series.
- `lagvander3d` -- Vandermonde-like matrix for 3D Laguerre series.
- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
- `lagweight` -- Laguerre weight function.
- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
'laggauss', 'lagweight']
lagtrim = pu.trimcoef
def poly2lag(pol):
"""
poly2lag(pol)
Convert a polynomial to a Laguerre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Laguerre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Laguerre
series.
See Also
--------
lag2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import poly2lag
>>> poly2lag(np.arange(4))
array([ 23., -63., 58., -18.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = lagadd(lagmulx(res), pol[i])
return res
def lag2poly(c):
"""
Convert a Laguerre series to a polynomial.
Convert an array representing the coefficients of a Laguerre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Laguerre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2lag
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre
lagdomain = np.array([0, 1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl):
"""
Laguerre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Laguerre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.laguerre import lagline, lagval
>>> lagval(0,lagline(3, 2))
3.0
>>> lagval(1,lagline(3, 2))
5.0
"""
if scl != 0:
return np.array([off + scl, -scl])
else:
return np.array([off])
def lagfromroots(roots):
"""
Generate a Laguerre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Laguerre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Laguerre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
        real array; if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, chebfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [lagline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = lagmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def lagadd(c1, c2):
"""
Add one Laguerre series to another.
Returns the sum of two Laguerre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``L_0 + 2*L_1 + 3*L_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Laguerre series of their sum.
See Also
--------
lagsub, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Laguerre series
is a Laguerre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``L_0 + 2*L_1 + 3*L_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagmulx(c):
"""Multiply a Laguerre series by x.
Multiply the Laguerre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
       xL_i(x) = (-(i + 1)*L_{i + 1}(x) + (2i + 1)*L_{i}(x) - i*L_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([ -1., -1., 11., -9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]
prd[1] = -c[0]
for i in range(1, len(c)):
prd[i + 1] = -c[i]*(i + 1)
prd[i] += c[i]*(2*i + 1)
prd[i - 1] -= c[i]*i
return prd
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``L_0 + 2*L_1 + 3*L_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
"""
Divide one Laguerre series by another.
Returns the quotient-with-remainder of two Laguerre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
    ``L_0 + 2*L_1 + 3*L_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Laguerre series coefficients representing the quotient and
remainder.
See Also
--------
lagadd, lagsub, lagmul, lagpow
Notes
-----
In general, the (polynomial) division of one Laguerre series by another
results in quotient and remainder terms that are not in the Laguerre
polynomial basis set. Thus, to express these results as a Laguerre
series, it is necessary to "reproject" the results onto the Laguerre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = lagmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def lagpow(c, pow, maxpower=16):
"""Raise a Laguerre series to a power.
Returns the Laguerre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2.``
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Laguerre series of power.
See Also
--------
lagadd, lagsub, lagmul, lagdiv
Examples
--------
>>> from numpy.polynomial.laguerre import lagpow
>>> lagpow([1, 2, 3], 2)
array([ 14., -16., 56., -72., 54.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = lagmul(prd, c)
return prd
def lagder(c, m=1, scl=1, axis=0):
"""
Differentiate a Laguerre series.
Returns the Laguerre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Laguerre series of the derivative.
See Also
--------
lagint
Notes
-----
In general, the result of differentiating a Laguerre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
array([ 1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 1, -1):
der[j - 1] = -c[j]
c[j - 1] += c[j]
der[0] = -c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Laguerre series.
Returns the Laguerre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Laguerre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
lagder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagint
>>> lagint([1,2,3])
array([ 1., 1., 1., -3.])
>>> lagint([1,2,3], m=2)
array([ 1., 0., 0., -4., 3.])
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
array([ 11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
array([ 11.16666667, -5. , -3. , 2. ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]
tmp[1] = -c[0]
for j in range(1, n):
tmp[j] += c[j]
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagval(x, c, tensor=True):
"""
Evaluate a Laguerre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
lagval2d, laggrid2d, lagval3d, laggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.laguerre import lagval
>>> coef = [1,2,3]
>>> lagval(1, coef)
-0.5
>>> lagval([[1,2],[3,4]], coef)
array([[-0.5, -4. ],
[-4.5, -2. ]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*((2*nd - 1) - x))/nd
return c0 + c1*(1 - x)
def lagval2d(x, y, c):
"""
Evaluate a 2-D Laguerre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
lagval, laggrid2d, lagval3d, laggrid3d
Notes
-----
    .. versionadded:: 1.7.0
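    Examples
    --------
    A small sketch with an arbitrary (hypothetical) coefficient array:
    >>> from numpy.polynomial.laguerre import lagval2d
    >>> c = [[1, 2], [3, 4]]
    >>> lagval2d(0.5, 0.5, c)
    4.5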
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
return c
def laggrid2d(x, y, c):
"""
Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Laguerre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
lagval, lagval2d, lagval3d, laggrid3d
Notes
-----
    .. versionadded:: 1.7.0
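    Examples
    --------
    A small sketch with an arbitrary (hypothetical) coefficient array:
    >>> from numpy.polynomial.laguerre import laggrid2d
    >>> c = [[1, 2], [3, 4]]
    >>> laggrid2d([0, 1], [0, 1], c).shape
    (2, 2)
    >>> laggrid2d(0, 0, c)
    10.0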
"""
c = lagval(x, c)
c = lagval(y, c)
return c
def lagval3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the multidimensional polynomial at points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, laggrid3d
Notes
-----
    .. versionadded:: 1.7.0
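    Examples
    --------
    A small sketch with an arbitrary (hypothetical) coefficient array:
    >>> from numpy.polynomial.laguerre import lagval3d
    >>> c = np.ones((2, 2, 2))
    >>> lagval3d(0, 0, 0, c)
    8.0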
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
c = lagval(z, c, tensor=False)
return c
def laggrid3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
    .. versionadded:: 1.7.0
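    Examples
    --------
    A small sketch with an arbitrary (hypothetical) coefficient array:
    >>> from numpy.polynomial.laguerre import laggrid3d
    >>> c = np.ones((2, 2, 2))
    >>> laggrid3d([0, 1], [0, 1], [0, 1], c).shape
    (2, 2, 2)
    >>> laggrid3d(0, 0, 0, c)
    8.0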
"""
c = lagval(x, c)
c = lagval(y, c)
c = lagval(z, c)
return c
def lagvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Laguerre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
``lagval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Laguerre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.laguerre import lagvander
>>> x = np.array([0, 1, 2])
>>> lagvander(x, 3)
array([[ 1. , 1. , 1. , 1. ],
[ 1. , 0. , -0.5 , -0.66666667],
[ 1. , -1. , -1. , -0.33333333]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = 1 - x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def lagvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Laguerre polynomials.
If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
    .. versionadded:: 1.7.0
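    Examples
    --------
    A shape check and the dot-product equivalence described above, using
    hypothetical sample points and coefficients:
    >>> from numpy.polynomial.laguerre import lagvander2d, lagval2d
    >>> x = np.array([0., 1.])
    >>> y = np.array([0., 1.])
    >>> v = lagvander2d(x, y, [1, 1])
    >>> v.shape
    (2, 4)
    >>> c = np.array([[1., 2.], [3., 4.]])
    >>> np.allclose(np.dot(v, c.flat), lagval2d(x, y, c))
    True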
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def lagvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Laguerre polynomials.
If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    lagvander, lagvander2d, lagval2d, lagval3d
Notes
-----
    .. versionadded:: 1.7.0
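    Examples
    --------
    A shape check with hypothetical sample points:
    >>> from numpy.polynomial.laguerre import lagvander3d
    >>> x = y = z = np.array([0., 1.])
    >>> lagvander3d(x, y, z, [1, 1, 1]).shape
    (2, 8)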
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
vz = lagvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def lagfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Laguerre series to data.
Return the coefficients of a Laguerre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Laguerre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, polyfit, hermfit, hermefit
lagval : Evaluates a Laguerre series.
lagvander : pseudo Vandermonde matrix of Laguerre series.
lagweight : Laguerre weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Laguerre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Laguerre series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `lagweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.laguerre import lagfit, lagval
>>> x = np.linspace(0, 10)
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
array([ 0.96971004, 2.00193749, 3.00288744])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = lagvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def lagcompanion(c):
"""
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
    .. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[1 + c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
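    # Strided views onto the three diagonals of ``mat``: ``top`` is the
    # superdiagonal, ``mid`` the main diagonal and ``bot`` the subdiagonal.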
top = mat.reshape(-1)[1::n+1]
mid = mat.reshape(-1)[0::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = -np.arange(1, n)
mid[...] = 2.*np.arange(n) + 1.
bot[...] = top
mat[:, -1] += (c[:-1]/c[-1])*n
return mat
def lagroots(c):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, chebroots, hermroots, hermeroots
Notes
-----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
    The Laguerre series basis polynomials aren't powers of `x`, so the
results of this function may seem unintuitive.
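    For instance, a single hedged Newton polish of already-computed roots
    ``r`` of the series ``c`` could look like:
    >>> r = r - lagval(r, c)/lagval(r, lagder(c))  # doctest: +SKIP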
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([1 + c[0]/c[1]])
m = lagcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def laggauss(deg):
"""
Gauss-Laguerre quadrature.
Computes the sample points and weights for Gauss-Laguerre quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \infty]`
with the weight function :math:`f(x) = \exp(-x)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
    .. versionadded:: 1.7.0
    The results have only been tested up to degree 100; higher degrees may
    be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
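    Examples
    --------
    A quick sanity sketch: with ``deg = 3`` the rule integrates polynomials
    up to degree 5 exactly against :math:`\exp(-x)`, so the integral of
    :math:`x^2 \exp(-x)` over :math:`[0, \infty]`, which equals 2, is
    recovered to machine precision.
    >>> x, w = laggauss(3)
    >>> bool(abs(np.dot(w, x**2) - 2) < 1e-12)
    True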
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = lagcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = lagval(x, c)
df = lagval(x, lagder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = lagval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# scale w to get the right value, 1 in this case
w /= w.sum()
return x, w
def lagweight(x):
"""Weight function of the Laguerre polynomials.
    The weight function is :math:`\exp(-x)` and the interval of integration
    is :math:`[0, \infty]`. The Laguerre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
    .. versionadded:: 1.7.0
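    Examples
    --------
    A short sketch (printed values follow numpy's rounded repr):
    >>> lagweight(np.array([0.0, 1.0]))  # exp(-0) and exp(-1)
    array([ 1.        ,  0.36787944])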
"""
w = np.exp(-x)
return w
#
# Laguerre series class
#
class Laguerre(ABCPolyBase):
"""A Laguerre series class.
The Laguerre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        Laguerre coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [0, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [0, 1].
.. versionadded:: 1.6.0
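    Examples
    --------
    A brief usage sketch:
    >>> from numpy.polynomial import Laguerre
    >>> p = Laguerre([1, 2, 3])  # 1*L_0(x) + 2*L_1(x) + 3*L_2(x)
    >>> p(0.5)                   # 1*1 + 2*0.5 + 3*0.125
    2.375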
"""
# Virtual Functions
_add = staticmethod(lagadd)
_sub = staticmethod(lagsub)
_mul = staticmethod(lagmul)
_div = staticmethod(lagdiv)
_pow = staticmethod(lagpow)
_val = staticmethod(lagval)
_int = staticmethod(lagint)
_der = staticmethod(lagder)
_fit = staticmethod(lagfit)
_line = staticmethod(lagline)
_roots = staticmethod(lagroots)
_fromroots = staticmethod(lagfromroots)
# Virtual properties
nickname = 'lag'
domain = np.array(lagdomain)
window = np.array(lagdomain)
| mit | 8,757,384,447,060,017,000 | -1,924,022,416,249,775,400 | 30.087079 | 79 | 0.592193 | false |
DanteOnline/free-art | venv/lib/python3.4/site-packages/django/conf/locale/pt_BR/formats.py | 504 | 1434 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| gpl-3.0 | -5,415,795,086,803,233,000 | -377,918,503,209,876,500 | 38.805556 | 90 | 0.558269 | false |
abligh/xen4.2-minideb | tools/xm-test/tests/block-destroy/01_block-destroy_btblock_pos.py | 42 | 1058 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Dan Smith <danms@us.ibm.com>
from XmTestLib import *
from XmTestLib.block_utils import block_detach
if ENABLE_HVM_SUPPORT:
SKIP("Block-detach not supported for HVM domains")
config = {"disk":"phy:/dev/ram0,xvda1,w"}
domain = XmTestDomain(extraConfig=config)
try:
console = domain.start()
except DomainError, e:
if verbose:
print e.extra
FAIL("Unable to create domain")
try:
console.setHistorySaveCmds(value=True)
run = console.runCmd("cat /proc/partitions | grep xvda1")
run2 = console.runCmd("cat /proc/partitions")
except ConsoleError, e:
FAIL(str(e))
if run["return"] != 0:
FAIL("block device isn't attached; can't detach!")
block_detach(domain, "xvda1")
try:
run = console.runCmd("cat /proc/partitions | grep xvda1")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL(str(e))
domain.closeConsole()
domain.stop()
if run["return"] == 0:
FAIL("domU reported block device still connected!")
| gpl-2.0 | -8,557,119,938,035,515,000 | 6,489,077,180,249,035,000 | 23.045455 | 61 | 0.694707 | false |
strands-project/robomongo | src/third-party/mongodb/buildscripts/test_shell.py | 13 | 8877 | # Copyright 2009 10gen, Inc.
#
# This file is part of MongoDB.
#
# MongoDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MongoDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with MongoDB. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the MongoDB shell.
Right now these mostly just test that the shell handles command line arguments
appropriately.
"""
import unittest
import sys
import subprocess
import os
"""Exit codes for MongoDB."""
BADOPTS = 2
NOCONNECT = 255
"""Path to the mongo shell executable to be tested."""
mongo_path = None
class TestShell(unittest.TestCase):
def open_mongo(self, args=[]):
"""Get a subprocess.Popen instance of the shell with the given args.
"""
return subprocess.Popen([mongo_path] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
def setUp(self):
assert mongo_path
def test_help(self):
mongo_h = self.open_mongo(["-h"])
mongo_help = self.open_mongo(["--help"])
out = mongo_h.communicate()
self.assertEqual(out, mongo_help.communicate())
self.assert_("usage:" in out[0])
self.assertEqual(0, mongo_h.returncode)
self.assertEqual(0, mongo_help.returncode)
def test_nodb(self):
mongo = self.open_mongo([])
mongo_nodb = self.open_mongo(["--nodb"])
out = mongo_nodb.communicate()
self.assert_("MongoDB shell version" in out[0])
self.assert_("bye" in out[0])
self.assert_("couldn't connect" not in out[0])
self.assertEqual(0, mongo_nodb.returncode)
out = mongo.communicate()
self.assert_("MongoDB shell version" in out[0])
self.assert_("bye" not in out[0])
self.assert_("couldn't connect" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
def test_eval(self):
mongo = self.open_mongo(["--nodb", "--eval", "print('hello world');"])
out = mongo.communicate()
self.assert_("hello world" in out[0])
self.assert_("bye" not in out[0])
self.assertEqual(0, mongo.returncode)
mongo = self.open_mongo(["--eval"])
out = mongo.communicate()
self.assert_("required parameter is missing" in out[0])
self.assertEqual(BADOPTS, mongo.returncode)
def test_shell(self):
mongo = self.open_mongo(["--nodb", "--shell", "--eval", "print('hello world');"])
out = mongo.communicate()
self.assert_("hello world" in out[0])
self.assert_("bye" in out[0]) # the shell started and immediately exited because stdin was empty
self.assertEqual(0, mongo.returncode)
def test_host_port(self):
mongo = self.open_mongo([])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: test" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["--host", "localhost"])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: localhost/test" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["--port", "27018"])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: 127.0.0.1:27018" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["--host", "localhost", "--port", "27018"])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: localhost:27018/test" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["--host"])
out = mongo.communicate()
self.assert_("required parameter is missing" in out[0])
self.assertEqual(BADOPTS, mongo.returncode)
mongo = self.open_mongo(["--port"])
out = mongo.communicate()
self.assert_("required parameter is missing" in out[0])
self.assertEqual(BADOPTS, mongo.returncode)
def test_positionals(self):
dirname = os.path.dirname(__file__)
test_js = os.path.join(dirname, "testdata/test.js")
test_txt = os.path.join(dirname, "testdata/test.txt")
test = os.path.join(dirname, "testdata/test")
non_exist_js = os.path.join(dirname, "testdata/nonexist.js")
non_exist_txt = os.path.join(dirname, "testdata/nonexist.txt")
mongo = self.open_mongo(["--nodb", test_js])
out = mongo.communicate()
self.assert_("hello world" in out[0])
self.assert_("bye" not in out[0])
self.assertEqual(0, mongo.returncode)
mongo = self.open_mongo(["--nodb", test_txt])
out = mongo.communicate()
self.assert_("foobar" in out[0])
self.assert_("bye" not in out[0])
self.assertEqual(0, mongo.returncode)
mongo = self.open_mongo([test_js, test, test_txt])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: test" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo([test_txt, test, test_js])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: test" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo([test, test_js, test_txt])
out = mongo.communicate()
self.assert_("url: " + test in out[0])
self.assert_("connecting to: " + test in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo([non_exist_js, test, test_txt])
out = mongo.communicate()
self.assert_("url: test" in out[0])
self.assert_("connecting to: test" in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo([non_exist_txt, test_js, test_txt])
out = mongo.communicate()
self.assert_("url: " + non_exist_txt in out[0])
self.assert_("connecting to: " + non_exist_txt in out[0])
self.assertEqual(NOCONNECT, mongo.returncode)
def test_multiple_files(self):
dirname = os.path.dirname(__file__)
test_js = os.path.join(dirname, "testdata/test.js")
test_txt = os.path.join(dirname, "testdata/test.txt")
mongo = self.open_mongo(["--nodb", test_js, test_txt])
out = mongo.communicate()
self.assert_("hello world" in out[0])
self.assert_("foobar" in out[0])
self.assert_("bye" not in out[0])
self.assertEqual(0, mongo.returncode)
mongo = self.open_mongo(["--shell", "--nodb", test_js, test_txt])
out = mongo.communicate()
self.assert_("hello world" in out[0])
self.assert_("foobar" in out[0])
self.assert_("bye" in out[0])
self.assertEqual(0, mongo.returncode)
# just testing that they don't blow up
def test_username_and_password(self):
mongo = self.open_mongo(["--username", "mike"])
out = mongo.communicate()
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["-u", "mike"])
out = mongo.communicate()
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["--password", "mike"])
out = mongo.communicate()
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["-p", "mike"])
out = mongo.communicate()
self.assertEqual(NOCONNECT, mongo.returncode)
mongo = self.open_mongo(["--username"])
out = mongo.communicate()
self.assert_("required parameter is missing" in out[0])
self.assertEqual(BADOPTS, mongo.returncode)
mongo = self.open_mongo(["--password"])
out = mongo.communicate()
self.assert_("required parameter is missing" in out[0])
self.assertEqual(BADOPTS, mongo.returncode)
def run_tests():
suite = unittest.TestLoader().loadTestsFromTestCase(TestShell)
unittest.TextTestRunner(verbosity=1).run(suite)
if __name__ == "__main__":
if len(sys.argv) != 2:
print "must give the path to shell executable to be tested"
sys.exit()
mongo_path = sys.argv[1]
run_tests()
| gpl-3.0 | 3,105,826,937,737,200,600 | 827,062,742,952,107,000 | 36.142259 | 104 | 0.613496 | false |
mrquim/mrquimrepo | script.module.schism.common/lib/bs4/tests/test_tree.py | 36 | 78105 | # -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
from pdb import set_trace
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
PY3K,
CData,
Comment,
Declaration,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
def assertSelects(self, tags, should_match):
"""Make sure that the given tags have the correct text.
This is used in tests that define a bunch of tags, each
containing a single string, and then select certain strings by
some mechanism.
"""
self.assertEqual([tag.string for tag in tags], should_match)
def assertSelectsIDs(self, tags, should_match):
"""Make sure that the given tags have the correct IDs.
This is used in tests that define a bunch of tags, each
containing a single string, and then select certain strings by
some mechanism.
"""
self.assertEqual([tag['id'] for tag in tags], should_match)
class TestFind(TreeTest):
"""Basic tests of the find() method.
find() just calls find_all() with limit=1, so it's not tested all
that thouroughly here.
"""
def test_find_tag(self):
soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
self.assertEqual(soup.find("b").string, "2")
def test_unicode_text_find(self):
soup = self.soup(u'<h1>Räksmörgås</h1>')
self.assertEqual(soup.find(string=u'Räksmörgås'), u'Räksmörgås')
def test_unicode_attribute_find(self):
soup = self.soup(u'<h1 id="Räksmörgås">here it is</h1>')
str(soup)
self.assertEqual("here it is", soup.find(id=u'Räksmörgås').text)
def test_find_everything(self):
"""Test an optimization that finds all tags."""
soup = self.soup("<a>foo</a><b>bar</b>")
self.assertEqual(2, len(soup.find_all()))
def test_find_everything_with_name(self):
"""Test an optimization that finds all tags with a given name."""
soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>")
self.assertEqual(2, len(soup.find_all('a')))
class TestFindAll(TreeTest):
"""Basic tests of the find_all() method."""
def test_find_all_text_nodes(self):
"""You can search the tree for text nodes."""
soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
# Exact match.
self.assertEqual(soup.find_all(string="bar"), [u"bar"])
self.assertEqual(soup.find_all(text="bar"), [u"bar"])
# Match any of a number of strings.
self.assertEqual(
soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"])
# Match a regular expression.
self.assertEqual(soup.find_all(text=re.compile('.*')),
[u"Foo", u"bar", u'\xbb'])
# Match anything.
self.assertEqual(soup.find_all(text=True),
[u"Foo", u"bar", u'\xbb'])
def test_find_all_limit(self):
"""You can limit the number of items returned by find_all."""
soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
self.assertSelects(soup.find_all('a', limit=1), ["1"])
self.assertSelects(
soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
# A limit of 0 means no limit.
self.assertSelects(
soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
def test_calling_a_tag_is_calling_findall(self):
soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
self.assertSelects(soup('a', limit=1), ["1"])
self.assertSelects(soup.b(id="foo"), ["3"])
def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self):
soup = self.soup("<a></a>")
# Create a self-referential list.
l = []
l.append(l)
# Without special code in _normalize_search_value, this would cause infinite
# recursion.
self.assertEqual([], soup.find_all(l))
def test_find_all_resultset(self):
"""All find_all calls return a ResultSet"""
soup = self.soup("<a></a>")
result = soup.find_all("a")
self.assertTrue(hasattr(result, "source"))
result = soup.find_all(True)
self.assertTrue(hasattr(result, "source"))
result = soup.find_all(text="foo")
self.assertTrue(hasattr(result, "source"))
class TestFindAllBasicNamespaces(TreeTest):
def test_find_by_namespaced_name(self):
soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">')
self.assertEqual("4", soup.find("mathml:msqrt").string)
self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name)
class TestFindAllByName(TreeTest):
"""Test ways of finding tags by tag name."""
def setUp(self):
super(TreeTest, self).setUp()
self.tree = self.soup("""<a>First tag.</a>
<b>Second tag.</b>
<c>Third <a>Nested tag.</a> tag.</c>""")
def test_find_all_by_tag_name(self):
# Find all the <a> tags.
self.assertSelects(
self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
def test_find_all_by_name_and_text(self):
self.assertSelects(
self.tree.find_all('a', text='First tag.'), ['First tag.'])
self.assertSelects(
self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.'])
self.assertSelects(
self.tree.find_all('a', text=re.compile("tag")),
['First tag.', 'Nested tag.'])
def test_find_all_on_non_root_element(self):
# You can call find_all on any node, not just the root.
self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
def test_calling_element_invokes_find_all(self):
self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
def test_find_all_by_tag_strainer(self):
self.assertSelects(
self.tree.find_all(SoupStrainer('a')),
['First tag.', 'Nested tag.'])
def test_find_all_by_tag_names(self):
self.assertSelects(
self.tree.find_all(['a', 'b']),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_by_tag_dict(self):
self.assertSelects(
self.tree.find_all({'a' : True, 'b' : True}),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_by_tag_re(self):
self.assertSelects(
self.tree.find_all(re.compile('^[ab]$')),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_with_tags_matching_method(self):
# You can define an oracle method that determines whether
# a tag matches the search.
def id_matches_name(tag):
return tag.name == tag.get('id')
tree = self.soup("""<a id="a">Match 1.</a>
<a id="1">Does not match.</a>
<b id="b">Match 2.</a>""")
self.assertSelects(
tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
def test_find_with_multi_valued_attribute(self):
soup = self.soup(
"<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>"
)
r1 = soup.find('div', 'a d');
r2 = soup.find('div', re.compile(r'a d'));
r3, r4 = soup.find_all('div', ['a b', 'a d']);
self.assertEqual('3', r1.string)
self.assertEqual('3', r2.string)
self.assertEqual('1', r3.string)
self.assertEqual('3', r4.string)
class TestFindAllByAttribute(TreeTest):
def test_find_all_by_attribute_name(self):
# You can pass in keyword arguments to find_all to search by
# attribute.
tree = self.soup("""
<a id="first">Matching a.</a>
<a id="second">
Non-matching <b id="first">Matching b.</b>a.
</a>""")
self.assertSelects(tree.find_all(id='first'),
["Matching a.", "Matching b."])
def test_find_all_by_utf8_attribute_value(self):
peace = u"םולש".encode("utf8")
data = u'<a title="םולש"></a>'.encode("utf8")
soup = self.soup(data)
self.assertEqual([soup.a], soup.find_all(title=peace))
self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8")))
self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"]))
def test_find_all_by_attribute_dict(self):
# You can pass in a dictionary as the argument 'attrs'. This
# lets you search for attributes like 'name' (a fixed argument
# to find_all) and 'class' (a reserved word in Python.)
tree = self.soup("""
<a name="name1" class="class1">Name match.</a>
<a name="name2" class="class2">Class match.</a>
<a name="name3" class="class3">Non-match.</a>
<name1>A tag called 'name1'.</name1>
""")
# This doesn't do what you want.
self.assertSelects(tree.find_all(name='name1'),
["A tag called 'name1'."])
# This does what you want.
self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
["Name match."])
self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
["Class match."])
def test_find_all_by_class(self):
tree = self.soup("""
<a class="1">Class 1.</a>
<a class="2">Class 2.</a>
<b class="1">Class 1.</b>
<c class="3 4">Class 3 and 4.</c>
""")
# Passing in the class_ keyword argument will search against
# the 'class' attribute.
self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.'])
self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.'])
self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.'])
# Passing in a string to 'attrs' will also search the CSS class.
self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])
def test_find_by_class_when_multiple_classes_present(self):
tree = self.soup("<gar class='foo bar'>Found it</gar>")
f = tree.find_all("gar", class_=re.compile("o"))
self.assertSelects(f, ["Found it"])
f = tree.find_all("gar", class_=re.compile("a"))
self.assertSelects(f, ["Found it"])
# If the search fails to match the individual strings "foo" and "bar",
# it will be tried against the combined string "foo bar".
f = tree.find_all("gar", class_=re.compile("o b"))
self.assertSelects(f, ["Found it"])
def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
soup = self.soup("<a class='bar'>Found it</a>")
self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
def big_attribute_value(value):
return len(value) > 3
self.assertSelects(soup.find_all("a", big_attribute_value), [])
def small_attribute_value(value):
return len(value) <= 3
self.assertSelects(
soup.find_all("a", small_attribute_value), ["Found it"])
def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
a, a2 = soup.find_all("a")
self.assertEqual([a, a2], soup.find_all("a", "foo"))
self.assertEqual([a], soup.find_all("a", "bar"))
# If you specify the class as a string that contains a
# space, only that specific value will be found.
self.assertEqual([a], soup.find_all("a", class_="foo bar"))
self.assertEqual([a], soup.find_all("a", "foo bar"))
self.assertEqual([], soup.find_all("a", "bar foo"))
def test_find_all_by_attribute_soupstrainer(self):
tree = self.soup("""
<a id="first">Match.</a>
<a id="second">Non-match.</a>""")
strainer = SoupStrainer(attrs={'id' : 'first'})
self.assertSelects(tree.find_all(strainer), ['Match.'])
def test_find_all_with_missing_attribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that do not have that attribute set.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(tree.find_all('a', id=None), ["No ID present."])
def test_find_all_with_defined_attribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that have that attribute set to any value.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(
tree.find_all(id=True), ["ID present.", "ID is empty."])
def test_find_all_with_numeric_attribute(self):
# If you search for a number, it's treated as a string.
tree = self.soup("""<a id=1>Unquoted attribute.</a>
<a id="1">Quoted attribute.</a>""")
expected = ["Unquoted attribute.", "Quoted attribute."]
self.assertSelects(tree.find_all(id=1), expected)
self.assertSelects(tree.find_all(id="1"), expected)
def test_find_all_with_list_attribute_values(self):
# You can pass a list of attribute values instead of just one,
# and you'll get tags that match any of the values.
tree = self.soup("""<a id="1">1</a>
<a id="2">2</a>
<a id="3">3</a>
<a>No ID.</a>""")
self.assertSelects(tree.find_all(id=["1", "3", "4"]),
["1", "3"])
def test_find_all_with_regular_expression_attribute_value(self):
# You can pass a regular expression as an attribute value, and
# you'll get tags whose values for that attribute match the
# regular expression.
tree = self.soup("""<a id="a">One a.</a>
<a id="aa">Two as.</a>
<a id="ab">Mixed as and bs.</a>
<a id="b">One b.</a>
<a>No ID.</a>""")
self.assertSelects(tree.find_all(id=re.compile("^a+$")),
["One a.", "Two as."])
def test_find_by_name_and_containing_string(self):
soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
a = soup.a
self.assertEqual([a], soup.find_all("a", text="foo"))
self.assertEqual([], soup.find_all("a", text="bar"))
self.assertEqual([], soup.find_all("a", text="bar"))
def test_find_by_name_and_containing_string_when_string_is_buried(self):
soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))
def test_find_by_attribute_and_containing_string(self):
soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
a = soup.a
self.assertEqual([a], soup.find_all(id=2, text="foo"))
self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
"""Test Tag.index"""
def test_index(self):
tree = self.soup("""<div>
<a>Identical</a>
<b>Not identical</b>
<a>Identical</a>
<c><d>Identical with child</d></c>
<b>Also not identical</b>
<c><d>Identical with child</d></c>
</div>""")
div = tree.div
for i, element in enumerate(div.contents):
self.assertEqual(i, div.index(element))
self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
"""Test navigation and searching through an element's parents."""
def setUp(self):
super(TestParentOperations, self).setUp()
self.tree = self.soup('''<ul id="empty"></ul>
<ul id="top">
<ul id="middle">
<ul id="bottom">
<b>Start here</b>
</ul>
</ul>''')
self.start = self.tree.b
def test_parent(self):
self.assertEqual(self.start.parent['id'], 'bottom')
self.assertEqual(self.start.parent.parent['id'], 'middle')
self.assertEqual(self.start.parent.parent.parent['id'], 'top')
def test_parent_of_top_tag_is_soup_object(self):
top_tag = self.tree.contents[0]
self.assertEqual(top_tag.parent, self.tree)
def test_soup_object_has_no_parent(self):
self.assertEqual(None, self.tree.parent)
def test_find_parents(self):
self.assertSelectsIDs(
self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
self.assertSelectsIDs(
self.start.find_parents('ul', id="middle"), ['middle'])
def test_find_parent(self):
self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top')
def test_parent_of_text_element(self):
text = self.tree.find(text="Start here")
self.assertEqual(text.parent.name, 'b')
def test_text_element_find_parent(self):
text = self.tree.find(text="Start here")
self.assertEqual(text.find_parent('ul')['id'], 'bottom')
def test_parent_generator(self):
parents = [parent['id'] for parent in self.start.parents
if parent is not None and 'id' in parent.attrs]
self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
def setUp(self):
super(TreeTest, self).setUp()
self.tree = self.soup(
'<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
def setUp(self):
super(TestNextOperations, self).setUp()
self.start = self.tree.b
def test_next(self):
self.assertEqual(self.start.next_element, "One")
self.assertEqual(self.start.next_element.next_element['id'], "2")
def test_next_of_last_item_is_none(self):
last = self.tree.find(text="Three")
self.assertEqual(last.next_element, None)
def test_next_of_root_is_none(self):
# The document root is outside the next/previous chain.
self.assertEqual(self.tree.next_element, None)
def test_find_all_next(self):
self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
self.start.find_all_next(id=3)
self.assertSelects(self.start.find_all_next(id=3), ["Three"])
def test_find_next(self):
self.assertEqual(self.start.find_next('b')['id'], '2')
self.assertEqual(self.start.find_next(text="Three"), "Three")
def test_find_next_for_text_element(self):
text = self.tree.find(text="One")
self.assertEqual(text.find_next("b").string, "Two")
self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
def test_next_generator(self):
start = self.tree.find(text="Two")
successors = [node for node in start.next_elements]
# There are two successors: the final <b> tag and its text contents.
tag, contents = successors
self.assertEqual(tag['id'], '3')
self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
def setUp(self):
super(TestPreviousOperations, self).setUp()
self.end = self.tree.find(text="Three")
def test_previous(self):
self.assertEqual(self.end.previous_element['id'], "3")
self.assertEqual(self.end.previous_element.previous_element, "Two")
def test_previous_of_first_item_is_none(self):
first = self.tree.find('html')
self.assertEqual(first.previous_element, None)
def test_previous_of_root_is_none(self):
# The document root is outside the next/previous chain.
# XXX This is broken!
#self.assertEqual(self.tree.previous_element, None)
pass
def test_find_all_previous(self):
# The <b> tag containing the "Three" node is the predecessor
# of the "Three" node itself, which is why "Three" shows up
# here.
self.assertSelects(
self.end.find_all_previous('b'), ["Three", "Two", "One"])
self.assertSelects(self.end.find_all_previous(id=1), ["One"])
def test_find_previous(self):
self.assertEqual(self.end.find_previous('b')['id'], '3')
self.assertEqual(self.end.find_previous(text="One"), "One")
def test_find_previous_for_text_element(self):
text = self.tree.find(text="Three")
self.assertEqual(text.find_previous("b").string, "Three")
self.assertSelects(
text.find_all_previous("b"), ["Three", "Two", "One"])
def test_previous_generator(self):
start = self.tree.find(text="One")
predecessors = [node for node in start.previous_elements]
# There are four predecessors: the <b> tag containing "One"
# the <body> tag, the <head> tag, and the <html> tag.
b, body, head, html = predecessors
self.assertEqual(b['id'], '1')
self.assertEqual(body.name, "body")
self.assertEqual(head.name, "head")
self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
def setUp(self):
super(SiblingTest, self).setUp()
markup = '''<html>
<span id="1">
<span id="1.1"></span>
</span>
<span id="2">
<span id="2.1"></span>
</span>
<span id="3">
<span id="3.1"></span>
</span>
<span id="4"></span>
</html>'''
# All that whitespace looks good but makes the tests more
# difficult. Get rid of it.
markup = re.compile("\n\s*").sub("", markup)
self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
def setUp(self):
super(TestNextSibling, self).setUp()
self.start = self.tree.find(id="1")
def test_next_sibling_of_root_is_none(self):
self.assertEqual(self.tree.next_sibling, None)
def test_next_sibling(self):
self.assertEqual(self.start.next_sibling['id'], '2')
self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
# Note the difference between next_sibling and next_element.
self.assertEqual(self.start.next_element['id'], '1.1')
def test_next_sibling_may_not_exist(self):
self.assertEqual(self.tree.html.next_sibling, None)
nested_span = self.tree.find(id="1.1")
self.assertEqual(nested_span.next_sibling, None)
last_span = self.tree.find(id="4")
self.assertEqual(last_span.next_sibling, None)
def test_find_next_sibling(self):
self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
def test_next_siblings(self):
self.assertSelectsIDs(self.start.find_next_siblings("span"),
['2', '3', '4'])
self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])
def test_next_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="Foo")
self.assertEqual(start.next_sibling.name, 'b')
self.assertEqual(start.next_sibling.next_sibling, 'baz')
self.assertSelects(start.find_next_siblings('b'), ['bar'])
self.assertEqual(start.find_next_sibling(text="baz"), "baz")
self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
def setUp(self):
super(TestPreviousSibling, self).setUp()
self.end = self.tree.find(id="4")
def test_previous_sibling_of_root_is_none(self):
self.assertEqual(self.tree.previous_sibling, None)
def test_previous_sibling(self):
self.assertEqual(self.end.previous_sibling['id'], '3')
self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
# Note the difference between previous_sibling and previous_element.
self.assertEqual(self.end.previous_element['id'], '3.1')
def test_previous_sibling_may_not_exist(self):
self.assertEqual(self.tree.html.previous_sibling, None)
nested_span = self.tree.find(id="1.1")
self.assertEqual(nested_span.previous_sibling, None)
first_span = self.tree.find(id="1")
self.assertEqual(first_span.previous_sibling, None)
def test_find_previous_sibling(self):
self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
def test_previous_siblings(self):
self.assertSelectsIDs(self.end.find_previous_siblings("span"),
['3', '2', '1'])
self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])
def test_previous_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="baz")
self.assertEqual(start.previous_sibling.name, 'b')
self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
self.assertSelects(start.find_previous_siblings('b'), ['bar'])
self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
"""Test the ability to create new tags."""
def test_new_tag(self):
soup = self.soup("")
new_tag = soup.new_tag("foo", bar="baz")
self.assertTrue(isinstance(new_tag, Tag))
self.assertEqual("foo", new_tag.name)
self.assertEqual(dict(bar="baz"), new_tag.attrs)
self.assertEqual(None, new_tag.parent)
def test_tag_inherits_self_closing_rules_from_builder(self):
if XML_BUILDER_PRESENT:
xml_soup = BeautifulSoup("", "lxml-xml")
xml_br = xml_soup.new_tag("br")
xml_p = xml_soup.new_tag("p")
# Both the <br> and <p> tag are empty-element, just because
# they have no contents.
self.assertEqual(b"<br/>", xml_br.encode())
self.assertEqual(b"<p/>", xml_p.encode())
html_soup = BeautifulSoup("", "html.parser")
html_br = html_soup.new_tag("br")
html_p = html_soup.new_tag("p")
# The HTML builder users HTML's rules about which tags are
# empty-element tags, and the new tags reflect these rules.
self.assertEqual(b"<br/>", html_br.encode())
self.assertEqual(b"<p></p>", html_p.encode())
def test_new_string_creates_navigablestring(self):
soup = self.soup("")
s = soup.new_string("foo")
self.assertEqual("foo", s)
self.assertTrue(isinstance(s, NavigableString))
def test_new_string_can_create_navigablestring_subclass(self):
soup = self.soup("")
s = soup.new_string("foo", Comment)
self.assertEqual("foo", s)
self.assertTrue(isinstance(s, Comment))
class TestTreeModification(SoupTest):
def test_attribute_modification(self):
soup = self.soup('<a id="1"></a>')
soup.a['id'] = 2
self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
del(soup.a['id'])
self.assertEqual(soup.decode(), self.document_for('<a></a>'))
soup.a['id2'] = 'foo'
self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))
def test_new_tag_creation(self):
builder = builder_registry.lookup('html')()
soup = self.soup("<body></body>", builder=builder)
a = Tag(soup, builder, 'a')
ol = Tag(soup, builder, 'ol')
a['href'] = 'http://foo.com/'
soup.body.insert(0, a)
soup.body.insert(1, ol)
self.assertEqual(
soup.body.encode(),
b'<body><a href="http://foo.com/"></a><ol></ol></body>')
def test_append_to_contents_moves_tag(self):
doc = """<p id="1">Don't leave me <b>here</b>.</p>
<p id="2">Don\'t leave!</p>"""
soup = self.soup(doc)
second_para = soup.find(id='2')
bold = soup.b
# Move the <b> tag to the end of the second paragraph.
soup.find(id='2').append(soup.b)
# The <b> tag is now a child of the second paragraph.
self.assertEqual(bold.parent, second_para)
self.assertEqual(
soup.decode(), self.document_for(
'<p id="1">Don\'t leave me .</p>\n'
'<p id="2">Don\'t leave!<b>here</b></p>'))
def test_replace_with_returns_thing_that_was_replaced(self):
text = "<a></a><b><c></c></b>"
soup = self.soup(text)
a = soup.a
new_a = a.replace_with(soup.c)
self.assertEqual(a, new_a)
def test_unwrap_returns_thing_that_was_replaced(self):
text = "<a><b></b><c></c></a>"
soup = self.soup(text)
a = soup.a
new_a = a.unwrap()
self.assertEqual(a, new_a)
def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self):
soup = self.soup("<a><b>Foo</b></a><c>Bar</c>")
a = soup.a
a.extract()
self.assertEqual(None, a.parent)
self.assertRaises(ValueError, a.unwrap)
self.assertRaises(ValueError, a.replace_with, soup.c)
def test_replace_tag_with_itself(self):
text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
soup = self.soup(text)
c = soup.c
soup.c.replace_with(c)
self.assertEqual(soup.decode(), self.document_for(text))
def test_replace_tag_with_its_parent_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.b.replace_with, soup.a)
def test_insert_tag_into_itself_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.a.insert, 0, soup.a)
def test_replace_with_maintains_next_element_throughout(self):
soup = self.soup('<p><a>one</a><b>three</b></p>')
a = soup.a
b = a.contents[0]
# Make it so the <a> tag has two text children.
a.insert(1, "two")
# Now replace each one with the empty string.
left, right = a.contents
left.replaceWith('')
right.replaceWith('')
# The <b> tag is still connected to the tree.
self.assertEqual("three", soup.b.string)
def test_replace_final_node(self):
soup = self.soup("<b>Argh!</b>")
soup.find(text="Argh!").replace_with("Hooray!")
new_text = soup.find(text="Hooray!")
b = soup.b
self.assertEqual(new_text.previous_element, b)
self.assertEqual(new_text.parent, b)
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.next_element, None)
def test_consecutive_text_nodes(self):
# A builder should never create two consecutive text nodes,
# but if you insert one next to another, Beautiful Soup will
# handle it correctly.
soup = self.soup("<a><b>Argh!</b><c></c></a>")
soup.b.insert(1, "Hooray!")
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Argh!Hooray!</b><c></c></a>"))
new_text = soup.find(text="Hooray!")
self.assertEqual(new_text.previous_element, "Argh!")
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.previous_sibling, "Argh!")
self.assertEqual(new_text.previous_sibling.next_sibling, new_text)
self.assertEqual(new_text.next_sibling, None)
self.assertEqual(new_text.next_element, soup.c)
def test_insert_string(self):
soup = self.soup("<a></a>")
soup.a.insert(0, "bar")
soup.a.insert(0, "foo")
# The string were added to the tag.
self.assertEqual(["foo", "bar"], soup.a.contents)
# And they were converted to NavigableStrings.
self.assertEqual(soup.a.contents[0].next_element, "bar")
def test_insert_tag(self):
builder = self.default_builder
soup = self.soup(
"<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
magic_tag = Tag(soup, builder, 'magictag')
magic_tag.insert(0, "the")
soup.a.insert(1, magic_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))
# Make sure all the relationships are hooked up correctly.
b_tag = soup.b
self.assertEqual(b_tag.next_sibling, magic_tag)
self.assertEqual(magic_tag.previous_sibling, b_tag)
find = b_tag.find(text="Find")
self.assertEqual(find.next_element, magic_tag)
self.assertEqual(magic_tag.previous_element, find)
c_tag = soup.c
self.assertEqual(magic_tag.next_sibling, c_tag)
self.assertEqual(c_tag.previous_sibling, magic_tag)
the = magic_tag.find(text="the")
self.assertEqual(the.parent, magic_tag)
self.assertEqual(the.next_element, c_tag)
self.assertEqual(c_tag.previous_element, the)
def test_append_child_thats_already_at_the_end(self):
data = "<a><b></b></a>"
soup = self.soup(data)
soup.a.append(soup.b)
self.assertEqual(data, soup.decode())
def test_move_tag_to_beginning_of_parent(self):
data = "<a><b></b><c></c><d></d></a>"
soup = self.soup(data)
soup.a.insert(0, soup.d)
self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode())
def test_insert_works_on_empty_element_tag(self):
# This is a little strange, since most HTML parsers don't allow
# markup like this to come through. But in general, we don't
# know what the parser would or wouldn't have allowed, so
# I'm letting this succeed for now.
soup = self.soup("<br/>")
soup.br.insert(1, "Contents")
self.assertEqual(str(soup.br), "<br>Contents</br>")
def test_insert_before(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_before("BAZ")
soup.a.insert_before("QUUX")
self.assertEqual(
soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))
soup.a.insert_before(soup.b)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_after("BAZ")
soup.a.insert_after("QUUX")
self.assertEqual(
soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))
soup.b.insert_after(soup.a)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after_raises_exception_if_after_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_after, tag)
self.assertRaises(NotImplementedError, soup.insert_after, tag)
self.assertRaises(ValueError, tag.insert_after, tag)
def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_before, tag)
self.assertRaises(NotImplementedError, soup.insert_before, tag)
self.assertRaises(ValueError, tag.insert_before, tag)
def test_replace_with(self):
soup = self.soup(
"<p>There's <b>no</b> business like <b>show</b> business</p>")
no, show = soup.find_all('b')
show.replace_with(no)
self.assertEqual(
soup.decode(),
self.document_for(
"<p>There's business like <b>no</b> business</p>"))
self.assertEqual(show.parent, None)
self.assertEqual(no.parent, soup.p)
self.assertEqual(no.next_element, "no")
self.assertEqual(no.next_sibling, " business")
def test_replace_first_child(self):
data = "<a><b></b><c></c></a>"
soup = self.soup(data)
soup.b.replace_with(soup.c)
self.assertEqual("<a><c></c></a>", soup.decode())
def test_replace_last_child(self):
data = "<a><b></b><c></c></a>"
soup = self.soup(data)
soup.c.replace_with(soup.b)
self.assertEqual("<a><b></b></a>", soup.decode())
def test_nested_tag_replace_with(self):
soup = self.soup(
"""<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
# Replace the entire <b> tag and its contents ("reserve the
# right") with the <f> tag ("refuse").
remove_tag = soup.b
move_tag = soup.f
remove_tag.replace_with(move_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a>We<f>refuse</f></a><e>to<g>service</g></e>"))
# The <b> tag is now an orphan.
self.assertEqual(remove_tag.parent, None)
self.assertEqual(remove_tag.find(text="right").next_element, None)
self.assertEqual(remove_tag.previous_element, None)
self.assertEqual(remove_tag.next_sibling, None)
self.assertEqual(remove_tag.previous_sibling, None)
# The <f> tag is now connected to the <a> tag.
self.assertEqual(move_tag.parent, soup.a)
self.assertEqual(move_tag.previous_element, "We")
self.assertEqual(move_tag.next_element.next_element, soup.e)
self.assertEqual(move_tag.next_sibling, None)
# The gap where the <f> tag used to be has been mended, and
# the word "to" is now connected to the <g> tag.
to_text = soup.find(text="to")
g_tag = soup.g
self.assertEqual(to_text.next_element, g_tag)
self.assertEqual(to_text.next_sibling, g_tag)
self.assertEqual(g_tag.previous_element, to_text)
self.assertEqual(g_tag.previous_sibling, to_text)
def test_unwrap(self):
tree = self.soup("""
<p>Unneeded <em>formatting</em> is unneeded</p>
""")
tree.em.unwrap()
self.assertEqual(tree.em, None)
self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")
def test_wrap(self):
soup = self.soup("I wish I was bold.")
value = soup.string.wrap(soup.new_tag("b"))
self.assertEqual(value.decode(), "<b>I wish I was bold.</b>")
self.assertEqual(
soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
def test_wrap_extracts_tag_from_elsewhere(self):
soup = self.soup("<b></b>I wish I was bold.")
soup.b.next_sibling.wrap(soup.b)
self.assertEqual(
soup.decode(), self.document_for("<b>I wish I was bold.</b>"))
def test_wrap_puts_new_contents_at_the_end(self):
soup = self.soup("<b>I like being bold.</b>I wish I was bold.")
soup.b.next_sibling.wrap(soup.b)
self.assertEqual(2, len(soup.b.contents))
self.assertEqual(
soup.decode(), self.document_for(
"<b>I like being bold.I wish I was bold.</b>"))
def test_extract(self):
soup = self.soup(
'<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')
self.assertEqual(len(soup.body.contents), 3)
extracted = soup.find(id="nav").extract()
self.assertEqual(
soup.decode(), "<html><body>Some content. More content.</body></html>")
self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')
# The extracted tag is now an orphan.
self.assertEqual(len(soup.body.contents), 2)
self.assertEqual(extracted.parent, None)
self.assertEqual(extracted.previous_element, None)
self.assertEqual(extracted.next_element.next_element, None)
# The gap where the extracted tag used to be has been mended.
content_1 = soup.find(text="Some content. ")
content_2 = soup.find(text=" More content.")
self.assertEqual(content_1.next_element, content_2)
self.assertEqual(content_1.next_sibling, content_2)
self.assertEqual(content_2.previous_element, content_1)
self.assertEqual(content_2.previous_sibling, content_1)
def test_extract_distinguishes_between_identical_strings(self):
soup = self.soup("<a>foo</a><b>bar</b>")
foo_1 = soup.a.string
bar_1 = soup.b.string
foo_2 = soup.new_string("foo")
bar_2 = soup.new_string("bar")
soup.a.append(foo_2)
soup.b.append(bar_2)
# Now there are two identical strings in the <a> tag, and two
# in the <b> tag. Let's remove the first "foo" and the second
# "bar".
foo_1.extract()
bar_2.extract()
self.assertEqual(foo_2, soup.a.string)
self.assertEqual(bar_2, soup.b.string)
def test_extract_multiples_of_same_tag(self):
soup = self.soup("""
<html>
<head>
<script>foo</script>
</head>
<body>
<script>bar</script>
<a></a>
</body>
<script>baz</script>
</html>""")
[soup.script.extract() for i in soup.find_all("script")]
self.assertEqual("<body>\n\n<a></a>\n</body>", unicode(soup.body))
def test_extract_works_when_element_is_surrounded_by_identical_strings(self):
soup = self.soup(
'<html>\n'
'<body>hi</body>\n'
'</html>')
soup.find('body').extract()
self.assertEqual(None, soup.find('body'))
def test_clear(self):
"""Tag.clear()"""
soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
# clear using extract()
a = soup.a
soup.p.clear()
self.assertEqual(len(soup.p.contents), 0)
self.assertTrue(hasattr(a, "contents"))
# clear using decompose()
em = a.em
a.clear(decompose=True)
self.assertEqual(0, len(em.contents))
def test_string_set(self):
"""Tag.string = 'string'"""
soup = self.soup("<a></a> <b><c></c></b>")
soup.a.string = "foo"
self.assertEqual(soup.a.contents, ["foo"])
soup.b.string = "bar"
self.assertEqual(soup.b.contents, ["bar"])
def test_string_set_does_not_affect_original_string(self):
soup = self.soup("<a><b>foo</b><c>bar</c>")
soup.b.string = soup.c.string
self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")
def test_set_string_preserves_class_of_string(self):
soup = self.soup("<a></a>")
cdata = CData("foo")
soup.a.string = cdata
self.assertTrue(isinstance(soup.a.string, CData))
class TestElementObjects(SoupTest):
"""Test various features of element objects."""
def test_len(self):
"""The length of an element is its number of children."""
soup = self.soup("<top>1<b>2</b>3</top>")
# The BeautifulSoup object itself contains one element: the
# <top> tag.
self.assertEqual(len(soup.contents), 1)
self.assertEqual(len(soup), 1)
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
self.assertEqual(len(soup.top), 3)
self.assertEqual(len(soup.top.contents), 3)
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo invokes find('foo')"""
soup = self.soup('<b><i></i></b>')
self.assertEqual(soup.b, soup.find('b'))
self.assertEqual(soup.b.i, soup.find('b').find('i'))
self.assertEqual(soup.a, None)
def test_deprecated_member_access(self):
soup = self.soup('<b><i></i></b>')
with warnings.catch_warnings(record=True) as w:
tag = soup.bTag
self.assertEqual(soup.b, tag)
self.assertEqual(
'.bTag is deprecated, use .find("b") instead.',
str(w[0].message))
def test_has_attr(self):
"""has_attr() checks for the presence of an attribute.
        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
"""
soup = self.soup("<foo attr='bar'>")
self.assertTrue(soup.foo.has_attr('attr'))
self.assertFalse(soup.foo.has_attr('attr2'))
def test_attributes_come_out_in_alphabetical_order(self):
markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
def test_string(self):
# A tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
self.assertEqual(soup.b.string, 'foo')
def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
soup = self.soup("<b></b>")
self.assertEqual(soup.b.string, None)
def test_tag_with_multiple_children_has_no_string(self):
# A tag with no children has no .string.
soup = self.soup("<a>foo<b></b><b></b></b>")
self.assertEqual(soup.b.string, None)
soup = self.soup("<a>foo<b></b>bar</b>")
self.assertEqual(soup.b.string, None)
# Even if all the children are strings, due to trickery,
# it won't work--but this would be a good optimization.
soup = self.soup("<a>foo</b>")
soup.a.insert(1, "bar")
self.assertEqual(soup.a.string, None)
def test_tag_with_recursive_string_has_string(self):
# A tag with a single child which has a .string inherits that
# .string.
soup = self.soup("<a><b>foo</b></a>")
self.assertEqual(soup.a.string, "foo")
self.assertEqual(soup.string, "foo")
def test_lack_of_string(self):
"""Only a tag containing a single text node has a .string."""
soup = self.soup("<b>f<i>e</i>o</b>")
self.assertFalse(soup.b.string)
soup = self.soup("<b></b>")
self.assertFalse(soup.b.string)
def test_all_text(self):
"""Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
self.assertEqual(soup.a.text, "ar t ")
self.assertEqual(soup.a.get_text(strip=True), "art")
self.assertEqual(soup.a.get_text(","), "a,r, , t ")
self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
def test_get_text_ignores_comments(self):
soup = self.soup("foo<!--IGNORE-->bar")
self.assertEqual(soup.get_text(), "foobar")
self.assertEqual(
soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar")
self.assertEqual(
soup.get_text(types=None), "fooIGNOREbar")
def test_all_strings_ignores_comments(self):
soup = self.soup("foo<!--IGNORE-->bar")
self.assertEqual(['foo', 'bar'], list(soup.strings))
class TestCDataListAttributes(SoupTest):
"""Testing cdata-list attributes like 'class'.
"""
def test_single_value_becomes_list(self):
soup = self.soup("<a class='foo'>")
self.assertEqual(["foo"],soup.a['class'])
def test_multiple_values_becomes_list(self):
soup = self.soup("<a class='foo bar'>")
self.assertEqual(["foo", "bar"], soup.a['class'])
def test_multiple_values_separated_by_weird_whitespace(self):
soup = self.soup("<a class='foo\tbar\nbaz'>")
self.assertEqual(["foo", "bar", "baz"],soup.a['class'])
def test_attributes_joined_into_string_on_output(self):
soup = self.soup("<a class='foo\tbar'>")
self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
def test_accept_charset(self):
soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
def test_cdata_attribute_applying_only_to_one_tag(self):
data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
soup = self.soup(data)
# We saw in another test that accept-charset is a cdata-list
# attribute for the <form> tag. But it's not a cdata-list
# attribute for any other tag.
self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
def test_string_has_immutable_name_property(self):
string = self.soup("s").string
self.assertEqual(None, string.name)
def t():
string.name = 'foo'
self.assertRaises(AttributeError, t)
class TestPersistence(SoupTest):
"Testing features like pickle and deepcopy."
def setUp(self):
super(TestPersistence, self).setUp()
self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
self.tree = self.soup(self.page)
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
dumped = pickle.dumps(self.tree, 2)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.__class__, BeautifulSoup)
self.assertEqual(loaded.decode(), self.tree.decode())
def test_deepcopy_identity(self):
# Making a deepcopy of a tree yields an identical tree.
copied = copy.deepcopy(self.tree)
self.assertEqual(copied.decode(), self.tree.decode())
def test_copy_preserves_encoding(self):
soup = BeautifulSoup(b'<p> </p>', 'html.parser')
encoding = soup.original_encoding
copy = soup.__copy__()
self.assertEqual(u"<p> </p>", unicode(copy))
self.assertEqual(encoding, copy.original_encoding)
def test_unicode_pickle(self):
# A tree containing Unicode characters can be pickled.
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.decode(), soup.decode())
def test_copy_navigablestring_is_not_attached_to_tree(self):
html = u"<b>Foo<a></a></b><b>Bar</b>"
soup = self.soup(html)
s1 = soup.find(string="Foo")
s2 = copy.copy(s1)
self.assertEqual(s1, s2)
self.assertEqual(None, s2.parent)
self.assertEqual(None, s2.next_element)
self.assertNotEqual(None, s1.next_sibling)
self.assertEqual(None, s2.next_sibling)
self.assertEqual(None, s2.previous_element)
def test_copy_navigablestring_subclass_has_same_type(self):
html = u"<b><!--Foo--></b>"
soup = self.soup(html)
s1 = soup.string
s2 = copy.copy(s1)
self.assertEqual(s1, s2)
self.assertTrue(isinstance(s2, Comment))
def test_copy_entire_soup(self):
html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end"
soup = self.soup(html)
soup_copy = copy.copy(soup)
self.assertEqual(soup, soup_copy)
def test_copy_tag_copies_contents(self):
html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end"
soup = self.soup(html)
div = soup.div
div_copy = copy.copy(div)
# The two tags look the same, and evaluate to equal.
self.assertEqual(unicode(div), unicode(div_copy))
self.assertEqual(div, div_copy)
# But they're not the same object.
self.assertFalse(div is div_copy)
# And they don't have the same relation to the parse tree. The
# copy is not associated with a parse tree at all.
self.assertEqual(None, div_copy.parent)
self.assertEqual(None, div_copy.previous_element)
self.assertEqual(None, div_copy.find(string='Bar').next_element)
self.assertNotEqual(None, div.find(string='Bar').next_element)
class TestSubstitutions(SoupTest):
def test_default_formatter_is_minimal(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into < but the e-with-acute is left alone.
self.assertEqual(
decoded,
self.document_for(
u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_html(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html")
self.assertEqual(
decoded,
self.document_for("<b><<Sacré bleu!>></b>"))
def test_formatter_minimal(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into < but the e-with-acute is left alone.
self.assertEqual(
decoded,
self.document_for(
u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_null(self):
markup = u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter=None)
# Neither the angle brackets nor the e-with-acute are converted.
# This is not valid HTML, but it's what the user wanted.
self.assertEqual(decoded,
self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_custom(self):
markup = u"<b><foo></b><b>bar</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter = lambda x: x.upper())
# Instead of normal entity conversion code, the custom
# callable is called on every string.
self.assertEqual(
decoded,
self.document_for(u"<b><FOO></b><b>BAR</b>"))
def test_formatter_is_run_on_attribute_values(self):
markup = u'<a href="http://a.com?a=b&c=é">e</a>'
soup = self.soup(markup)
a = soup.a
expect_minimal = u'<a href="http://a.com?a=b&c=é">e</a>'
self.assertEqual(expect_minimal, a.decode())
self.assertEqual(expect_minimal, a.decode(formatter="minimal"))
expect_html = u'<a href="http://a.com?a=b&c=é">e</a>'
self.assertEqual(expect_html, a.decode(formatter="html"))
self.assertEqual(markup, a.decode(formatter=None))
expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>'
self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper()))
def test_formatter_skips_script_tag_for_html_documents(self):
doc = """
<script type="text/javascript">
console.log("< < hey > > ");
</script>
"""
encoded = BeautifulSoup(doc, 'html.parser').encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_formatter_skips_style_tag_for_html_documents(self):
doc = """
<style type="text/css">
console.log("< < hey > > ");
</style>
"""
encoded = BeautifulSoup(doc, 'html.parser').encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_prettify_leaves_preformatted_text_alone(self):
soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz ")
# Everything outside the <pre> tag is reformatted, but everything
# inside is left alone.
self.assertEqual(
u'<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n</div>',
soup.div.prettify())
def test_prettify_accepts_formatter(self):
soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
pretty = soup.prettify(formatter = lambda x: x.upper())
self.assertTrue("FOO" in pretty)
def test_prettify_outputs_unicode_by_default(self):
soup = self.soup("<a></a>")
self.assertEqual(unicode, type(soup.prettify()))
def test_prettify_can_encode_data(self):
soup = self.soup("<a></a>")
self.assertEqual(bytes, type(soup.prettify("utf-8")))
def test_html_entity_substitution_off_by_default(self):
markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
self.assertEqual(encoded, markup.encode('utf-8'))
def test_encoding_substitution(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
soup = self.soup(meta_tag)
        # Parse the document, and the charset appears unchanged.
self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis')
# Encode the document into some encoding, and the encoding is
# substituted into the meta tag.
utf_8 = soup.encode("utf-8")
self.assertTrue(b"charset=utf-8" in utf_8)
euc_jp = soup.encode("euc_jp")
self.assertTrue(b"charset=euc_jp" in euc_jp)
shift_jis = soup.encode("shift-jis")
self.assertTrue(b"charset=shift-jis" in shift_jis)
utf_16_u = soup.encode("utf-16").decode("utf-16")
self.assertTrue("charset=utf-16" in utf_16_u)
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
markup = ('<head><meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/></head><pre>foo</pre>')
# Beautiful Soup used to try to rewrite the meta tag even if the
# meta tag got filtered out by the strainer. This test makes
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
"""Test the ability to encode objects into strings."""
def test_unicode_string_can_be_encoded(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.string.encode("utf-8"),
u"\N{SNOWMAN}".encode("utf-8"))
def test_tag_containing_unicode_string_can_be_encoded(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
soup.b.encode("utf-8"), html.encode("utf-8"))
def test_encoding_substitutes_unrecognized_characters_by_default(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")
def test_encoding_can_be_made_strict(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertRaises(
UnicodeEncodeError, soup.encode, "ascii", errors="strict")
def test_decode_contents(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents())
def test_encode_contents(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents(
encoding="utf8"))
def test_deprecated_renderContents(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())
def test_repr(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
if PY3K:
self.assertEqual(html, repr(soup))
else:
self.assertEqual(b'<b>\\u2603</b>', repr(soup))
class TestNavigableStringSubclasses(SoupTest):
def test_cdata(self):
# None of the current builders turn CDATA sections into CData
# objects, but you can create them manually.
soup = self.soup("")
cdata = CData("foo")
soup.insert(1, cdata)
self.assertEqual(str(soup), "<![CDATA[foo]]>")
self.assertEqual(soup.find(text="foo"), "foo")
self.assertEqual(soup.contents[0], "foo")
def test_cdata_is_never_formatted(self):
"""Text inside a CData object is passed into the formatter.
But the return value is ignored.
"""
self.count = 0
def increment(*args):
self.count += 1
return "BITTER FAILURE"
soup = self.soup("")
cdata = CData("<><><>")
soup.insert(1, cdata)
self.assertEqual(
b"<![CDATA[<><><>]]>", soup.encode(formatter=increment))
self.assertEqual(1, self.count)
def test_doctype_ends_in_newline(self):
# Unlike other NavigableString subclasses, a DOCTYPE always ends
# in a newline.
doctype = Doctype("foo")
soup = self.soup("")
soup.insert(1, doctype)
self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
def test_declaration(self):
d = Declaration("foo")
self.assertEqual("<?foo?>", d.output_ready())
class TestSoupSelector(TreeTest):
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
<custom-dashed-tag class="dashed" id="dash2"/>
<div data-tag="dashedvalue" id="data1"/>
</span>
</div>
<x id="xid">
<z id="zida"/>
<z id="zidab"/>
<z id="zidac"/>
</x>
<y id="yid">
<z id="zidb"/>
</y>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setUp(self):
self.soup = BeautifulSoup(self.HTML, 'html.parser')
def assertSelects(self, selector, expected_ids, **kwargs):
el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)]
el_ids.sort()
expected_ids.sort()
self.assertEqual(expected_ids, el_ids,
"Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
)
assertSelect = assertSelects
def assertSelectMultiple(self, *tests):
for selector, expected_ids in tests:
self.assertSelect(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'title')
self.assertEqual(els[0].contents, [u'The title'])
def test_one_tag_many(self):
els = self.soup.select('div')
self.assertEqual(len(els), 4)
for div in els:
self.assertEqual(div.name, 'div')
el = self.soup.select_one('div')
self.assertEqual('main', el['id'])
def test_select_one_returns_none_if_no_match(self):
match = self.soup.select_one('nonexistenttag')
self.assertEqual(None, match)
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assertSelects('div div', ['inner', 'data1'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assertSelects(selector, ['data1', 'main', 'inner', 'footer'])
def test_limit(self):
self.assertSelects('html div', ['main'], limit=1)
self.assertSelects('html body div', ['inner', 'main'], limit=2)
self.assertSelects('body div', ['data1', 'main', 'inner', 'footer'],
limit=10)
def test_tag_no_match(self):
self.assertEqual(len(self.soup.select('del')), 0)
def test_invalid_tag(self):
self.assertRaises(ValueError, self.soup.select, 'tag%t')
def test_select_dashed_tag_ids(self):
self.assertSelects('custom-dashed-tag', ['dash1', 'dash2'])
def test_select_dashed_by_id(self):
dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
self.assertEqual(dashed[0].name, 'custom-dashed-tag')
self.assertEqual(dashed[0]['id'], 'dash2')
def test_dashed_tag_text(self):
self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, u'Hello there.')
def test_select_dashed_matches_find_all(self):
self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag'))
def test_header_tags(self):
self.assertSelectMultiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'p')
self.assertEqual(els[0]['class'], ['onep'])
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
self.assertEqual(len(els), 0)
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assertSelects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
self.assertEqual(len(els), 0)
def test_items_in_id(self):
els = self.soup.select('div#inner p')
self.assertEqual(len(els), 3)
for el in els:
self.assertEqual(el.name, 'p')
self.assertEqual(els[1]['class'], ['onep'])
self.assertFalse(els[0].has_attr('class'))
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
self.assertEqual(len(self.soup.select(selector)), 0)
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assertSelects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assertSelects(selector, ['pmulti'])
def test_child_selector(self):
self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
self.assertSelects('.s1 > a span', ['s1a2s1'])
def test_child_selector_id(self):
self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1'])
def test_attribute_equals(self):
self.assertSelectMultiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assertSelectMultiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assertSelectMultiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
('div[data-tag^="dashed"]', ['data1'])
)
def test_attribute_endswith(self):
self.assertSelectMultiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
('div[id$="1"]', ['data1']),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assertSelectMultiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
('div[id*="1"]', ['data1']),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
('div[data-tag*="edval"]', ['data1'])
)
def test_attribute_exact_or_hypen(self):
self.assertSelectMultiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assertSelectMultiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
('div[data-tag]', ['data1'])
)
def test_quoted_space_in_selector_name(self):
html = """<div style="display: wrong">nope</div>
<div style="display: right">yes</div>
"""
soup = BeautifulSoup(html, 'html.parser')
[chosen] = soup.select('div[style="display: right"]')
self.assertEqual("yes", chosen.string)
def test_unsupported_pseudoclass(self):
self.assertRaises(
NotImplementedError, self.soup.select, "a:no-such-pseudoclass")
self.assertRaises(
NotImplementedError, self.soup.select, "a:nth-of-type(a)")
def test_nth_of_type(self):
# Try to select first paragraph
els = self.soup.select('div#inner p:nth-of-type(1)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, u'Some text')
# Try to select third paragraph
els = self.soup.select('div#inner p:nth-of-type(3)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, u'Another')
# Try to select (non-existent!) fourth paragraph
els = self.soup.select('div#inner p:nth-of-type(4)')
self.assertEqual(len(els), 0)
# Pass in an invalid value.
self.assertRaises(
ValueError, self.soup.select, 'div p:nth-of-type(0)')
def test_nth_of_type_direct_descendant(self):
els = self.soup.select('div#inner > p:nth-of-type(1)')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, u'Some text')
def test_id_child_selector_nth_of_type(self):
self.assertSelects('#inner > p:nth-of-type(2)', ['p1'])
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assertSelectsIDs(selected, ['inner', 'data1'])
def test_overspecified_child_id(self):
self.assertSelects(".fancy #inner", ['inner'])
self.assertSelects(".normal #inner", [])
def test_adjacent_sibling_selector(self):
self.assertSelects('#p1 + h2', ['header2'])
self.assertSelects('#p1 + h2 + p', ['pmulti'])
self.assertSelects('#p1 + #header2 + .class1', ['pmulti'])
self.assertEqual([], self.soup.select('#p1 + p'))
def test_general_sibling_selector(self):
self.assertSelects('#p1 ~ h2', ['header2', 'header3'])
self.assertSelects('#p1 ~ #header2', ['header2'])
self.assertSelects('#p1 ~ h2 + a', ['me'])
self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me'])
self.assertEqual([], self.soup.select('#inner ~ h2'))
def test_dangling_combinator(self):
self.assertRaises(ValueError, self.soup.select, 'h1 >')
def test_sibling_combinator_wont_select_same_tag_twice(self):
self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])
# Test the selector grouping operator (the comma)
def test_multiple_select(self):
self.assertSelects('x, y', ['xid', 'yid'])
def test_multiple_select_with_no_space(self):
self.assertSelects('x,y', ['xid', 'yid'])
def test_multiple_select_with_more_space(self):
self.assertSelects('x, y', ['xid', 'yid'])
def test_multiple_select_duplicated(self):
self.assertSelects('x, x', ['xid'])
def test_multiple_select_sibling(self):
self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])
def test_multiple_select_tag_and_direct_descendant(self):
self.assertSelects('x, y > z', ['xid', 'zidb'])
def test_multiple_select_direct_descendant_and_tags(self):
self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
def test_multiple_select_indirect_descendant(self):
self.assertSelects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
def test_invalid_multiple_select(self):
self.assertRaises(ValueError, self.soup.select, ',x, y')
self.assertRaises(ValueError, self.soup.select, 'x,,y')
def test_multiple_select_attrs(self):
self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])
def test_multiple_select_ids(self):
self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])
def test_multiple_select_nested(self):
self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])
| gpl-2.0 | 1,488,686,008,359,035,000 | 5,737,793,616,422,445,000 | 37.19863 | 118 | 0.57025 | false |
runekaagaard/django-contrib-locking | django/contrib/gis/gdal/srs.py | 8 | 12155 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input='', srs_type='user'):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr and capi:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, you have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
        and automatically determines whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr and capi:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
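
# Illustrative sketch (an addition, not part of the original module): a
# CoordTransform is typically built once and reused for many geometries. The
# OGRGeometry import is assumed to be available from this same gdal package.
#
#     from django.contrib.gis.gdal import OGRGeometry, SpatialReference, CoordTransform
#
#     wgs84 = SpatialReference(4326)    # geographic lat/lon
#     texas = SpatialReference(32140)   # NAD83 / Texas South Central
#     ct = CoordTransform(wgs84, texas)
#
#     pnt = OGRGeometry('POINT(-95.36 29.76)', wgs84)
#     pnt.transform(ct)                 # reprojects the point in place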
| bsd-3-clause | 6,202,832,718,386,507,000 | -1,673,807,318,395,052,800 | 33.53125 | 97 | 0.605101 | false |
jpush/jbox | Server/venv/lib/python3.5/site-packages/requests/packages/urllib3/util/connection.py | 353 | 3380 | from __future__ import absolute_import
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
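
# Illustrative sketch (an addition, assuming a typical connection pool): a
# pool would call is_connection_dropped() before reusing a kept-alive
# connection, discarding dead sockets instead of issuing a request on them.
#
#     if is_connection_dropped(conn):   # conn: an httplib.HTTPConnection
#         conn.close()                  # throw it away and dial a fresh one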
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
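
# Illustrative sketch (an addition): ``socket_options`` takes a list of
# (level, optname, value) tuples applied before connecting, e.g. disabling
# Nagle's algorithm; the host, port and timeout below are example values.
#
#     sock = create_connection(
#         ('example.com', 80), timeout=5.0,
#         socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])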
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
| mit | 94,503,071,292,411,040 | -6,191,766,499,668,224,000 | 32.465347 | 76 | 0.627811 | false |
sunzuolei/youtube-dl | youtube_dl/downloader/__init__.py | 101 | 1168 | from __future__ import unicode_literals
from .common import FileDownloader
from .external import get_external_downloader
from .f4m import F4mFD
from .hls import HlsFD
from .hls import NativeHlsFD
from .http import HttpFD
from .rtsp import RtspFD
from .rtmp import RtmpFD
from .dash import DashSegmentsFD
from ..utils import (
determine_protocol,
)
PROTOCOL_MAP = {
'rtmp': RtmpFD,
'm3u8_native': NativeHlsFD,
'm3u8': HlsFD,
'mms': RtspFD,
'rtsp': RtspFD,
'f4m': F4mFD,
'http_dash_segments': DashSegmentsFD,
}
def get_suitable_downloader(info_dict, params={}):
"""Get the downloader class that can handle the info dict."""
protocol = determine_protocol(info_dict)
info_dict['protocol'] = protocol
external_downloader = params.get('external_downloader')
if external_downloader is not None:
ed = get_external_downloader(external_downloader)
if ed.supports(info_dict):
return ed
if protocol == 'm3u8' and params.get('hls_prefer_native'):
return NativeHlsFD
return PROTOCOL_MAP.get(protocol, HttpFD)
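
# Illustrative sketch (an addition; the URL and params are example values):
# the protocol is taken from the info dict, or derived from the URL when
# absent, so an HLS manifest with 'hls_prefer_native' yields NativeHlsFD.
#
#     fd_cls = get_suitable_downloader(
#         {'url': 'https://example.com/stream.m3u8'},
#         {'hls_prefer_native': True})
#     # fd_cls is NativeHlsFD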
__all__ = [
'get_suitable_downloader',
'FileDownloader',
]
| unlicense | -317,719,309,764,543,700 | 2,397,618,305,894,911,000 | 23.333333 | 65 | 0.685788 | false |
sandeepkbhat/pylearn2 | pylearn2/costs/gated_autoencoder.py | 39 | 5793 | """
Definitions of the cost for the gated-autoencoder.
"""
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
from pylearn2.space import VectorSpace
class SymmetricCost(DefaultDataSpecsMixin, Cost):
"""
Summary (Class representing the symmetric cost).
Subclasses can define the type of data they will use.
    Mean reconstruction error is used for real-valued data
    and cross-entropy loss is used for binary data.
See Also
--------
"Gradient-based learning of higher-order image features"
"""
@staticmethod
def cost(x, y, rx, ry):
"""
Symmetric reconstruction cost.
Parameters
----------
x : tensor_like
Theano symbolic representing the first input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
y : tensor_like
            Theano symbolic representing the second input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
rx : tensor_like
Reconstruction of the first minibatch by the model.
ry: tensor_like
Reconstruction of the second minibatch by the model.
Returns
-------
Cost: theano_like expression
Representation of the cost
"""
raise NotImplementedError
def expr(self, model, data, *args, **kwargs):
"""
Returns a theano expression for the cost function.
Returns a symbolic expression for a cost function applied to the
minibatch of data.
Optionally, may return None. This represents that the cost function
is intractable but may be optimized via the get_gradients method.
Parameters
----------
model : a pylearn2 Model instance
data : a batch in cost.get_data_specs() form
kwargs : dict
Optional extra arguments. Not used by the base class.
"""
self.get_data_specs(model)[0].validate(data)
x, y = data
input_space = model.get_input_space()
if not isinstance(input_space.components[0], VectorSpace):
conv = input_space.components[0]
vec = VectorSpace(conv.get_total_dimension())
x = conv.format_as(x, vec)
if not isinstance(input_space.components[1], VectorSpace):
conv = input_space.components[1]
vec = VectorSpace(conv.get_total_dimension())
y = conv.format_as(y, vec)
rx, ry = model.reconstructXY((x, y))
return self.cost(x, y, rx, ry)
class SymmetricMSRE(SymmetricCost):
"""
Summary (Symmetric cost for real valued data).
See Also
--------
"Gradient-based learning of higher-order image features"
"""
@staticmethod
def cost(x, y, rx, ry):
"""
Summary (Definition of the cost).
Mean squared reconstruction error.
Parameters
----------
x : tensor_like
Theano symbolic representing the first input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
y : tensor_like
            Theano symbolic representing the second input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
rx : tensor_like
Reconstruction of the first minibatch by the model.
ry: tensor_like
Reconstruction of the second minibatch by the model.
Returns
-------
Cost: theano_like expression
Representation of the cost
Notes
-----
Symmetric reconstruction cost as defined by Memisevic in:
"Gradient-based learning of higher-order image features".
This function only works with real valued data.
"""
return (
((0.5*((x - rx)**2)) + (0.5*((y - ry)**2)))).sum(axis=1).mean()
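
# Added note: written out, the cost above is, for a minibatch of N pairs,
#
#     (1/N) * sum_n [ 0.5 * ||x_n - rx_n||^2 + 0.5 * ||y_n - ry_n||^2 ]
#
# i.e. the squared errors of both reconstructions are summed over data
# dimensions (axis=1) and averaged over training examples.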
class NormalizedSymmetricMSRE(SymmetricCost):
"""
Summary (Normalized Symmetric cost for real valued data).
Notes
-----
Value used to observe the percentage of reconstruction.
"""
@staticmethod
def cost(x, y, rx, ry):
"""
Summary (Definition of the cost).
        Normalized mean squared reconstruction error. Values
between 0 and 1.
Parameters
----------
x : tensor_like
Theano symbolic representing the first input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
y : tensor_like
            Theano symbolic representing the second input minibatch.
Assumed to be 2-tensors, with the first dimension
indexing training examples and the second indexing
data dimensions.
rx : tensor_like
Reconstruction of the first minibatch by the model.
ry: tensor_like
Reconstruction of the second minibatch by the model.
Returns
-------
Cost: theano_like expression
Representation of the cost
Notes
-----
Do not use this function to train, only to monitor the
average percentage of reconstruction achieved when training on
real valued data.
"""
num = (((0.5*((x - rx)**2)) + (0.5*((y - ry)**2)))).sum(axis=1).mean()
den = ((0.5*(x.norm(2, 1)**2)) + (0.5*(y.norm(2, 1)**2))).mean()
return num/den
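
# Added note: the denominator is the mean squared norm of the inputs, so the
# ratio is 0 for perfect reconstruction and exactly 1 when the model
# reconstructs every example as zeros -- hence its use as a monitoring value.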
| bsd-3-clause | -2,228,437,924,873,230,300 | -4,208,130,434,367,547,400 | 32.293103 | 78 | 0.599862 | false |
mlabru/ptracks | view/dbedit/dlg_exe_data_new.py | 1 | 36600 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------------------------------
dlg_exe_data_new
mantém as informações sobre a dialog de edição da tabela de exercícios
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
---------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.1$"
__author__ = "Milton Abrunhosa"
__date__ = "2015/11"
# < imports >--------------------------------------------------------------------------------------
# python library
import os
import logging
import sys
# PyQt library
from PyQt4 import QtCore, QtGui
# libs
import libs.coords.coord_defs as cdefs
# model
import model.items.exe_data as dctExe
# view
import view.dbedit.dlg_exe_edit_new as dlgEdit
import view.dbedit.dlg_exe_data_new_ui as dlgData_ui
import view.dbedit.dlg_anv_edit_new as dlgAnv
# control
import control.events.events_basic as events
import control.events.events_config as evtConfig
# < class CDlgExeDataNEW >---------------------------------------------------------------------------
class CDlgExeDataNEW (QtGui.QDialog, dlgData_ui.Ui_CDlgExeDataNEW):
"""
    maintains the information about the exercise table editing dialog
"""
# ---------------------------------------------------------------------------------------------
    def __init__(self, f_control, f_parent=None):
        """
        constructor
        @param f_control: control manager of the database editor
        @param f_parent: parent window
        """
        # check input parameters
        assert f_control
        # init super class
        super(CDlgExeDataNEW, self).__init__(f_parent)
        # save the control manager locally
        self._control = f_control
        # get the event manager
        self._event = f_control.event
        assert self._event
        # get the configuration manager
        self._config = f_control.config
        assert self._config
        # get the configuration dictionary
        self._dctConfig = self._config.dct_config
        assert self._dctConfig
        # get the model manager
        self._model = f_control.model
        assert self._model
        # save the parent window locally
        self._parent = f_parent
        # is there a parent window ?
        if self._parent is not None:
            # hide the parent window
            self._parent.setVisible(False)
        # pointer to the current items
        self._oExe = None
        # pointer to the dictionaries to edit
        self._dctExe = None
        # build the dialog
        self.setupUi(self)
        # slot/signal connection setup
        self.configConnects()
        # setup of titles and messages of the edit window
        self.configTexts()
        # restore the edit window settings
        self.restoreSettings()
        # set the dialog title
        self.setWindowTitle(u"dbEdit [ Edição de Exercícios ]")
        # do not allow creation or deletion of exercises and of exercise traffics
        self.btnExeNew.setEnabled(False)
        self.btnExeDel.setEnabled(False)
        self.btnTrfNew.setEnabled(False)
        self.btnTrfDel.setEnabled(False)
        # do the initial load of the exercises directory
        QtCore.QTimer.singleShot(0, self.loadInitial)
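
    # Illustrative sketch (an addition -- the real call site lives in the
    # dbedit controller, which is not shown here): the dialog is built from a
    # control manager and run modally, e.g.:
    #
    #     l_dlg = CDlgExeDataNEW(l_control)   # l_control: dbedit control manager
    #     l_dlg.exec_()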
# ---------------------------------------------------------------------------------------------
    def accept(self):
        """
        callback of btnOk of the edit dialog
        performs the dialog accept
        """
        # ok to continue ?
        if self.okToContinue():
            # do the "accept"
            QtGui.QDialog.accept(self)
            # close the edit window
            self.close()
# ---------------------------------------------------------------------------------------------
    def closeEvent(self, event):
        """
        callback handling the Close event
        @param event : ..
        """
        # ok to continue ?
        if self.okToContinue():
            # get the settings
            l_set = QtCore.QSettings()
            assert l_set
            # save the window geometry
            l_set.setValue("%s/Geometry" % (self._txtSettings),
                           QtCore.QVariant(self.saveGeometry()))
            # is there a parent window ?
            if self._parent is not None:
                # show the parent window
                self._parent.setVisible(True)
        # otherwise, ignore the request
        else:
            # ignore the event
            event.ignore()
# ---------------------------------------------------------------------------------------------
    def configConnects(self):
        """
        configure the slot/signal connections
        """
        # exercise
        # connect click to exercise removal
        self.connect(self.btnExeDel,
                     QtCore.SIGNAL("clicked()"),
                     self.exeDel)
        # connect click to exercise editing
        self.connect(self.btnExeEdit,
                     QtCore.SIGNAL("clicked()"),
                     self.exeEdit)
        # connect click to exercise creation
        self.connect(self.btnExeNew,
                     QtCore.SIGNAL("clicked()"),
                     self.exeNew)
        # connect click to removal of the exercise traffic
        self.connect(self.btnTrfDel,
                     QtCore.SIGNAL("clicked()"),
                     self.trfDel)
        # connect click to editing of the exercise traffic
        self.connect(self.btnTrfEdit,
                     QtCore.SIGNAL("clicked()"),
                     self.trfEdit)
        # connect click to creation of the exercise traffic
        self.connect(self.btnTrfNew,
                     QtCore.SIGNAL("clicked()"),
                     self.trfNew)
        # connect click to row selection
        self.connect(self.qtwExeTab,
                     QtCore.SIGNAL("itemSelectionChanged()"),
                     self.exeSelect)
        # connect the Ok button
        self.connect(self.bbxExeTab,
                     QtCore.SIGNAL("accepted()"),
                     self.accept)
        # connect the Cancel button
        self.connect(self.bbxExeTab,
                     QtCore.SIGNAL("rejected()"),
                     self.reject)
        # configure buttons
        self.bbxExeTab.button(QtGui.QDialogButtonBox.Cancel).setText("&Cancela")
        self.bbxExeTab.button(QtGui.QDialogButtonBox.Ok).setFocus()
# ---------------------------------------------------------------------------------------------
    def configTexts(self):
        """
        configure titles and messages
        """
        self._txtSettings = "CDlgExeDataNEW"
        # self._txtContinueTit = u"TrackS - Alterações pendentes"
        # self._txtContinueMsg = u"Salva alterações pendentes ?"
        self._txtDelExeTit = u"TrackS - Apaga exercício"
        self._txtDelExeMsg = u"Apaga exercício {0} ?"
        self._txtDelTrfTit = u"TrackS - Apaga o tráfego do exercício"
        self._txtDelTrfMsg = u"Apaga o tráfego do exercício {0} ?"
# ---------------------------------------------------------------------------------------------
    def exeDel(self):
        """
        callback of btnDel of the edit dialog
        deletes an exercise from the list
        """
        # check execution conditions
        assert self.qtwExeTab is not None
        assert self._dctExe is not None
        # get the selected exercise
        self._oExe = self.getCurrentSel(self._dctExe, self.qtwExeTab)
        if self._oExe is not None:
            # delete the current exercise ?
            if QtGui.QMessageBox.Yes == QtGui.QMessageBox.question(self,
                                            self._txtDelExeTit,
                                            self._txtDelExeMsg.format(self._oExe.s_exe_id),
                                            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No):
                # delete the exercise
                self.exeRemove(self._oExe)
# ---------------------------------------------------------------------------------------------
    def exeEdit(self):
        """
        callback of btnEdit of the edit dialog
        edits an exercise of the QTableWidget
        """
        # check execution conditions
        assert self.qtwExeTab is not None
        assert self._dctExe is not None
        # get the selected exercise
        self._oExe = self.getCurrentSel(self._dctExe, self.qtwExeTab)
        if self._oExe is not None:
            # create the exercise edit dialog
            l_Dlg = dlgEdit.CDlgExeEditNEW(self._control, self._oExe, self)
            assert l_Dlg
            # process the exercise edit dialog (modal)
            if l_Dlg.exec_():
                # get the changed data
                self._oExe = l_Dlg.getData()
                # existing exercise ?
                if (self._oExe is not None) and (self._dctExe is not None):
                    # update the exercise in the exercise dictionary
                    self._dctExe[self._oExe.s_exe_id] = self._oExe
                    # create the event to save the updated exercise to disk
                    l_evtUpd2Disk = events.CUpd2Disk(fs_table="EXE", fs_filename=self._oExe.s_exe_id)
                    assert l_evtUpd2Disk
                    # disseminate the event
                    self._event.post(l_evtUpd2Disk)
                    # if ok, update the exercise QTableWidget
                    self.exeUpdateWidget()
# ---------------------------------------------------------------------------------------------
    def exeNew(self):
        """
        callback of btnNew of the edit dialog
        creates a new exercise in the list
        """
        # create the exercise edit dialog
        l_Dlg = dlgEdit.CDlgExeEditNEW(self._control, None, self)
        assert l_Dlg
        # process the exercise edit dialog (modal)
        if l_Dlg.exec_():
            # get the edited data
            self._oExe = l_Dlg.getData()
            # existing exercise ?
            if (self._oExe is not None) and (self._dctExe is not None):
                # insert the exercise into the list
                self._dctExe[self._oExe.s_exe_id] = self._oExe
                # create the event to save the exercises to disk
                l_evtSave2Disk = events.CSave2Disk(fs_table="EXE")
                assert l_evtSave2Disk
                # disseminate the event
                self._event.post(l_evtSave2Disk)
                # if ok, update the exercise QTableWidget
                self.exeUpdateWidget()
# ---------------------------------------------------------------------------------------------
    def exeRemove(self, f_oExe):
        """
        removes the selected exercise
        @param f_oExe : pointer to the exercise to remove
        """
        # check execution conditions
        assert f_oExe is not None
        # remove the row from the widget
        self.qtwExeTab.removeRow(self.qtwExeTab.currentRow())
        # create the event to delete the exercise from disk
        l_evtDelFromDisk = events.CDelFromDisk(fs_table="EXE", fs_filename=f_oExe.s_exe_id)
        assert l_evtDelFromDisk
        # disseminate the event
        self._event.post(l_evtDelFromDisk)
# ---------------------------------------------------------------------------------------------
    def exeSelect(self):
        """
        selects an exercise to edit
        """
        # check execution conditions
        assert self._dctExe is not None
        assert self.qtwExeTab is not None
        # get the selected exercise
        self._oExe = self.getCurrentSel(self._dctExe, self.qtwExeTab)
        assert self._oExe
        # create the event to configure the current exercise
        l_evtConfigExe = evtConfig.CConfigExe(ls_exe=self._oExe.s_exe_id)
        assert l_evtConfigExe
        # disseminate the event
        self._event.post(l_evtConfigExe)
        # update the data area of the selected exercise
        self.exeUpdateSel()
# ---------------------------------------------------------------------------------------------
    def exeUpdateList(self):
        """
        updates on screen the data of the exercise list
        """
        # check execution conditions
        assert self._dctExe is not None
        assert self.qtwExeTab is not None
        # update the exercise QTableWidget
        self.exeUpdateWidget()
        # get the selected exercise
        self._oExe = self.getCurrentSel(self._dctExe, self.qtwExeTab)
        # assert self._oExe
# ---------------------------------------------------------------------------------------------
    def exeUpdateSel(self):
        """
        updates on screen the data of the selected exercise
        """
        # does the selected exercise exist ?
        l_log = logging.getLogger("CDlgExeDataNEW::exeUpdateSel")
        l_log.setLevel(logging.DEBUG)
        if self._oExe is not None:
            # exercise id
            #l_sExeID = self._oExe.s_exe_id
            # update the exercise view
            # self._oSrv.configExe(l_sExeID, dbus_interface = self.cSRV_Path)
            # identification
            self.txtExeID.setText(self._oExe.s_exe_id)
            self.qleExeDesc.setText(self._oExe.s_exe_desc)
            l_exe_hor_ini = str(self._oExe.t_exe_hor_ini[0]) + ":" + str(self._oExe.t_exe_hor_ini[1])
            self.tedHorIni.setTime(QtCore.QTime.fromString(l_exe_hor_ini,"h:m"))
            l_log.debug(" Exercicio [%s] Descricao [%s] Hora Inicial [%s]" %
                        (self._oExe.s_exe_id, self._oExe.s_exe_desc, l_exe_hor_ini))
            # update the list of exercise traffics
            self.trfUpdateWidget()
        # otherwise, the exercise does not exist
        else:
            # place the cursor at the beginning of the form
            self.txtExeID.setFocus()
# ---------------------------------------------------------------------------------------------
    def exeUpdateWidget(self):
        """
        updates on screen the data of the exercise QTableWidget
        """
        l_log = logging.getLogger("CDlgExeDataNEW::exeUpdateWidget")
        l_log.setLevel(logging.DEBUG)
        l_log.debug(" load exes on the table ...")
        # check execution conditions
        assert self.qtwExeTab is not None
        assert self._dctExe is not None
        # clear the QTableWidget
        self.qtwExeTab.clear()
        # set the number of rows of the QTableWidget to the list size
        self.qtwExeTab.setRowCount(len(self._dctExe))
        # set the number of columns and the column headers
        self.qtwExeTab.setColumnCount(2)
        self.qtwExeTab.setHorizontalHeaderLabels([u"Indicativo", u"Descrição"])
        # set up the QTableWidget
        self.qtwExeTab.setAlternatingRowColors(True)
        self.qtwExeTab.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)
        self.qtwExeTab.setSelectionBehavior(QtGui.QTableWidget.SelectRows)
        self.qtwExeTab.setSelectionMode(QtGui.QTableWidget.SingleSelection)
        self.qtwExeTab.setSortingEnabled(False)
        # row 0 (exercise object)
        l_oA0 = None
        # selected row (exercise object)
        l_oSItem = None
        # for each exercise in the dictionary..
        for l_iNdx, l_sExeID in enumerate(sorted(self._dctExe.keys())):
            # exercise id
            l_twiExeID = QtGui.QTableWidgetItem(l_sExeID)
            l_twiExeID.setData(QtCore.Qt.UserRole, QtCore.QVariant(l_sExeID))
            self.qtwExeTab.setItem(l_iNdx, 0, l_twiExeID)
            # is it the selected exercise ?
            if (self._oExe is not None) and (self._oExe.s_exe_id == l_sExeID):
                # save a pointer to the selected item
                l_oSItem = l_twiExeID
            # get the exercise
            l_oExe = self._dctExe[l_sExeID]
            assert l_oExe
            # description
            l_twiExeDesc = QtGui.QTableWidgetItem(l_oExe.s_exe_desc)
            l_log.debug(" Exe ID [%s] Descricao [%s]" % (l_oExe.s_exe_id, l_twiExeDesc.text()))
            l_log.debug(" Quantidade de tráfegos [%d]" % l_oExe.i_exe_qtd_trf)
            self.qtwExeTab.setItem(l_iNdx, 1, l_twiExeDesc)
        # is there a selected exercise ?
        if self._oExe is not None:
            # select the item
            self.qtwExeTab.setCurrentItem(l_oSItem)
            # scroll to the selected item
            self.qtwExeTab.scrollToItem(l_oSItem)
            # mark that there is a selection
            l_oSItem.setSelected(True)
        # otherwise, there is no selected exercise
        else:
            # select the first row
            self.qtwExeTab.selectRow(0)
            # get the current exercise
            self._oExe = self.getCurrentSel(self._dctExe, self.qtwExeTab)
            # assert self._oExe
        # resize the columns to fit their contents
        self.qtwExeTab.resizeColumnsToContents()
        # enable sorting
        self.qtwExeTab.setSortingEnabled(True)
# ---------------------------------------------------------------------------------------------
    def getCurrentData(self, f_qtwTab, f_iCol):
        """
        returns the data associated with the selected row
        """
        l_log = logging.getLogger("CDlgExeDataNEW::getCurrentData")
        l_log.setLevel(logging.DEBUG)
        l_log.debug("Obter os dados elemento selecionado")
        # check execution conditions
        assert f_qtwTab is not None
        # the data of the selected row
        l_sData = ""
        # get the item of the selected row
        l_oItem = self.getCurrentItem(f_qtwTab, f_iCol)
        l_log.debug(" Dados do Item [%s]" % str(l_oItem.text()))
        # is there a selected row ?
        if l_oItem is not None:
            # get the data associated with the row
            #l_sData = l_oItem.data(QtCore.Qt.UserRole).toString()
            l_sData = l_oItem.text()
        # return the data associated with the selected row
        return l_sData
# ---------------------------------------------------------------------------------------------
def getCurrentItem(self, f_qtwTab, f_iCol):
"""
retorna o item associado a linha selecionada
"""
# o item selecionado
l_oItem = None
# verifica condições de execução
assert f_qtwTab is not None
# obtém o número da linha selecionada
l_iRow = f_qtwTab.currentRow()
# existe uma linha selecionada ?
if l_iRow > -1:
# obtém o item associado
l_oItem = f_qtwTab.item(l_iRow, f_iCol)
assert l_oItem
# retorna o item selecionado na lista
return l_oItem
# ---------------------------------------------------------------------------------------------
def getCurrentSel(self, f_dct, f_qtw):
"""
retorna o elemento associado a linha selecionada na lista
"""
# verifica condições de execução
assert f_dct is not None
assert f_qtw is not None
# obtém o index da linha selecionada
l_sID = self.getCurrentData(f_qtw, 0)
# indice válido ?
if str(l_sID) in f_dct:
# obtém o elemento selecionado se existir uma linha selecionada
l_oSel = f_dct[str(l_sID)]
assert l_oSel
# senão, índice inválido
else:
# não há elemento selecionado
l_oSel = None
# retorna o elemento da linha selecionada na lista
return l_oSel
# ---------------------------------------------------------------------------------------------
def getCurrentTrfSel(self, f_dct, f_qtw):
"""
retorna o tráfego do exercício associado a linha selecionada na lista
"""
l_log = logging.getLogger("CDlgExeDataNEW::getCurrentTrfSel")
l_log.setLevel(logging.DEBUG)
l_log.debug("Obter o elemento selecionado")
# verifica condições de execução
assert f_dct is not None
assert f_qtw is not None
# obtém o index da linha selecionada
l_sID = self.getCurrentData(f_qtw, 0)
l_log.debug ("ID [%s]" % l_sID)
l_iID = int(l_sID)
# indice válido ?
if l_iID in f_dct:
# obtém o elemento selecionado se existir uma linha selecionada
l_oSel = f_dct[l_iID]
assert l_oSel
# senão, índice inválido
else:
# não há elemento selecionado
l_oSel = None
# retorna o elemento da linha selecionada na lista
return l_oSel
# ---------------------------------------------------------------------------------------------
def loadInitial(self):
"""
faz a carga inicial da tabela de exercícios
"""
l_log = logging.getLogger("CDlgExeDataNEW::loadInitial")
l_log.setLevel(logging.DEBUG)
l_log.debug(" load exes ...")
# obtém o dicionário de exercícios
self._dctExe = self._model.dct_exe
# o dicionário de exercícios não existe ?
if self._dctExe is None:
# logger
l_log.critical(u"<E01: Tabela de exercícios não carregada !")
# cria um evento de quit
l_evtQuit = events.CQuit()
assert l_evtQuit
# dissemina o evento
self._event.post(l_evtQuit)
# cai fora..
sys.exit(1)
# atualiza na tela os dados da tabela de exercícios
self.exeUpdateList()
# ---------------------------------------------------------------------------------------------
def okToContinue(self):
"""
cria uma messageBox
@return True se tratou a resposta, senão False
"""
# resposta
l_bAns = True
'''
# flag de alterações setado ?
if(self._bChanged):
# questiona sobre alterações pendentes
l_Resp = QtGui.QMessageBox.question(self, self._txtContinueTit,
self._txtContinueMsg,
QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No |
QtGui.QMessageBox.Cancel)
# cancela ?
if(QtGui.QMessageBox.Cancel == l_Resp):
# não sai
l_bAns = False
# salva ?
elif(QtGui.QMessageBox.Yes == l_Resp):
# salva as pendências e sai
l_bAns = True
# não salva ?
else:
# reseta o flag de alterações..
self._bChanged = False
# ...e sai
l_bAns = True
'''
# return
return l_bAns
# ---------------------------------------------------------------------------------------------
def reject(self):
"""
DOCUMENT ME!
"""
self._oExe = None
# faz o "reject"
QtGui.QDialog.reject(self)
# close dialog
self.close()
# ---------------------------------------------------------------------------------------------
def restoreSettings(self):
"""
restaura as configurações salvas para esta janela
"""
# obtém os settings
l_set = QtCore.QSettings("sophosoft", "dbedit")
assert l_set
# restaura geometria da janela
self.restoreGeometry(l_set.value("%s/Geometry" % (self._txtSettings)).toByteArray())
# return
return True
# ---------------------------------------------------------------------------------------------
def trfDel(self):
"""
:return:
"""
# verifica condições de execução
assert self.qtw_anv is not None
assert self._oExe is not None
assert self._oExe.dct_exe_trf is not None
# obtém o tráfego do exercício selecionado
l_oTrf = self.getCurrentTrfSel(self._oExe.dct_exe_trf, self.qtw_anv)
if l_oTrf is not None:
# apaga o tráfego do exercício atual ?
if QtGui.QMessageBox.Yes == QtGui.QMessageBox.question(self,
self._txtDelTrfTit,
self._txtDelTrfMsg.format(l_oTrf.s_trf_ind),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No):
# apaga o tráfego do exercício
self.trfRemove(l_oTrf)
# ---------------------------------------------------------------------------------------------
def trfEdit(self):
"""
callback de btnTrfNew da dialog de edição de tráfegos do exercício
atualiza o tráfego do exercício na lista
:return:
"""
l_log = logging.getLogger("CDlgExeDataNEW::trfEdit")
l_log.setLevel(logging.DEBUG)
l_log.debug("Editar um tráfego do exercício")
# verifica condições de execução
assert self.qtw_anv is not None
assert self._oExe is not None
assert self._oExe.dct_exe_trf is not None
# obtém o tráfego do exercício selecionado
l_oTrf = self.getCurrentTrfSel(self._oExe.dct_exe_trf, self.qtw_anv)
l_log.debug("Tráfego : [%s] - Id [%s]" % (l_oTrf, l_oTrf.i_trf_id))
if (l_oTrf is not None):
# cria a dialog de edição do tráfego do exercício
l_Dlg = dlgAnv.CDlgAnvEditNEW(self._control, l_oTrf, self)
assert l_Dlg
# processa a dialog de edição de exercícios (modal)
if(l_Dlg.exec_ ()):
# obtém os dados alterados
l_oTrf = l_Dlg.getData()
l_log.debug("Tráfego Id [%s]" % str(l_oTrf.i_trf_id))
# tráfego do exercício existente ?
if (l_oTrf is not None) and (self._oExe is not None):
# atualiza o tráfego do exercício na lista
self._oExe.dct_exe_trf[l_oTrf.i_trf_id] = l_oTrf
# atualiza o exercicio no dicionário de exercícios
self._dctExe[self._oExe.s_exe_id] = self._oExe
# Cria o evento para salvar no disco o exercício atualizado
l_evtUpd2Disk = events.CUpd2Disk(fs_table="ANV", fs_filename=self._oExe.s_exe_id)
assert l_evtUpd2Disk
# dissemina o evento
self._event.post(l_evtUpd2Disk)
# se ok, atualiza a QTableWidget de exercícios
self.trfUpdateWidget ()
# ---------------------------------------------------------------------------------------------
def trfNew(self):
"""
callback de btnTrfNew da dialog de edição de tráfegos do exercício
cria um novo tráfego do exercício na lista
"""
l_log = logging.getLogger("CDlgExeDataNEW::trfNew")
l_log.setLevel(logging.DEBUG)
l_log.debug("Um novo tráfego do exercício [%s]" % self._oExe.s_exe_id)
# cria a dialog de edição de tráfegos do exercício
l_Dlg = dlgAnv.CDlgAnvEditNEW(self._control, None, self)
assert l_Dlg
# processa a dialog de edição de tráfegos do exercícios (modal)
if l_Dlg.exec_():
# obtém os dados da edição
l_oTrf = l_Dlg.getData()
li_node = 1
# Obtém o número do nó que não está sendo usado
if self._oExe.dct_exe_trf is not None:
ll_node_hosts = self._oExe.dct_exe_trf.keys()
l_log.debug("Dicionario de tráfegos do exercício [%s]" % self._oExe.dct_exe_trf)
l_log.debug("Lista de node hosts [%s]" % ll_node_hosts)
ll_node_hosts.sort()
li_index = 0
li_node = 1
while li_index < len(ll_node_hosts):
if (li_index > 0):
if ll_node_hosts[li_index] != ll_node_hosts[li_index - 1] + 1:
li_node = ll_node_hosts[li_index - 1] + 1
break
li_index = li_index + 1
if li_index != 0 and li_index == len(ll_node_hosts):
li_node = ll_node_hosts[li_index -1] + 1
# tráfego do exercício existente ?
if (l_oTrf is not None) and (self._oExe is not None):
# insere o tráfego do exercício na lista
l_oTrf.i_trf_id = li_node
self._oExe.dct_exe_trf[li_node] = l_oTrf
# atualiza o exercicio no dicionário de exercícios
self._dctExe[self._oExe.s_exe_id] = self._oExe
# Cria o evento para salvar no disco os exercícios
l_evtSave2Disk = events.CSave2Disk(fs_table="ANV")
assert l_evtSave2Disk
# dissemina o evento
self._event.post(l_evtSave2Disk)
# se ok, atualiza a QTableWidget de exercícios
self.trfUpdateWidget()
# ---------------------------------------------------------------------------------------------
def trfRemove(self, f_oTrf):
"""
remove o tráfego selecionado do exercício atual
@param f_oTrf : pointer para o tráfego selecionado
"""
# verifica condições de execução
assert f_oTrf is not None
# remove a linha da widget
self.qtw_anv.removeRow(self.qtw_anv.currentRow())
# atualiza a tabela de tráfegos do exercício
l_dctTrf = self._oExe.dct_exe_trf
del l_dctTrf[f_oTrf.i_trf_id]
self._oExe.dct_exe_trf = l_dctTrf
# atualiza o exercicio no dicionário de exercícios
self._dctExe[self._oExe.s_exe_id] = self._oExe
l_evt = None
if len(self._oExe.dct_exe_trf):
# Cria o evento para atualizar a tabela de trafegos no disco
l_evt = events.CSave2Disk(fs_table="ANV")
assert l_evt
else:
# Cria o evento para apagar do disco o exercício
l_evt = events.CDelFromDisk(fs_table="ANV", fs_filename=self._oExe.s_exe_id)
assert l_evt
# dissemina o evento
if l_evt is not None:
self._event.post(l_evt)
# ---------------------------------------------------------------------------------------------
def trfUpdateWidget(self):
"""
atualiza na tela os dados da QtableWidget de tráfegos do exercício
:return:
"""
# verifica condições de execução
# assert self.qtw_anv is not None
# assert self._oExe is not None
l_log = logging.getLogger("CDlgExeDataNEW::trfUpdateWidget")
l_log.setLevel(logging.DEBUG)
l_log.debug("Atualizando a tabela de tráfegos")
for li_row in range(self.qtw_anv.rowCount()):
self.qtw_anv.removeRow(li_row)
self.qtw_anv.setRowCount(0)
self.qtw_anv.setColumnCount(14)
self.qtw_anv.setHorizontalHeaderLabels(
["Node", "Latitude", "Longitude", "Tipo da Anv", "SSR", "Indicativo", "Origem", "Destino", "Proa", "Velocidade (Kt)",
"Altitude (Ft)", "Procedimento", "Tempo (min)", "ID"])
self.qtw_anv.setColumnHidden(13, True)
self.qtw_anv.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.qtw_anv.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)
self.qtw_anv.setSelectionBehavior(QtGui.QTableWidget.SelectRows)
self.qtw_anv.setSelectionMode(QtGui.QTableWidget.SingleSelection)
li_row = 0
# para todas as linhas da tabela...
l_log.debug(" Número de tráfegos [%s]" % len(self._oExe.dct_exe_trf))
for li_key, l_oTrf in self._oExe.dct_exe_trf.iteritems():
# cria nova linha na tabela
self.qtw_anv.insertRow(li_row)
# node name
lqtwi_item = QtGui.QTableWidgetItem(str(li_key))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 0, lqtwi_item)
# latitude
lqtwi_item = QtGui.QTableWidgetItem(str(l_oTrf.f_trf_lat))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 1, lqtwi_item)
# longitude
lqtwi_item = QtGui.QTableWidgetItem(str(l_oTrf.f_trf_lng))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 2, lqtwi_item)
# designador
lqtwi_item = QtGui.QTableWidgetItem(l_oTrf.ptr_trf_prf.s_prf_id)
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 3, lqtwi_item)
# ssr
lqtwi_item = QtGui.QTableWidgetItem(str(l_oTrf.i_trf_ssr).zfill(4))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 4, lqtwi_item)
# indicativo
lqtwi_item = QtGui.QTableWidgetItem(l_oTrf.s_trf_ind)
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 5, lqtwi_item)
# origem
lqtwi_item = QtGui.QTableWidgetItem(l_oTrf.ptr_trf_aer_ori.s_aer_indc)
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 6, lqtwi_item)
# destino
lqtwi_item = QtGui.QTableWidgetItem(l_oTrf.ptr_trf_aer_dst.s_aer_indc)
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 7, lqtwi_item)
# proa
lqtwi_item = QtGui.QTableWidgetItem(str(l_oTrf.f_trf_pro_atu))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 8, lqtwi_item)
# velocidade
lf_VelKt = l_oTrf.f_trf_vel_atu * cdefs.D_CNV_MS2KT
lqtwi_item = QtGui.QTableWidgetItem(str(lf_VelKt))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 9, lqtwi_item)
# altitude
lf_AltFt = l_oTrf.f_trf_alt_atu * cdefs.D_CNV_M2FT
lqtwi_item = QtGui.QTableWidgetItem(str(lf_AltFt))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 10, lqtwi_item)
# procedimento
lqtwi_item = QtGui.QTableWidgetItem(l_oTrf.s_trf_prc)
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 11, lqtwi_item)
# tempo de apresentação do tráfego
li_HorIni, li_MinIni, li_SegIni = self._oExe.t_exe_hor_ini
li_MinIni = li_MinIni + (li_HorIni * 60) + (li_SegIni / 60)
li_Hor, li_Min, li_Seg = l_oTrf.t_trf_hor_atv
li_Min = li_Min + (li_Hor * 60) + (li_Seg / 60)
lqtwi_item = QtGui.QTableWidgetItem(str(li_Min - li_MinIni))
lqtwi_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.qtw_anv.setItem(li_row, 12, lqtwi_item)
li_row = li_row + 1
# redefine o tamanho da QTableWidget
self.qtw_anv.resizeRowsToContents()
self.qtw_anv.resizeColumnsToContents()
# < the end >--------------------------------------------------------------------------------------
| gpl-3.0 | -3,663,699,010,544,031,000 | 1,416,518,329,352,746,500 | 35.080677 | 129 | 0.528502 | false |
xiaotdl/ansible | contrib/inventory/serf.py | 395 | 3032 | #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.python.org/pypi/serfclient
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
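#
# Example (illustrative invocation and output shape; host, port and tag
# values below are assumptions, not part of the script):
#
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list
#
# might print something like:
#
#   {"serf": ["node-1", "node-2"],
#    "web": ["node-1"],
#    "_meta": {"hostvars": {"node-1": {"role": "web"}}}}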
import argparse
import collections
import os
import sys
# https://pypi.python.org/pypi/serfclient
from serfclient import SerfClient, EnvironmentConfig
try:
import json
except ImportError:
import simplejson as json
_key = 'serf'
def _serf_client():
env = EnvironmentConfig()
return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
return _serf_client().members().body['Members']
def get_nodes(data):
return [node['Name'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for key, value in node['Tags'].items():
groups[value].append(node['Name'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['Name']] = node['Tags']
return meta
def print_list():
data = get_serf_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_serf_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from serf cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from serf cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | -7,255,066,973,571,412,000 | -8,030,916,537,666,298,000 | 25.365217 | 94 | 0.683047 | false |
cubicova17/annet | venv/lib/python2.7/site-packages/django/utils/http.py | 31 | 9025 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlparse,
urlencode as original_urlencode)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
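# Illustrative behaviour (the RFC 2616 example instant):
#
#     >>> http_date(784111777)
#     'Sun, 06 Nov 1994 08:49:37 GMT'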
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
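# Illustrative behaviour (same instant as the http_date example above):
#
#     >>> parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
#     784111777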
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
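# Illustrative round trip between the two base 36 helpers:
#
#     >>> int_to_base36(42)
#     '16'
#     >>> base36_to_int('16')
#     42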
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = s.encode('utf-8') # base64encode should only return ASCII.
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
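# Illustrative behaviour, using the example etags from RFC 2616:
#
#     >>> parse_etags('"xyzzy", W/"r2d2xxxx"')
#     ['xyzzy', 'r2d2xxxx']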
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse(url1), urlparse(url2)
try:
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
except ValueError:
return False
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if not url:
return False
url_info = urlparse(url)
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
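# Illustrative behaviour (the host value is an assumption for the example):
#
#     >>> is_safe_url('/next/page', host='testserver')
#     True
#     >>> is_safe_url('http://evil.example.com/', host='testserver')
#     False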
| mit | -1,625,105,788,658,344,200 | -8,071,759,211,070,219,000 | 33.578544 | 95 | 0.637008 | false |
scality/cinder | cinder/tests/unit/test_san.py | 34 | 2525 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.san import san
class SanDriverTestCase(test.TestCase):
"""Tests for SAN driver"""
def __init__(self, *args, **kwargs):
super(SanDriverTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(SanDriverTestCase, self).setUp()
self.configuration = mock.Mock(spec=configuration.Configuration)
self.configuration.san_is_local = False
self.configuration.san_ip = "10.0.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "password"
self.configuration.san_ssh_port = 22
self.configuration.san_thin_provision = True
self.configuration.san_private_key = 'private_key'
self.configuration.ssh_min_pool_conn = 1
self.configuration.ssh_max_pool_conn = 5
self.configuration.ssh_conn_timeout = 30
    class fake_san_driver(san.SanDriver):
        def initialize_connection(self):
            pass
        def create_volume(self):
            pass
        def delete_volume(self):
            pass
        def terminate_connection(self):
            pass
@mock.patch.object(san.processutils, 'ssh_execute')
@mock.patch.object(san.ssh_utils, 'SSHPool')
@mock.patch.object(san.utils, 'check_ssh_injection')
def test_ssh_formatted_command(self, mock_check_ssh_injection,
mock_ssh_pool, mock_ssh_execute):
driver = self.fake_san_driver(configuration=self.configuration)
cmd_list = ['uname', '-s']
expected_cmd = 'uname -s'
driver.san_execute(*cmd_list)
# get the same used mocked item from the pool
with driver.sshpool.item() as ssh_item:
mock_ssh_execute.assert_called_with(ssh_item, expected_cmd,
check_exit_code=None)
| apache-2.0 | 8,617,659,275,709,611,000 | 6,751,399,885,430,833,000 | 36.132353 | 78 | 0.646337 | false |
siketh/ASR | catkin_ws/src/asr/src/slam_controller.py | 1 | 1585 | #!/usr/bin/env python
import rospy, os, signal, subprocess
from std_msgs.msg import String
from std_msgs.msg import Bool
def state_callback(data):
if data.data == "Shutdown":
rospy.signal_shutdown(shutdown_hook())
def mapping_callback(data):
global mapping
mapping = data.data
def shutdown_hook():
print("\n...SLAM CONTROLLER SHUTTING DOWN...")
if slam_process is not False:
os.killpg(slam_process.pid, signal.SIGTERM)
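# Note: slam_process is launched below with preexec_fn=os.setsid, so roslaunch
# leads its own process group and os.killpg() stops it together with every
# node it spawned.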
def slam_controller():
global mapping, slam_process
slam_cmd = "roslaunch asr asr_mapping.launch"
launched = False
mapping = False
slam_process = False
rospy.init_node('slam_controller', anonymous=False)
rospy.Subscriber("current_state", String, state_callback, queue_size=10)
rospy.Subscriber("mapping_active", Bool, mapping_callback, queue_size=10)
rate = rospy.Rate(10) # 10hz
print("********** [SLAM CONTROLLER] **********")
while not rospy.is_shutdown():
if mapping and not launched:
print("\nINITIATING SLAM")
slam_process = subprocess.Popen(slam_cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
launched = True
if not mapping and launched:
print("\nQUITTING SLAM")
os.killpg(slam_process.pid, signal.SIGTERM)
slam_process = False
launched = False
if mapping:
print("Performing SLAM...")
rate.sleep()
if __name__ == '__main__':
try:
slam_controller()
except rospy.ROSInterruptException:
pass
| mit | 5,750,784,783,696,143,000 | 7,836,973,745,898,765,000 | 23.384615 | 111 | 0.625237 | false |
seanchen/taiga-back | taiga/urls.py | 4 | 1875 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from .routers import router
from .contrib_routers import router as contrib_router
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/', include(contrib_router.urls)),
url(r'^api/v1/api-auth/', include('taiga.base.api.urls', namespace='api')),
url(r'^admin/', include(admin.site.urls)),
]
def mediafiles_urlpatterns(prefix):
"""
Method for serve media files with runserver.
"""
import re
from django.views.static import serve
return [
url(r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')), serve,
{'document_root': settings.MEDIA_ROOT})
]
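# Illustrative example: with the defaults above, a request for
# /media/attachments/logo.png is answered by django.views.static.serve from
# MEDIA_ROOT/attachments/logo.png, and only while settings.DEBUG is on,
# as guarded below.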
if settings.DEBUG:
# Hardcoded only for development server
urlpatterns += staticfiles_urlpatterns(prefix="/static/")
urlpatterns += mediafiles_urlpatterns(prefix="/media/")
handler500 = "taiga.base.api.views.api_server_error"
| agpl-3.0 | 251,990,696,649,950,530 | 8,414,051,464,515,948,000 | 37.22449 | 79 | 0.723972 | false |
diox/olympia | src/olympia/activity/tests/test_commands.py | 4 | 2696 | from django.core.management import call_command
from django.core.management.base import CommandError
import pytest
from olympia import amo
from olympia.activity.models import ActivityLogToken
from olympia.amo.tests import TestCase, addon_factory, user_factory
class TestRepudiateActivityLogToken(TestCase):
def setUp(self):
addon = addon_factory()
self.version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
self.token1 = ActivityLogToken.objects.create(
uuid='5a0b8a83d501412589cc5d562334b46b',
version=self.version,
user=user_factory(),
)
self.token2 = ActivityLogToken.objects.create(
uuid='8a0b8a834e71412589cc5d562334b46b',
version=self.version,
user=user_factory(),
)
self.token3 = ActivityLogToken.objects.create(
uuid='336ae924bc23804cef345d562334b46b',
version=self.version,
user=user_factory(),
)
addon2 = addon_factory()
addon2_version = addon2.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
self.token_diff_version = ActivityLogToken.objects.create(
uuid='470023efdac5730773340eaf3080b589',
version=addon2_version,
user=user_factory(),
)
def test_with_tokens(self):
call_command(
'repudiate_token',
'5a0b8a83d501412589cc5d562334b46b',
'8a0b8a834e71412589cc5d562334b46b',
)
assert self.token1.reload().is_expired()
assert self.token2.reload().is_expired()
assert not self.token3.reload().is_expired()
assert not self.token_diff_version.reload().is_expired()
def test_with_version(self):
call_command('repudiate_token', version_id=self.version.id)
assert self.token1.reload().is_expired()
assert self.token2.reload().is_expired()
assert self.token3.reload().is_expired()
assert not self.token_diff_version.reload().is_expired()
def test_with_token_and_version_ignores_version(self):
call_command(
'repudiate_token',
'5a0b8a83d501412589cc5d562334b46b',
version_id=self.version.id,
)
assert self.token1.reload().is_expired() # token supplied is expired.
assert not self.token2.reload().is_expired() # version supplied isn't.
assert not self.token3.reload().is_expired() # check the others too.
assert not self.token_diff_version.reload().is_expired()
def test_no_tokens_no_version_is_error(self):
with pytest.raises(CommandError):
call_command('repudiate_token')
| bsd-3-clause | 3,922,737,118,081,088,000 | -6,144,730,977,428,730,000 | 38.072464 | 87 | 0.649852 | false |
Yoshiofthewire/CrunchyXBMC-Legacy | script.module.cryptopy/lib/crypto/cipher/arc4_test.py | 3 | 8035 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
""" crypto.cipher.arc4_test
Tests for arc4 encryption, uses AES for base algorithm
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
July 24, 2002
"""
import unittest
from crypto.cipher.arc4 import ARC4
from binascii_plus import b2a_p
class ARC4_TestVectors(unittest.TestCase):
""" Test ARC4 algorithm using know values """
def testKnowValues(self):
""" Test using vectors from..."""
def ARC4testVector(testCase,plainText,key,cipherText):
""" Process ARC4 test vectors from RFCxxxx"""
print '%s %s %s'%('='*((54-len(testCase))/2),testCase,'='*((54-len(testCase))/2))
# Convert from octet lists to string
pt = ''.join([chr(i) for i in plainText])
key = ''.join([chr(i) for i in key])
kct = ''.join([chr(i) for i in cipherText])
alg = ARC4(key)
print 'key: %s'%b2a_p(key)[9:]
print 'pt: %s'%b2a_p(pt)[9:]
ct = alg.encrypt(pt)
print 'ct: %s'%b2a_p(ct)[9:]
print 'kct: %s'%b2a_p(kct)[9:]
print '========================================================'
self.assertEqual( ct, kct )
alg.setKey(key)
dct = alg.decrypt( ct )
self.assertEqual( dct, pt )
ARC4testVector(
testCase = "Test Vectors from [CRYPTLIB]",
plainText = (0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
key = (0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF),
cipherText = (0x74, 0x94, 0xC2, 0xE7, 0x10, 0x4B, 0x08, 0x79))
ARC4testVector(
testCase = "Test Vectors from [COMMERCE]",
plainText = (0xdc, 0xee, 0x4c, 0xf9, 0x2c),
key = (0x61, 0x8a, 0x63, 0xd2, 0xfb),
cipherText = (0xf1, 0x38, 0x29, 0xc9, 0xde))
ARC4testVector(
testCase = "Test Vectors from [SSH ARCFOUR]",
plainText = (0x52, 0x75, 0x69, 0x73, 0x6c, 0x69, 0x6e, 0x6e,
0x75, 0x6e, 0x20, 0x6c, 0x61, 0x75, 0x6c, 0x75,
0x20, 0x6b, 0x6f, 0x72, 0x76, 0x69, 0x73, 0x73,
0x73, 0x61, 0x6e, 0x69, 0x2c, 0x20, 0x74, 0xe4,
0x68, 0x6b, 0xe4, 0x70, 0xe4, 0x69, 0x64, 0x65,
0x6e, 0x20, 0x70, 0xe4, 0xe4, 0x6c, 0x6c, 0xe4,
0x20, 0x74, 0xe4, 0x79, 0x73, 0x69, 0x6b, 0x75,
0x75, 0x2e, 0x20, 0x4b, 0x65, 0x73, 0xe4, 0x79,
0xf6, 0x6e, 0x20, 0x6f, 0x6e, 0x20, 0x6f, 0x6e,
0x6e, 0x69, 0x20, 0x6f, 0x6d, 0x61, 0x6e, 0x61,
0x6e, 0x69, 0x2c, 0x20, 0x6b, 0x61, 0x73, 0x6b,
0x69, 0x73, 0x61, 0x76, 0x75, 0x75, 0x6e, 0x20,
0x6c, 0x61, 0x61, 0x6b, 0x73, 0x6f, 0x74, 0x20,
0x76, 0x65, 0x72, 0x68, 0x6f, 0x75, 0x75, 0x2e,
0x20, 0x45, 0x6e, 0x20, 0x6d, 0x61, 0x20, 0x69,
0x6c, 0x6f, 0x69, 0x74, 0x73, 0x65, 0x2c, 0x20,
0x73, 0x75, 0x72, 0x65, 0x20, 0x68, 0x75, 0x6f,
0x6b, 0x61, 0x61, 0x2c, 0x20, 0x6d, 0x75, 0x74,
0x74, 0x61, 0x20, 0x6d, 0x65, 0x74, 0x73, 0xe4,
0x6e, 0x20, 0x74, 0x75, 0x6d, 0x6d, 0x75, 0x75,
0x73, 0x20, 0x6d, 0x75, 0x6c, 0x6c, 0x65, 0x20,
0x74, 0x75, 0x6f, 0x6b, 0x61, 0x61, 0x2e, 0x20,
0x50, 0x75, 0x75, 0x6e, 0x74, 0x6f, 0x20, 0x70,
0x69, 0x6c, 0x76, 0x65, 0x6e, 0x2c, 0x20, 0x6d,
0x69, 0x20, 0x68, 0x75, 0x6b, 0x6b, 0x75, 0x75,
0x2c, 0x20, 0x73, 0x69, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x76, 0x61, 0x72, 0x61, 0x6e, 0x20, 0x74,
0x75, 0x75, 0x6c, 0x69, 0x73, 0x65, 0x6e, 0x2c,
0x20, 0x6d, 0x69, 0x20, 0x6e, 0x75, 0x6b, 0x6b,
0x75, 0x75, 0x2e, 0x20, 0x54, 0x75, 0x6f, 0x6b,
0x73, 0x75, 0x74, 0x20, 0x76, 0x61, 0x6e, 0x61,
0x6d, 0x6f, 0x6e, 0x20, 0x6a, 0x61, 0x20, 0x76,
0x61, 0x72, 0x6a, 0x6f, 0x74, 0x20, 0x76, 0x65,
0x65, 0x6e, 0x2c, 0x20, 0x6e, 0x69, 0x69, 0x73,
0x74, 0xe4, 0x20, 0x73, 0x79, 0x64, 0xe4, 0x6d,
0x65, 0x6e, 0x69, 0x20, 0x6c, 0x61, 0x75, 0x6c,
0x75, 0x6e, 0x20, 0x74, 0x65, 0x65, 0x6e, 0x2e,
0x20, 0x2d, 0x20, 0x45, 0x69, 0x6e, 0x6f, 0x20,
0x4c, 0x65, 0x69, 0x6e, 0x6f),
key = (0x29, 0x04, 0x19, 0x72, 0xfb, 0x42, 0xba, 0x5f,
0xc7, 0x12, 0x77, 0x12, 0xf1, 0x38, 0x29, 0xc9),
cipherText = (0x35, 0x81, 0x86, 0x99, 0x90, 0x01, 0xe6, 0xb5,
0xda, 0xf0, 0x5e, 0xce, 0xeb, 0x7e, 0xee, 0x21,
0xe0, 0x68, 0x9c, 0x1f, 0x00, 0xee, 0xa8, 0x1f,
0x7d, 0xd2, 0xca, 0xae, 0xe1, 0xd2, 0x76, 0x3e,
0x68, 0xaf, 0x0e, 0xad, 0x33, 0xd6, 0x6c, 0x26,
0x8b, 0xc9, 0x46, 0xc4, 0x84, 0xfb, 0xe9, 0x4c,
0x5f, 0x5e, 0x0b, 0x86, 0xa5, 0x92, 0x79, 0xe4,
0xf8, 0x24, 0xe7, 0xa6, 0x40, 0xbd, 0x22, 0x32,
0x10, 0xb0, 0xa6, 0x11, 0x60, 0xb7, 0xbc, 0xe9,
0x86, 0xea, 0x65, 0x68, 0x80, 0x03, 0x59, 0x6b,
0x63, 0x0a, 0x6b, 0x90, 0xf8, 0xe0, 0xca, 0xf6,
0x91, 0x2a, 0x98, 0xeb, 0x87, 0x21, 0x76, 0xe8,
0x3c, 0x20, 0x2c, 0xaa, 0x64, 0x16, 0x6d, 0x2c,
0xce, 0x57, 0xff, 0x1b, 0xca, 0x57, 0xb2, 0x13,
0xf0, 0xed, 0x1a, 0xa7, 0x2f, 0xb8, 0xea, 0x52,
0xb0, 0xbe, 0x01, 0xcd, 0x1e, 0x41, 0x28, 0x67,
0x72, 0x0b, 0x32, 0x6e, 0xb3, 0x89, 0xd0, 0x11,
0xbd, 0x70, 0xd8, 0xaf, 0x03, 0x5f, 0xb0, 0xd8,
0x58, 0x9d, 0xbc, 0xe3, 0xc6, 0x66, 0xf5, 0xea,
0x8d, 0x4c, 0x79, 0x54, 0xc5, 0x0c, 0x3f, 0x34,
0x0b, 0x04, 0x67, 0xf8, 0x1b, 0x42, 0x59, 0x61,
0xc1, 0x18, 0x43, 0x07, 0x4d, 0xf6, 0x20, 0xf2,
0x08, 0x40, 0x4b, 0x39, 0x4c, 0xf9, 0xd3, 0x7f,
0xf5, 0x4b, 0x5f, 0x1a, 0xd8, 0xf6, 0xea, 0x7d,
0xa3, 0xc5, 0x61, 0xdf, 0xa7, 0x28, 0x1f, 0x96,
0x44, 0x63, 0xd2, 0xcc, 0x35, 0xa4, 0xd1, 0xb0,
0x34, 0x90, 0xde, 0xc5, 0x1b, 0x07, 0x11, 0xfb,
0xd6, 0xf5, 0x5f, 0x79, 0x23, 0x4d, 0x5b, 0x7c,
0x76, 0x66, 0x22, 0xa6, 0x6d, 0xe9, 0x2b, 0xe9,
0x96, 0x46, 0x1d, 0x5e, 0x4d, 0xc8, 0x78, 0xef,
0x9b, 0xca, 0x03, 0x05, 0x21, 0xe8, 0x35, 0x1e,
0x4b, 0xae, 0xd2, 0xfd, 0x04, 0xf9, 0x46, 0x73,
0x68, 0xc4, 0xad, 0x6a, 0xc1, 0x86, 0xd0, 0x82,
0x45, 0xb2, 0x63, 0xa2, 0x66, 0x6d, 0x1f, 0x6c,
0x54, 0x20, 0xf1, 0x59, 0x9d, 0xfd, 0x9f, 0x43,
0x89, 0x21, 0xc2, 0xf5, 0xa4, 0x63, 0x93, 0x8c,
0xe0, 0x98, 0x22, 0x65, 0xee, 0xf7, 0x01, 0x79,
0xbc, 0x55, 0x3f, 0x33, 0x9e, 0xb1, 0xa4, 0xc1,
0xaf, 0x5f, 0x6a, 0x54, 0x7f))
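# Direct use of the cipher under test, for reference; key and plaintext are
# taken from the [CRYPTLIB] vector above (expected ct: 74 94 C2 E7 10 4B 08 79):
#
#     alg = ARC4(''.join([chr(i) for i in (0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF)]))
#     ct = alg.encrypt('\x00' * 8)
#     alg.setKey(''.join([chr(i) for i in (0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF)]))
#     pt = alg.decrypt(ct)    # eight zero bytes again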
# Make this test module runnable from the command prompt
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -7,135,853,158,477,769,000 | -1,567,186,981,866,414,300 | 55.985816 | 93 | 0.459739 | false |
alexgorban/models | official/vision/image_classification/resnet_model.py | 1 | 12450 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 model for Keras.
Adapted from tf.keras.applications.resnet50.ResNet50().
This is ResNet model version 1.5.
Related papers/blogs:
- https://arxiv.org/abs/1512.03385
- https://arxiv.org/pdf/1603.05027v2.pdf
- http://torch.ch/blog/2016/02/04/resnets.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers as tf_python_keras_layers
from tensorflow.python.keras import models
from tensorflow.python.keras import regularizers
from official.vision.image_classification import imagenet_preprocessing
L2_WEIGHT_DECAY = 1e-4
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
layers = tf_python_keras_layers
def change_keras_layer(use_tf_keras_layers=False):
"""Change layers to either tf.keras.layers or tf.python.keras.layers.
Layer version of tf.keras.layers is depends on tensorflow version, but
tf.python.keras.layers checks environment variable TF2_BEHAVIOR.
This function is a temporal function to use tf.keras.layers.
Currently, tf v2 batchnorm layer is slower than tf v1 batchnorm layer.
this function is useful for tracking benchmark result for each version.
This function will be removed when we use tf.keras.layers as default.
TODO(b/146939027): Remove this function when tf v2 batchnorm reaches training
speed parity with tf v1 batchnorm.
Args:
use_tf_keras_layers: whether to use tf.keras.layers.
"""
global layers
if use_tf_keras_layers:
layers = tf.keras.layers
else:
layers = tf_python_keras_layers
def _gen_l2_regularizer(use_l2_regularizer=True):
return regularizers.l2(L2_WEIGHT_DECAY) if use_l2_regularizer else None
def identity_block(input_tensor,
kernel_size,
filters,
stage,
block,
use_l2_regularizer=True):
"""The identity block is the block that has no conv layer at shortcut.
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(
filters1, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters3, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(
x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2),
use_l2_regularizer=True):
"""A block that has a conv layer at shortcut.
Note that from stage 3,
the second conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(
filters1, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters2,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters3, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(
x)
shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '1')(
input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '1')(
shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
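# Shape sketch (illustrative, numbers assumed): for conv_block with
# filters=[128, 128, 512] and strides=(2, 2), the main path and the projection
# shortcut both halve the spatial dims, e.g. (None, 56, 56, 256) becomes
# (None, 28, 28, 512), so layers.add() sees matching shapes.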
def resnet50(num_classes,
batch_size=None,
use_l2_regularizer=True,
rescale_inputs=False):
"""Instantiates the ResNet50 architecture.
Args:
num_classes: `int` number of classes for image classification.
batch_size: Size of the batches for each step.
use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
rescale_inputs: whether to rescale inputs from 0 to 1.
Returns:
A Keras model instance.
"""
input_shape = (224, 224, 3)
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
if rescale_inputs:
# Hub image modules expect inputs in the range [0, 1]. This rescales these
# inputs to the range expected by the trained model.
x = layers.Lambda(
lambda x: x * 255.0 - backend.constant(
imagenet_preprocessing.CHANNEL_MEANS,
shape=[1, 1, 3],
dtype=x.dtype),
name='rescale')(
img_input)
else:
x = img_input
if backend.image_data_format() == 'channels_first':
x = layers.Lambda(
lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
name='transpose')(x)
bn_axis = 1
else: # channels_last
bn_axis = 3
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
x = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='conv1')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name='bn_conv1')(
x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = conv_block(
x,
3, [64, 64, 256],
stage=2,
block='a',
strides=(1, 1),
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [64, 64, 256],
stage=2,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [64, 64, 256],
stage=2,
block='c',
use_l2_regularizer=use_l2_regularizer)
x = conv_block(
x,
3, [128, 128, 512],
stage=3,
block='a',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [128, 128, 512],
stage=3,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [128, 128, 512],
stage=3,
block='c',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [128, 128, 512],
stage=3,
block='d',
use_l2_regularizer=use_l2_regularizer)
x = conv_block(
x,
3, [256, 256, 1024],
stage=4,
block='a',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='c',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='d',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='e',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='f',
use_l2_regularizer=use_l2_regularizer)
x = conv_block(
x,
3, [512, 512, 2048],
stage=5,
block='a',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [512, 512, 2048],
stage=5,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [512, 512, 2048],
stage=5,
block='c',
use_l2_regularizer=use_l2_regularizer)
rm_axes = [1, 2] if backend.image_data_format() == 'channels_last' else [2, 3]
x = layers.Lambda(lambda x: backend.mean(x, rm_axes), name='reduce_mean')(x)
x = layers.Dense(
num_classes,
kernel_initializer=initializers.RandomNormal(stddev=0.01),
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='fc1000')(
x)
  # A softmax that is followed by the model loss cannot be done in float16
  # due to numeric issues. So we pass dtype=float32.
x = layers.Activation('softmax', dtype='float32')(x)
# Create model.
return models.Model(img_input, x, name='resnet50')
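# Minimal usage sketch (assumed, not part of the module itself):
#
#     model = resnet50(num_classes=1000)
#     model.summary()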
| apache-2.0 | 936,799,316,832,973,000 | -441,714,130,579,702,800 | 28.363208 | 80 | 0.622249 | false |