repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
kgyl/twitter-data-analysis | scripts/categorize_user_vectors.py | 1 | 2047 | from scipy.spatial.distance import euclidean
from sklearn.decomposition import PCA
import numpy as np
import math
collection_name = 'ns-users'
DATA_DIR = '../../data/categories/ns-users/'

# Word2vec feature vectors for every retweeting user and for every category.
user_vecs = np.load('../../data/' + collection_name +
                    '/retweeter_w2v_by_avg_word_vec_10_30/features.npy')
category_vecs = np.load('../../data/categories/features.npy')
category_titles = np.load('../../data/categories/categories.npy')

# Fit a 3-component PCA on the category vectors alone, then project both
# the categories and the users into that shared 3-D space so distances
# between them are comparable.
pca = PCA(n_components=3).fit(category_vecs)
category_vecs_3d = pca.transform(category_vecs)
user_vecs_3d = pca.transform(user_vecs)

# NOTE(fix): the original file contained a corrupted duplicate of the loop
# below that referenced undefined names (`similiarity`, `user`) and called
# an un-imported `cosine`; it also measured distances against the raw
# high-dimensional user vectors instead of the PCA-projected ones.
metadata = open(DATA_DIR + 'metadata.txt', 'w')
similarities = []
try:
    metadata.write('Best category\tCategories\n')
    for user in user_vecs_3d:
        # Distance from this user to every category centroid in PCA space;
        # the closest (argmin) category is the "best" one.
        similarity = np.asarray(
            [euclidean(user, category) for category in category_vecs_3d])
        best = similarity.argmin(axis=0)
        metadata.write('\t'.join([
            category_titles[best],
            ' '.join([category_titles[ind] + '-' + str(similarity[ind])
                      for ind in range(len(category_vecs))])]) + '\n')
        similarities.append(similarity)
finally:
    # Close the metadata file even if a vector load/write fails.
    metadata.close()

np.save(DATA_DIR + 'features.npy', similarities)
| gpl-3.0 | -3,690,693,589,987,609,600 | 39.94 | 136 | 0.705911 | false |
ohio813/pyflag | src/plugins/Urwid/urwid/widget.py | 8 | 78773 | #!/usr/bin/python
#
# Urwid basic widget classes
# Copyright (C) 2004-2007 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
import string
from util import *
from canvas import *
# Python < 2.3 has no sum() builtin; install a reduce-based fallback.
# Catch only NameError -- the original bare `except:` would also have
# swallowed unrelated errors such as KeyboardInterrupt.
try:
    sum  # old python?
except NameError:
    sum = lambda l: reduce(lambda a, b: a + b, l, 0)
class WidgetMeta(MetaSuper, MetaSignals):
    """
    Automatic caching of render and rows methods.

    Class variable no_cache is a list of names of methods to not cache.

    Class variable ignore_focus if defined and True indicates that this
    widget is not affected by the focus parameter, so it may be ignored
    when caching.
    """
    def __init__(cls, name, bases, d):
        # d is the class namespace dict; only wrap methods actually
        # defined on this class (not inherited ones).
        no_cache = d.get("no_cache", [])

        super(WidgetMeta, cls).__init__(name, bases, d)

        if "render" in d:
            # Wrap render() so canvases are fetched from / stored into
            # the CanvasCache, unless the class opted out via no_cache.
            if "render" not in no_cache:
                render_fn = cache_widget_render(cls)
            else:
                render_fn = nocache_widget_render(cls)
            cls.render = render_fn

        if "rows" in d and "rows" not in no_cache:
            cls.rows = cache_widget_rows(cls)
        # These are class-construction directives only; remove them so
        # they are not inherited by subclasses.
        if "no_cache" in d:
            del cls.no_cache
        if "ignore_focus" in d:
            del cls.ignore_focus
class WidgetError(Exception):
    """Raised when a widget misbehaves, e.g. renders a wrongly sized canvas."""
def validate_size(widget, size, canv):
    """
    Raise a WidgetError if a canv does not match size size.
    """
    if not size:
        # An empty size tuple places no constraints on the canvas.
        return
    # A size of (maxcol, 0) is exempt from the column check.
    if size[1:] != (0,) and canv.cols() != size[0]:
        raise WidgetError("Widget %r rendered (%d x %d) canvas"
            " when passed size %r!" % (widget, canv.cols(),
            canv.rows(), size))
    if len(size) > 1 and canv.rows() != size[1]:
        raise WidgetError("Widget %r rendered (%d x %d) canvas"
            " when passed size %r!" % (widget, canv.cols(),
            canv.rows(), size))
def cache_widget_render(cls):
    """
    Return a function that wraps the cls.render() method
    and fetches and stores canvases with CanvasCache.
    """
    ignore_focus = bool(getattr(cls, "ignore_focus", False))
    fn = cls.render
    def cached_render(self, size, focus=False):
        # Widgets that declared ignore_focus always cache as unfocused.
        focus = focus and not ignore_focus
        canv = CanvasCache.fetch(self, size, focus)
        if canv:
            return canv

        canv = fn(self, size, focus=focus)
        validate_size(self, size, canv)
        if canv.widget_info:
            # Canvas already belongs to another widget; wrap it so we
            # can finalize a fresh composite for this one.
            canv = CompositeCanvas(canv)
        canv.finalize(self, size, focus)
        CanvasCache.store(canv)
        return canv
    # Keep a handle to the unwrapped render so it can be re-wrapped
    # (see nocache_widget_render / nocache_widget_render_instance).
    cached_render.original_fn = fn
    return cached_render
def nocache_widget_render(cls):
    """
    Return a function that wraps the cls.render() method
    and finalizes the canvas that it returns, but does not cache it.
    """
    fn = cls.render
    if hasattr(fn, "original_fn"):
        # render was already wrapped by cache_widget_render; unwrap it.
        fn = fn.original_fn
    def finalize_render(self, size, focus=False):
        canv = fn(self, size, focus=focus)
        if canv.widget_info:
            # Canvas already finalized for another widget; wrap it.
            canv = CompositeCanvas(canv)
        validate_size(self, size, canv)
        canv.finalize(self, size, focus)
        return canv
    finalize_render.original_fn = fn
    return finalize_render
def nocache_widget_render_instance(self):
    """
    Return a function that wraps the cls.render() method
    and finalizes the canvas that it returns, but does not
    cache the canvas.

    Unlike nocache_widget_render() this operates on a single widget
    instance whose render method was wrapped by cache_widget_render.
    """
    fn = self.render.original_fn
    def finalize_render(size, focus=False):
        # Note: no `self` parameter -- it is captured by this closure.
        canv = fn(self, size, focus=focus)
        if canv.widget_info:
            canv = CompositeCanvas(canv)
        canv.finalize(self, size, focus)
        return canv
    finalize_render.original_fn = fn
    return finalize_render
def cache_widget_rows(cls):
    """
    Return a function that wraps the cls.rows() method
    and returns rows from the CanvasCache if available.
    """
    ignore_focus = bool(getattr(cls, "ignore_focus", False))
    fn = cls.rows
    def cached_rows(self, size, focus=False):
        focus = focus and not ignore_focus
        # If a canvas for this widget/size is cached, its height is
        # authoritative and cheaper than recomputing rows().
        canv = CanvasCache.fetch(self, size, focus)
        if canv:
            return canv.rows()

        return fn(self, size, focus)
    return cached_rows
class Widget(object):
    """
    base class of widgets
    """
    __metaclass__ = WidgetMeta  # wires up render/rows caching
    _selectable = False  # default: widget cannot take the focus

    def _invalidate(self):
        # Drop cached canvases so the next render() is recomputed.
        CanvasCache.invalidate(self)

    def _emit(self, name, *args):
        # Send signal `name` to any handlers registered on this widget.
        Signals.emit(self, name, *args)

    def selectable(self):
        """Return True if this widget may receive the focus."""
        return self._selectable
class FlowWidget(Widget):
    """
    base class of widgets

    Flow widgets are given a number of columns and compute their own
    number of rows.
    """
    def rows(self, (maxcol,), focus=False):
        """
        All flow widgets must implement this function.
        """
        raise NotImplementedError()

    def render(self, (maxcol,), focus=False):
        """
        All widgets must implement this function.
        """
        raise NotImplementedError()
class BoxWidget(Widget):
    """
    base class of width and height constrained widgets such as
    the top level widget attached to the display object
    """
    _selectable = True  # box widgets take the focus by default

    def render(self, size, focus=False):
        """
        All widgets must implement this function.

        size -- (maxcol, maxrow) tuple of available screen space
        """
        raise NotImplementedError()
def fixed_size(size):
    """
    raise ValueError if size != ().

    Used by FixedWidgets to test size parameter.
    """
    if size != ():
        # repr() replaces the Python-2-only backtick syntax, and the
        # message now has the space that was missing between the two
        # string halves ("size.passed").
        raise ValueError("FixedWidget takes only () for size. "
            "passed: %s" % repr(size))
class FixedWidget(Widget):
    """
    base class of widgets that know their width and height and
    cannot be resized
    """
    def render(self, size, focus=False):
        """
        All widgets must implement this function.

        size -- must be () for fixed widgets (see fixed_size())
        """
        raise NotImplementedError()

    def pack(self, size=None, focus=False):
        """
        All fixed widgets must implement this function.

        Return the (cols, rows) this widget will occupy.
        """
        raise NotImplementedError()
class Divider(FlowWidget):
    """
    Horizontal divider widget
    """
    ignore_focus = True  # rendering is identical with or without focus

    def __init__(self,div_char=" ",top=0,bottom=0):
        """
        div_char -- character to repeat across line
        top -- number of blank lines above
        bottom -- number of blank lines below
        """
        self.__super.__init__()
        self.div_char = div_char
        self.top = top
        self.bottom = bottom

    def rows(self, (maxcol,), focus=False):
        """Return the number of lines that will be rendered."""
        return self.top + 1 + self.bottom

    def render(self, (maxcol,), focus=False):
        """Render the divider as a canvas and return it."""
        canv = SolidCanvas(self.div_char, maxcol, 1)
        canv = CompositeCanvas(canv)
        if self.top or self.bottom:
            # Surround the divider line with the requested blank rows.
            canv.pad_trim_top_bottom(self.top, self.bottom)
        return canv
class SolidFill(BoxWidget):
    """
    Box widget that fills its whole area with a single character.
    """
    _selectable = False
    ignore_focus = True  # rendering is identical with or without focus

    def __init__(self,fill_char=" "):
        """
        fill_char -- character to fill area with
        """
        self.__super.__init__()
        self.fill_char = fill_char

    def render(self,(maxcol,maxrow), focus=False ):
        """Render the Fill as a canvas and return it."""
        return SolidCanvas(self.fill_char, maxcol, maxrow)
class TextError(Exception):
    """Raised for invalid Text widget configuration (align/wrap modes)."""
class Text(FlowWidget):
    """
    a horizontally resizeable text widget
    """
    ignore_focus = True  # plain text renders the same with or without focus

    def __init__(self,markup, align='left', wrap='space', layout=None):
        """
        markup -- content of text widget, one of:
            plain string -- string is displayed
            ( attr, markup2 ) -- markup2 is given attribute attr
            [ markupA, markupB, ... ] -- list items joined together
        align -- align mode for text layout
        wrap -- wrap mode for text layout
        layout -- layout object to use, defaults to StandardTextLayout
        """
        self.__super.__init__()
        self._cache_maxcol = None  # maxcol the cached translation was built for
        self.set_text(markup)
        self.set_layout(align, wrap, layout)

    def _invalidate(self):
        # Discard the cached line translation along with cached canvases.
        self._cache_maxcol = None
        self.__super._invalidate()

    def set_text(self,markup):
        """Set content of text widget."""
        self.text, self.attrib = decompose_tagmarkup(markup)
        self._invalidate()

    def get_text(self):
        """
        Returns (text, attributes).

        text -- complete string content of text widget
        attributes -- run length encoded attributes for text
        """
        return self.text, self.attrib

    def set_align_mode(self, mode):
        """
        Set text alignment / justification.

        Valid modes for StandardTextLayout are:
            'left', 'center' and 'right'
        """
        if not self.layout.supports_align_mode(mode):
            raise TextError("Alignment mode %s not supported."%
                `mode`)
        self.align_mode = mode
        self._invalidate()

    def set_wrap_mode(self, mode):
        """
        Set wrap mode.

        Valid modes for StandardTextLayout are :
            'any' : wrap at any character
            'space' : wrap on space character
            'clip' : truncate lines instead of wrapping
        """
        if not self.layout.supports_wrap_mode(mode):
            raise TextError("Wrap mode %s not supported"%`mode`)
        self.wrap_mode = mode
        self._invalidate()

    def set_layout(self, align, wrap, layout=None):
        """
        Set layout object, align and wrap modes.

        align -- align mode for text layout
        wrap -- wrap mode for text layout
        layout -- layout object to use, defaults to StandardTextLayout
        """
        if layout is None:
            layout = default_layout
        self.layout = layout
        self.set_align_mode( align )
        self.set_wrap_mode( wrap )

    def render(self,(maxcol,), focus=False):
        """
        Render contents with wrapping and alignment.  Return canvas.
        """
        text, attr = self.get_text()
        # Pass (text, attr) through so the translation cache does not
        # have to call get_text() a second time.
        trans = self.get_line_translation( maxcol, (text,attr) )
        return apply_text_layout(text, attr, trans, maxcol)

    def rows(self,(maxcol,), focus=False):
        """Return the number of rows the rendered text spans."""
        return len(self.get_line_translation(maxcol))

    def get_line_translation(self, maxcol, ta=None):
        """Return layout structure for mapping self.text to a canvas.
        """
        # Recompute only when maxcol changed since the last layout.
        if not self._cache_maxcol or self._cache_maxcol != maxcol:
            self._update_cache_translation(maxcol, ta)
        return self._cache_translation

    def _update_cache_translation(self,maxcol, ta):
        if ta:
            text, attr = ta
        else:
            text, attr = self.get_text()
        self._cache_maxcol = maxcol
        self._cache_translation = self._calc_line_translation(
            text, maxcol )

    def _calc_line_translation(self, text, maxcol ):
        # Delegate the actual layout computation to the layout object.
        return self.layout.layout(
            text, self._cache_maxcol,
            self.align_mode, self.wrap_mode )

    def pack(self, size=None, focus=False):
        """
        Return the number of screen columns required for this Text
        widget to be displayed without wrapping or clipping, as a
        single element tuple.

        size -- None for unlimited screen columns or (maxcol,) to
            specify a maximum column size
        """
        text, attr = self.get_text()

        if size is not None:
            (maxcol,) = size
            if not hasattr(self.layout, "pack"):
                # layout cannot pack; fall back to the given size
                return size
            trans = self.get_line_translation( maxcol, (text,attr))
            cols = self.layout.pack( maxcol, trans )
            return (cols,)

        # No limit given: the widest line (between newlines) wins.
        i = 0
        cols = 0
        while i < len(text):
            j = text.find('\n', i)
            if j == -1:
                j = len(text)
            c = calc_width(text, i, j)
            if c>cols:
                cols = c
            i = j+1
        return (cols,)
class Edit(Text):
    """Text edit widget"""

    def valid_char(self, ch):
        """Return true for printable characters."""
        return is_wide_char(ch,0) or (len(ch)==1 and ord(ch) >= 32)

    def selectable(self): return True

    def __init__(self, caption = "", edit_text = "", multiline = False,
            align = 'left', wrap = 'space', allow_tab = False,
            edit_pos = None, layout=None):
        """
        caption -- markup for caption preceeding edit_text
        edit_text -- text string for editing
        multiline -- True: 'enter' inserts newline  False: return it
        align -- align mode
        wrap -- wrap mode
        allow_tab -- True: 'tab' inserts 1-8 spaces  False: return it
        edit_pos -- initial position for cursor, None:at end
        layout -- layout object
        """
        self.__super.__init__("", align, wrap, layout)
        assert type(edit_text)==type("") or type(edit_text)==type(u"")
        self.multiline = multiline
        self.allow_tab = allow_tab
        self.edit_pos = 0
        self.set_caption(caption)
        self.set_edit_text(edit_text)
        if edit_pos is None:
            edit_pos = len(edit_text)
        self.set_edit_pos(edit_pos)
        self._shift_view_to_cursor = False

    def get_text(self):
        """get_text() -> text, attributes

        text -- complete text of caption and edit_text
        attributes -- run length encoded attributes for text
        """
        return self.caption + self.edit_text, self.attrib

    def get_pref_col(self, (maxcol,)):
        """Return the preferred column for the cursor, or the
        current cursor x value."""
        pref_col, then_maxcol = self.pref_col_maxcol
        if then_maxcol != maxcol:
            # stored preference is for a different width; recompute
            return self.get_cursor_coords((maxcol,))[0]
        else:
            return pref_col

    def update_text(self):
        """Deprecated.  Use set_caption and/or set_edit_text instead.

        Make sure any cached line translation is not reused."""
        self._invalidate()

    def set_caption(self, caption):
        """Set the caption markup for this widget."""
        self.caption, self.attrib = decompose_tagmarkup(caption)
        self._invalidate()

    def set_edit_pos(self, pos):
        """Set the cursor position with a self.edit_text offset."""
        assert pos >= 0 and pos <= len(self.edit_text), "out of range"
        self.highlight = None
        self.pref_col_maxcol = None, None
        self.edit_pos = pos
        self._invalidate()

    def set_edit_text(self, text):
        """Set the edit text for this widget."""
        self.highlight = None
        self.edit_text = text
        if self.edit_pos > len(text):
            # clamp the cursor to the new, shorter text
            self.edit_pos = len(text)
        self._invalidate()

    def get_edit_text(self):
        """Return the edit text for this widget."""
        return self.edit_text

    def insert_text(self, text):
        """Insert text at the cursor position and update cursor."""
        p = self.edit_pos
        self.set_edit_text( self.edit_text[:p] + text +
            self.edit_text[p:] )
        self.set_edit_pos( self.edit_pos + len(text))

    def keypress(self,(maxcol,),key):
        """Handle editing keystrokes, return others."""
        p = self.edit_pos
        if self.valid_char(key):
            # printable character: replace any highlight and insert
            self._delete_highlighted()
            if type(key) == type(u""):
                key = key.encode("utf-8")
            self.insert_text( key )

        elif key=="tab" and self.allow_tab:
            self._delete_highlighted()
            # expand tab to spaces up to the next 8-column stop
            key = " "*(8-(self.edit_pos%8))
            self.insert_text( key )

        elif key=="enter" and self.multiline:
            self._delete_highlighted()
            key = "\n"
            self.insert_text( key )

        elif key=="left":
            if p==0: return key
            p = move_prev_char(self.edit_text,0,p)
            self.set_edit_pos(p)

        elif key=="right":
            if p >= len(self.edit_text): return key
            p = move_next_char(self.edit_text,p,len(self.edit_text))
            self.set_edit_pos(p)

        elif key in ("up","down"):
            self.highlight = None

            x,y = self.get_cursor_coords((maxcol,))
            pref_col = self.get_pref_col((maxcol,))
            assert pref_col is not None
            #if pref_col is None:
            #    pref_col = x

            if key == "up": y -= 1
            else: y += 1

            if not self.move_cursor_to_coords((maxcol,),pref_col,y):
                return key

        elif key=="backspace":
            self._delete_highlighted()
            self.pref_col_maxcol = None, None
            if p == 0: return key
            p = move_prev_char(self.edit_text,0,p)
            self.set_edit_text( self.edit_text[:p] +
                self.edit_text[self.edit_pos:] )
            self.set_edit_pos( p )

        elif key=="delete":
            self._delete_highlighted()
            self.pref_col_maxcol = None, None
            if p >= len(self.edit_text):
                return key
            p = move_next_char(self.edit_text,p,len(self.edit_text))
            self.set_edit_text( self.edit_text[:self.edit_pos] +
                self.edit_text[p:] )

        elif key in ("home", "end"):
            self.highlight = None
            self.pref_col_maxcol = None, None

            x,y = self.get_cursor_coords((maxcol,))

            if key == "home":
                self.move_cursor_to_coords((maxcol,),'left',y)
            else:
                self.move_cursor_to_coords((maxcol,),'right',y)
            return

        else:
            # key wasn't handled
            return key

    def move_cursor_to_coords(self, (maxcol,), x, y):
        """Set the cursor position with (x,y) coordinates.

        Returns True if move succeeded, False otherwise.
        """
        trans = self.get_line_translation(maxcol)
        top_x, top_y = self.position_coords(maxcol, 0)
        if y < top_y or y >= len(trans):
            return False

        pos = calc_pos( self.get_text()[0], trans, x, y )
        e_pos = pos - len(self.caption)
        # clamp position into the editable region (past the caption)
        if e_pos < 0: e_pos = 0
        if e_pos > len(self.edit_text): e_pos = len(self.edit_text)
        self.edit_pos = e_pos
        self.pref_col_maxcol = x, maxcol
        self._invalidate()
        return True

    def mouse_event(self, (maxcol,), event, button, x, y, focus):
        """
        Move the cursor to the location clicked for button 1.
        """
        if button==1:
            return self.move_cursor_to_coords( (maxcol,), x, y )

    def _delete_highlighted(self):
        """
        Delete all highlighted text and update cursor position, if any
        text is highlighted.
        """
        if not self.highlight: return
        start, stop = self.highlight
        btext, etext = self.edit_text[:start], self.edit_text[stop:]
        self.set_edit_text( btext + etext )
        self.edit_pos = start
        self.highlight = None

    def render(self,(maxcol,), focus=False):
        """
        Render edit widget and return canvas.  Include cursor when in
        focus.
        """
        self._shift_view_to_cursor = not not focus # force bool

        canv = Text.render(self,(maxcol,))
        if focus:
            canv = CompositeCanvas(canv)
            canv.cursor = self.get_cursor_coords((maxcol,))

        # .. will need to FIXME if I want highlight to work again
        #if self.highlight:
        #    hstart, hstop = self.highlight_coords()
        #    d.coords['highlight'] = [ hstart, hstop ]
        return canv

    def get_line_translation(self, maxcol, ta=None ):
        trans = Text.get_line_translation(self, maxcol, ta)
        if not self._shift_view_to_cursor:
            return trans

        text, ignore = self.get_text()
        x,y = calc_coords( text, trans,
            self.edit_pos + len(self.caption) )
        if x < 0:
            # cursor is left of the visible region: shift line right
            return ( trans[:y]
                + [shift_line(trans[y],-x)]
                + trans[y+1:] )
        elif x >= maxcol:
            # cursor is right of the visible region: shift line left
            return ( trans[:y]
                + [shift_line(trans[y],-(x-maxcol+1))]
                + trans[y+1:] )
        return trans

    def get_cursor_coords(self,(maxcol,)):
        """Return the (x,y) coordinates of cursor within widget."""
        self._shift_view_to_cursor = True
        return self.position_coords(maxcol,self.edit_pos)

    def position_coords(self,maxcol,pos):
        """
        Return (x,y) coordinates for an offset into self.edit_text.
        """
        p = pos + len(self.caption)
        trans = self.get_line_translation(maxcol)
        x,y = calc_coords(self.get_text()[0], trans,p)
        return x,y
class IntEdit(Edit):
    """Edit widget for integer values"""

    def valid_char(self, ch):
        """Return true for decimal digits."""
        return len(ch)==1 and ord(ch)>=ord('0') and ord(ch)<=ord('9')

    def __init__(self,caption="",default=None):
        """
        caption -- caption markup
        default -- default edit value
        """
        if default is not None: val = str(default)
        else: val = ""
        self.__super.__init__(caption,val)

    def keypress(self,(maxcol,),key):
        """Handle editing keystrokes.  Return others."""
        if key in list("0123456789"):
            # trim leading zeros
            while self.edit_pos > 0 and self.edit_text[:1] == "0":
                self.set_edit_pos( self.edit_pos - 1)
                self.set_edit_text(self.edit_text[1:])

        # defer the actual insertion/navigation to Edit
        unhandled = Edit.keypress(self,(maxcol,),key)
        return unhandled

    def value(self):
        """Return the numeric value of self.edit_text."""
        if self.edit_text:
            return long(self.edit_text)
        else:
            # empty field counts as zero
            return 0
class WidgetWrap(Widget):
    def __init__(self, w):
        """
        w -- widget to wrap, stored as self.w

        This object will pass the functions defined in Widget interface
        definition to self.w.
        """
        self._w = w

    def get_w(self):
        return self._w
    def set_w(self, w):
        # assigning a new wrapped widget invalidates cached canvases
        self._w = w
        self._invalidate()
    w = property(get_w, set_w)

    def render(self, size, focus=False):
        """Render self.w."""
        canv = self.w.render(size, focus=focus)
        # wrap so finalization doesn't clobber the inner widget's canvas
        return CompositeCanvas(canv)

    def selectable(self):
        return self.w.selectable()

    def __getattr__(self,name):
        """Call self.w if name is in Widget interface definition."""
        # Only forward the known Widget-interface methods; anything
        # else raises AttributeError as usual.
        if name in ['get_cursor_coords','get_pref_col','keypress',
            'move_cursor_to_coords','rows','mouse_event',]:
            return getattr(self._w, name)
        raise AttributeError, name
class SelectableIcon(Text):
    """
    Text widget that can take the focus, used for the clickable part of
    CheckBox and RadioButton.
    """
    def selectable(self):
        return True

    def render(self, (maxcol,), focus=False):
        c = Text.render(self, (maxcol,), focus )
        if focus:
            # show the cursor inside the icon when focused
            c = CompositeCanvas(c)
            c.cursor = self.get_cursor_coords((maxcol,))
        return c

    def get_cursor_coords(self, (maxcol,)):
        # cursor sits on the second character, e.g. the X in "[X]";
        # returns None implicitly when there is no room
        if maxcol>1:
            return (1,0)

    def keypress(self, (maxcol,), key):
        # icons handle no keys themselves; the wrapping widget does
        return key
class CheckBox(WidgetWrap):
    # icon shown for each checkbox state, shared by all instances
    states = {
        True: SelectableIcon("[X]"),
        False: SelectableIcon("[ ]"),
        'mixed': SelectableIcon("[#]") }
    reserve_columns = 4  # columns reserved for the state icon

    def selectable(self): return True

    def __init__(self, label, state=False, has_mixed=False,
            on_state_change=None, user_data=None):
        """
        label -- markup for check box label
        state -- False, True or "mixed"
        has_mixed -- True if "mixed" is a state to cycle through
        on_state_change -- callback function for state changes
            on_state_change( check box, new state, user_data=None)
        user_data -- additional param for on_press callback,
            ommited if None for compatibility reasons
        """
        self.__super.__init__(None) # self.w set by set_state below
        self.label = Text("")
        self.has_mixed = has_mixed
        self.state = None
        self.on_state_change = on_state_change
        self.user_data = user_data
        self.set_label(label)
        self.set_state(state)

    def set_label(self, label):
        """Change the check box label."""
        self.label.set_text(label)
        self._invalidate()

    def get_label(self):
        """Return label text."""
        text, attr = self.label.get_text()
        return text

    def set_state(self, state, do_callback=True):
        """
        Call on_state_change if do_callback is True,
        then change the check box state.
        """
        # self.state is None only during __init__, so the callback is
        # suppressed for the initial state assignment.
        if (do_callback and self.state is not None and
            self.on_state_change):
            if self.user_data is None:
                self.on_state_change(self, state)
            else:
                self.on_state_change(self, state,
                    self.user_data)
        self.state = state
        # rebuild the display: state icon followed by the label
        self.w = Columns( [
            ('fixed', self.reserve_columns, self.states[state] ),
            self.label ] )
        self.w.focus_col = 0
        self._invalidate()

    def get_state(self):
        """Return the state of the checkbox."""
        return self.state

    def keypress(self, (maxcol,), key):
        """Toggle state on space or enter."""
        if key not in (' ','enter'):
            return key
        self.toggle_state()

    def toggle_state(self):
        """Cycle to the next valid state."""
        # cycle order: False -> True [-> mixed] -> False
        if self.state == False:
            self.set_state(True)
        elif self.state == True:
            if self.has_mixed:
                self.set_state('mixed')
            else:
                self.set_state(False)
        elif self.state == 'mixed':
            self.set_state(False)
        self._invalidate()

    def mouse_event(self, (maxcol,), event, button, x, y, focus):
        """Toggle state on button 1 press."""
        if button != 1 or not is_mouse_press(event):
            return False
        self.toggle_state()
        return True
class RadioButton(WidgetWrap):
    # icon shown for each radio button state, shared by all instances
    states = {
        True: SelectableIcon("(X)"),
        False: SelectableIcon("( )"),
        'mixed': SelectableIcon("(#)") }
    reserve_columns = 4  # columns reserved for the state icon

    def selectable(self): return True

    def __init__(self, group, label, state="first True",
            on_state_change=None, user_data=None):
        """
        group -- list for radio buttons in same group
        label -- markup for radio button label
        state -- False, True, "mixed" or "first True"
        on_state_change -- callback function for state changes
            on_state_change( radio_button, new_state, user_data=None)
        user_data -- additional param for on_press callback,
            ommited if None for compatibility reasons

        This function will append the new radio button to group.
        "first True" will set to True if group is empty.
        """
        self.__super.__init__(None) # self.w set by set_state below
        if state=="first True":
            state = not group

        self.group = group
        self.label = Text("")
        self.state = None
        self.on_state_change = on_state_change
        self.user_data = user_data
        self.set_label(label)
        self.set_state(state)

        # register with the group so mutual exclusion works
        group.append(self)

    def set_label(self, label):
        """Change the check box label."""
        self.label.set_text(label)
        self._invalidate()

    def get_label(self):
        """Return label text."""
        text, attr = self.label.get_text()
        return text

    def set_state(self, state, do_callback=True):
        """
        Call on_state_change if do_callback is True,
        then change the radio button state.
        if state is True set all other radio buttons in group to False.
        """
        # self.state is None only during __init__, so the callback is
        # suppressed for the initial state assignment.
        if (do_callback and self.state is not None and
            self.on_state_change):
            if self.user_data is None:
                self.on_state_change(self, state)
            else:
                self.on_state_change(self, state,
                    self.user_data)
        self.state = state
        # rebuild the display: state icon followed by the label
        self.w = Columns( [
            ('fixed', self.reserve_columns, self.states[state] ),
            self.label ] )
        self.w.focus_col = 0
        self._invalidate()

        if state is not True:
            return

        # enforce mutual exclusion within the group
        for cb in self.group:
            if cb is self: continue
            if cb.state:
                cb.set_state(False)

    def get_state(self):
        """Return the state of the radio button."""
        return self.state

    def keypress(self, (maxcol,), key):
        """Set state to True on space or enter."""
        if key not in (' ','enter'):
            return key

        if self.state is not True:
            self.set_state(True)
        else:
            return key

    def mouse_event(self, (maxcol,), event, button, x, y, focus):
        """Set state to True on button 1 press."""
        if button != 1 or not is_mouse_press(event):
            return False

        if self.state is not True:
            self.set_state(True)
        return True
class Button(WidgetWrap):
    # bracket decorations shared by all buttons
    button_left = Text("<")
    button_right = Text(">")

    def selectable(self):
        return True

    def __init__(self, label, on_press=None, user_data=None):
        """
        label -- markup for button label
        on_press -- callback function for button "press"
            on_press( button object, user_data=None)
        user_data -- additional param for on_press callback,
            ommited if None for compatibility reasons
        """
        self.__super.__init__(None) # self.w set by set_label below
        self.set_label( label )
        self.on_press = on_press
        self.user_data = user_data

    def set_label(self, label):
        """Change the button label and rebuild the display widget."""
        self.label = label
        self.w = Columns([
            ('fixed', 1, self.button_left),
            Text( label ),
            ('fixed', 1, self.button_right)],
            dividechars=1)
        self._invalidate()

    def get_label(self):
        """Return the button label text."""
        return self.label

    def render(self, (maxcol,), focus=False):
        """Display button. Show a cursor when in focus."""
        canv = self.__super.render((maxcol,), focus=focus)
        canv = CompositeCanvas(canv)
        if focus and maxcol >2:
            # cursor on the first label character, past "< "
            canv.cursor = (2,0)
        return canv

    def get_cursor_coords(self, (maxcol,)):
        """Return the location of the cursor."""
        if maxcol >2:
            return (2,0)
        return None

    def keypress(self, (maxcol,), key):
        """Call on_press on space or enter."""
        if key not in (' ','enter'):
            return key

        if self.on_press:
            if self.user_data is None:
                self.on_press(self)
            else:
                self.on_press(self, self.user_data)

    def mouse_event(self, (maxcol,), event, button, x, y, focus):
        """Call on_press on button 1 press."""
        if button != 1 or not is_mouse_press(event):
            return False

        self.on_press( self )
        return True
class GridFlow(FlowWidget):
    """
    Flow widget that arranges a list of equal-width flow widgets
    (cells) into rows, inserting separator columns/rows as needed.
    """
    def selectable(self):
        """Return True if the cell in focus is selectable."""
        return self.focus_cell and self.focus_cell.selectable()

    def __init__(self, cells, cell_width, h_sep, v_sep, align):
        """
        cells -- list of flow widgets to display
        cell_width -- column width for each cell
        h_sep -- blank columns between each cell horizontally
        v_sep -- blank rows between cells vertically (if more than
            one row is required to display all the cells)
        align -- horizontal alignment of cells, see "align" parameter
            of Padding widget for available options
        """
        self.__super.__init__()
        self.cells = cells
        self.cell_width = cell_width
        self.h_sep = h_sep
        self.v_sep = v_sep
        self.align = align
        self.focus_cell = None
        if cells:
            self.focus_cell = cells[0]
        self._cache_maxcol = None  # maxcol the display widget was built for

    def set_focus(self, cell):
        """Set the cell in focus.

        cell -- widget or integer index into self.cells"""
        if type(cell) == type(0):
            assert cell>=0 and cell<len(self.cells)
            self.focus_cell = self.cells[cell]
        else:
            assert cell in self.cells
            self.focus_cell = cell
        # focus change affects the generated layout, drop the cache
        self._cache_maxcol = None
        self._invalidate()

    def get_display_widget(self, (maxcol,)):
        """
        Arrange the cells into columns (and possibly a pile) for
        display, input or to calculate rows.
        """
        # use cache if possible
        if self._cache_maxcol == maxcol:
            return self._cache_display_widget

        self._cache_maxcol = maxcol
        self._cache_display_widget = self.generate_display_widget(
            (maxcol,))

        return self._cache_display_widget

    def generate_display_widget(self, (maxcol,)):
        """
        Actually generate display widget (ignoring cache)
        """
        d = Divider()
        if len(self.cells) == 0: # how dull
            return d

        if self.v_sep > 1:
            # increase size of divider
            d.top = self.v_sep-1

        # cells per row
        bpr = (maxcol+self.h_sep) / (self.cell_width+self.h_sep)

        if bpr == 0: # too narrow, pile them on top of eachother
            l = [self.cells[0]]
            f = 0
            for b in self.cells[1:]:
                if b is self.focus_cell:
                    f = len(l)
                if self.v_sep:
                    l.append(d)
                l.append(b)
            return Pile(l, f)

        if bpr >= len(self.cells): # all fit on one row
            k = len(self.cells)
            f = self.cells.index(self.focus_cell)
            cols = Columns(self.cells, self.h_sep, f)
            rwidth = (self.cell_width+self.h_sep)*k - self.h_sep
            row = Padding(cols, self.align, rwidth)
            return row

        # general case: a Pile of Padding(Columns(...)) rows
        out = []
        s = 0
        f = 0
        while s < len(self.cells):
            if out and self.v_sep:
                out.append(d)
            k = min( len(self.cells), s+bpr )
            cells = self.cells[s:k]
            if self.focus_cell in cells:
                f = len(out)
                fcol = cells.index(self.focus_cell)
                cols = Columns(cells, self.h_sep, fcol)
            else:
                cols = Columns(cells, self.h_sep)
            rwidth = (self.cell_width+self.h_sep)*(k-s)-self.h_sep
            row = Padding(cols, self.align, rwidth)
            out.append(row)
            s += bpr
        return Pile(out, f)

    def _set_focus_from_display_widget(self, w):
        """Set the focus to the item in focus in the display widget."""
        # first level: Pile row or single Padding(Columns)
        if isinstance(w, Padding):
            # unwrap padding
            w = w.w
        w = w.get_focus()
        if w in self.cells:
            self.set_focus(w)
            return
        # second level: focus was a row inside a Pile; unwrap again
        if isinstance(w, Padding):
            # unwrap padding
            w = w.w
        w = w.get_focus()
        #assert w == self.cells[0], `w, self.cells`
        self.set_focus(w)

    def keypress(self, (maxcol,), key):
        """
        Pass keypress to display widget for handling.
        Capture focus changes."""
        d = self.get_display_widget((maxcol,))
        if not d.selectable():
            return key
        key = d.keypress( (maxcol,), key)
        if key is None:
            # key was consumed; mirror any focus movement back here
            self._set_focus_from_display_widget(d)
        return key

    def rows(self, (maxcol,), focus=False):
        """Return rows used by this widget."""
        d = self.get_display_widget((maxcol,))
        return d.rows((maxcol,), focus=focus)

    def render(self, (maxcol,), focus=False ):
        """Use display widget to render."""
        d = self.get_display_widget((maxcol,))
        return d.render((maxcol,), focus)

    def get_cursor_coords(self, (maxcol,)):
        """Get cursor from display widget."""
        d = self.get_display_widget((maxcol,))
        if not d.selectable():
            return None
        return d.get_cursor_coords((maxcol,))

    def move_cursor_to_coords(self, (maxcol,), col, row ):
        """Set the widget in focus based on the col + row."""
        d = self.get_display_widget((maxcol,))
        if not d.selectable():
            # happy is the default
            return True

        r =  d.move_cursor_to_coords((maxcol,), col, row)
        if not r:
            return False

        self._set_focus_from_display_widget(d)
        self._invalidate()
        return True

    def mouse_event(self, (maxcol,), event, button, col, row, focus):
        """Send mouse event to contained widget."""
        d = self.get_display_widget((maxcol,))

        r = d.mouse_event( (maxcol,), event, button, col, row, focus )
        if not r:
            return False

        self._set_focus_from_display_widget(d)
        self._invalidate()
        return True

    def get_pref_col(self, (maxcol,)):
        """Return pref col from display widget."""
        d = self.get_display_widget((maxcol,))
        if not d.selectable():
            return None
        return d.get_pref_col((maxcol,))
class PaddingError(Exception):
    """Raised when Padding is given invalid align/width arguments."""
    pass
class Padding(Widget):
    """Pad a widget on the left and/or right to position it horizontally."""
    def __init__(self, w, align, width, min_width=None):
        """
        w -- a box, flow or fixed widget to pad on the left and/or right
        align -- one of:
            'left', 'center', 'right'
            ('fixed left', columns)
            ('fixed right', columns)
            ('relative', percentage 0=left 100=right)
        width -- one of:
            number of columns wide
            ('fixed right', columns)  Only if align is 'fixed left'
            ('fixed left', columns)  Only if align is 'fixed right'
            ('relative', percentage of total width)
            None to enable clipping mode
        min_width -- the minimum number of columns for w or None

        Padding widgets will try to satisfy width argument first by
        reducing the align amount when necessary.  If width still
        cannot be satisfied it will also be reduced.

        Clipping Mode:
        In clipping mode w is treated as a fixed widget and this
        widget expects to be treated as a flow widget.  w will
        be clipped to fit within the space given.  For example,
        if align is 'left' then w may be clipped on the right.
        """
        self.__super.__init__()

        at,aa,wt,wa=decompose_align_width(align, width, PaddingError)

        self.w = w
        self.align_type, self.align_amount = at, aa
        self.width_type, self.width_amount = wt, wa
        self.min_width = min_width

    def render(self, size, focus=False):
        """Render self.w padded (or clipped) to the requested position."""
        left, right = self.padding_values(size, focus)

        maxcol = size[0]
        maxcol -= left+right

        # width_type None is clipping mode: w is a fixed widget and
        # renders at its natural size, possibly wider than available.
        if self.width_type is None:
            canv = self.w.render((), focus)
        else:
            canv = self.w.render((maxcol,)+size[1:], focus)
        if canv.cols() == 0:
            # zero-width content: substitute blanks so the result is
            # still a renderable canvas of the full width
            canv = SolidCanvas(' ', size[0], canv.rows())
            canv = CompositeCanvas(canv)
            canv.set_depends([self.w])
            return canv
        canv = CompositeCanvas(canv)
        canv.set_depends([self.w])
        if left != 0 or right != 0:
            # negative values trim columns instead of padding (clipping)
            canv.pad_trim_left_right(left, right)

        return canv

    def padding_values(self, size, focus):
        """Return the number of columns to pad on the left and right.

        Override this method to define custom padding behaviour."""
        maxcol = size[0]
        if self.width_type is None:
            # clipping mode: ask w for its natural (packed) width
            width, ignore = self.w.pack(focus=focus)
            return calculate_padding(self.align_type,
                self.align_amount, 'fixed', width,
                None, maxcol, clip=True )

        return calculate_padding( self.align_type, self.align_amount,
            self.width_type, self.width_amount,
            self.min_width, maxcol )

    def selectable(self):
        """Return the selectable value of self.w."""
        return self.w.selectable()

    def rows(self, (maxcol,), focus=False ):
        """Return the rows needed for self.w."""
        if self.width_type is None:
            # clipping mode: height comes from w's packed size
            ignore, height = self.w.pack(focus)
            return height
        left, right = self.padding_values((maxcol,), focus)
        return self.w.rows( (maxcol-left-right,), focus=focus )

    def keypress(self, size, key):
        """Pass keypress to self.w."""
        maxcol = size[0]
        left, right = self.padding_values(size, True)
        maxvals = (maxcol-left-right,)+size[1:]
        return self.w.keypress(maxvals, key)

    def get_cursor_coords(self,size):
        """Return the (x,y) coordinates of cursor within self.w."""
        if not hasattr(self.w,'get_cursor_coords'):
            return None
        left, right = self.padding_values(size, True)
        maxcol = size[0]
        maxvals = (maxcol-left-right,)+size[1:]
        coords = self.w.get_cursor_coords(maxvals)
        if coords is None:
            return None
        x, y = coords
        # translate back into padded coordinate space
        return x+left, y

    def move_cursor_to_coords(self, size, x, y):
        """Set the cursor position with (x,y) coordinates of self.w.

        Returns True if move succeeded, False otherwise.
        """
        if not hasattr(self.w,'move_cursor_to_coords'):
            return True
        left, right = self.padding_values(size, True)
        maxcol = size[0]
        maxvals = (maxcol-left-right,)+size[1:]
        if type(x)==type(0):
            # clamp x into the unpadded region before translating;
            # x may also be a non-integer pref-col token (passed through)
            if x < left:
                x = left
            elif x >= maxcol-right:
                x = maxcol-right-1
            x -= left
        return self.w.move_cursor_to_coords(maxvals, x, y)

    def mouse_event(self, size, event, button, x, y, focus):
        """Send mouse event if position is within self.w."""
        if not hasattr(self.w,'mouse_event'):
            return False
        left, right = self.padding_values(size, focus)
        maxcol = size[0]
        if x < left or x >= maxcol-right:
            # click landed on the padding itself
            return False
        maxvals = (maxcol-left-right,)+size[1:]
        return self.w.mouse_event(maxvals, event, button, x-left, y,
            focus)

    def get_pref_col(self, size):
        """Return the preferred column from self.w, or None."""
        if not hasattr(self.w,'get_pref_col'):
            return None
        left, right = self.padding_values(size, True)
        maxcol = size[0]
        maxvals = (maxcol-left-right,)+size[1:]
        x = self.w.get_pref_col(maxvals)
        if type(x) == type(0):
            # integer pref cols are translated; tokens pass through
            return x+left
        return x
class FillerError(Exception):
    """Raised when Filler is given invalid valign/height arguments."""
    pass
class Filler(BoxWidget):
def __init__(self, body, valign="middle", height=None, min_height=None):
"""
body -- a flow widget or box widget to be filled around
valign -- one of:
'top', 'middle', 'bottom'
('fixed top', rows)
('fixed bottom', rows)
('relative', percentage 0=top 100=bottom)
height -- one of:
None if body is a flow widget
number of rows high
('fixed bottom', rows) Only if valign is 'fixed top'
('fixed top', rows) Only if valign is 'fixed bottom'
('relative', percentage of total height)
min_height -- one of:
None if no minimum or if body is a flow widget
minimum number of rows for the widget when height not fixed
If body is a flow widget then height and min_height must be set
to None.
Filler widgets will try to satisfy height argument first by
reducing the valign amount when necessary. If height still
cannot be satisfied it will also be reduced.
"""
self.__super.__init__()
vt,va,ht,ha=decompose_valign_height(valign,height,FillerError)
self.body = body
self.valign_type, self.valign_amount = vt, va
self.height_type, self.height_amount = ht, ha
if self.height_type not in ('fixed', None):
self.min_height = min_height
else:
self.min_height = None
def selectable(self):
"""Return selectable from body."""
return self.body.selectable()
def filler_values(self, (maxcol, maxrow), focus):
"""Return the number of rows to pad on the top and bottom.
Override this method to define custom padding behaviour."""
if self.height_type is None:
height = self.body.rows((maxcol,),focus=focus)
return calculate_filler( self.valign_type,
self.valign_amount, 'fixed', height,
None, maxrow )
return calculate_filler( self.valign_type, self.valign_amount,
self.height_type, self.height_amount,
self.min_height, maxrow)
def render(self, (maxcol,maxrow), focus=False):
"""Render self.body with space above and/or below."""
top, bottom = self.filler_values((maxcol,maxrow), focus)
if self.height_type is None:
canv = self.body.render( (maxcol,), focus)
else:
canv = self.body.render( (maxcol,maxrow-top-bottom),focus)
canv = CompositeCanvas(canv)
if maxrow and canv.rows() > maxrow and canv.cursor is not None:
cx, cy = canv.cursor
if cy >= maxrow:
canv.trim(cy-maxrow+1,maxrow-top-bottom)
if canv.rows() > maxrow:
canv.trim(0, maxrow)
return canv
canv.pad_trim_top_bottom(top, bottom)
return canv
def keypress(self, (maxcol,maxrow), key):
"""Pass keypress to self.body."""
if self.height_type is None:
return self.body.keypress( (maxcol,), key )
top, bottom = self.filler_values((maxcol,maxrow), True)
return self.body.keypress( (maxcol,maxrow-top-bottom), key )
def get_cursor_coords(self, (maxcol,maxrow)):
"""Return cursor coords from self.body if any."""
if not hasattr(self.body, 'get_cursor_coords'):
return None
top, bottom = self.filler_values((maxcol,maxrow), True)
if self.height_type is None:
coords = self.body.get_cursor_coords((maxcol,))
else:
coords = self.body.get_cursor_coords(
(maxcol,maxrow-top-bottom))
if not coords:
return None
x, y = coords
if y >= maxrow:
y = maxrow-1
return x, y+top
def get_pref_col(self, (maxcol,maxrow)):
"""Return pref_col from self.body if any."""
if not hasattr(self.body, 'get_pref_col'):
return None
if self.height_type is None:
x = self.body.get_pref_col((maxcol,))
else:
top, bottom = self.filler_values((maxcol,maxrow), True)
x = self.body.get_pref_col(
(maxcol,maxrow-top-bottom))
return x
def move_cursor_to_coords(self, (maxcol,maxrow), col, row):
"""Pass to self.body."""
if not hasattr(self.body, 'move_cursor_to_coords'):
return True
top, bottom = self.filler_values((maxcol,maxrow), True)
if row < top or row >= maxcol-bottom:
return False
if self.height_type is None:
return self.body.move_cursor_to_coords((maxcol,),
col, row-top)
return self.body.move_cursor_to_coords(
(maxcol, maxrow-top-bottom), col, row-top)
def mouse_event(self, (maxcol,maxrow), event, button, col, row, focus):
"""Pass to self.body."""
if not hasattr(self.body, 'mouse_event'):
return False
top, bottom = self.filler_values((maxcol,maxrow), True)
if row < top or row >= maxcol-bottom:
return False
if self.height_type is None:
return self.body.mouse_event((maxcol,),
event, button, col, row-top, focus)
return self.body.mouse_event( (maxcol, maxrow-top-bottom),
event, button,col, row-top, focus)
class OverlayError(Exception):
    """Raised when Overlay is given invalid alignment/size arguments."""
    pass
class Overlay(BoxWidget):
def __init__(self, top_w, bottom_w, align, width, valign, height,
min_width=None, min_height=None ):
"""
top_w -- a flow, box or fixed widget to overlay "on top"
bottom_w -- a box widget to appear "below" previous widget
align -- one of:
'left', 'center', 'right'
('fixed left', columns)
('fixed right', columns)
('relative', percentage 0=left 100=right)
width -- one of:
None if top_w is a fixed widget
number of columns wide
('fixed right', columns) Only if align is 'fixed left'
('fixed left', columns) Only if align is 'fixed right'
('relative', percentage of total width)
valign -- one of:
'top', 'middle', 'bottom'
('fixed top', rows)
('fixed bottom', rows)
('relative', percentage 0=top 100=bottom)
height -- one of:
None if top_w is a flow or fixed widget
number of rows high
('fixed bottom', rows) Only if valign is 'fixed top'
('fixed top', rows) Only if valign is 'fixed bottom'
('relative', percentage of total height)
min_width -- the minimum number of columns for top_w
when width is not fixed
min_height -- one of:
minimum number of rows for the widget when height not fixed
Overlay widgets behave similarly to Padding and Filler widgets
when determining the size and position of top_w. bottom_w is
always rendered the full size available "below" top_w.
"""
self.__super.__init__()
at,aa,wt,wa=decompose_align_width(align, width, OverlayError)
vt,va,ht,ha=decompose_valign_height(valign,height,OverlayError)
self.top_w = top_w
self.bottom_w = bottom_w
self.align_type, self.align_amount = at, aa
self.width_type, self.width_amount = wt, wa
if self.width_type and self.width_type != 'fixed':
self.min_width = min_width
else:
self.min_width = None
self.valign_type, self.valign_amount = vt, va
self.height_type, self.height_amount = ht, ha
if self.height_type not in ('fixed', None):
self.min_height = min_height
else:
self.min_height = None
def selectable(self):
"""Return selectable from top_w."""
return self.top_w.selectable()
def keypress(self, size, key):
"""Pass keypress to top_w."""
return self.top_w.keypress( size, key)
def get_cursor_coords(self, size):
"""Return cursor coords from top_w, if any."""
if not hasattr(self.body, 'get_cursor_coords'):
return None
left, right, top, bottom = self.calculate_padding_filler(size,
True)
x, y = self.top_w.get_cursor_coords(
(maxcol-left-right, maxrow-top-bottom) )
if y >= maxrow: # required??
y = maxrow-1
return x+left, y+top
def calculate_padding_filler(self, (maxcol, maxrow), focus):
"""Return (padding left, right, filler top, bottom)."""
height = None
if self.width_type is None:
# top_w is a fixed widget
width, height = self.top_w.pack(focus=focus)
assert height, "fixed widget must have a height"
left, right = calculate_padding(self.align_type,
self.align_amount, 'fixed', width,
None, maxcol, clip=True )
else:
left, right = calculate_padding(self.align_type,
self.align_amount, self.width_type,
self.width_amount, self.min_width, maxcol)
if height:
# top_w is a fixed widget
top, bottom = calculate_filler(self.valign_type,
self.valign_amount, 'fixed', height,
None, maxrow)
if maxrow-top-bottom < height:
bottom = maxrow-top-height
elif self.height_type is None:
# top_w is a flow widget
height = self.body.rows((maxcol,),focus=focus)
top, bottom = calculate_filler( self.valign_type,
self.valign_amount, 'fixed', height,
None, maxrow )
else:
top, bottom = calculate_filler(self.valign_type,
self.valign_amount, self.height_type,
self.height_amount, self.min_height, maxrow)
return left, right, top, bottom
def top_w_size(self, size, left, right, top, bottom):
"""Return the size to pass to top_w."""
if self.width_type is None:
# top_w is a fixed widget
return ()
maxcol, maxrow = size
if self.width_type is not None and self.height_type is None:
# top_w is a flow widget
return (maxcol-left-right,)
return (maxcol-left-right, maxrow-top-bottom)
def render(self, size, focus=False):
"""Render top_w overlayed on bottom_w."""
left, right, top, bottom = self.calculate_padding_filler(size,
focus)
bottom_c = self.bottom_w.render(size)
top_c = self.top_w.render(
self.top_w_size(size, left, right, top, bottom), focus)
if left<0 or right<0:
top_c = CompositeCanvas(top_c)
top_c.pad_trim_left_right(min(0,left), min(0,right))
if top<0 or bottom<0:
top_c = CompositeCanvas(top_c)
top_c.pad_trim_top_bottom(min(0,top), min(0,bottom))
return CanvasOverlay(top_c, bottom_c, max(0,left), top)
def mouse_event(self, size, event, button, col, row, focus):
"""Pass event to top_w, ignore if outside of top_w."""
if not hasattr(self.top_w, 'mouse_event'):
return False
left, right, top, bottom = self.calculate_padding_filler(size,
focus)
maxcol, maxrow = size
if ( col<left or col>=maxcol-right or
row<top or row>=maxrow-bottom ):
return False
return self.top_w.mouse_event(
self.top_w_size(size, left, right, top, bottom),
event, button, col-left, row-top, focus )
def decompose_align_width( align, width, err ):
    """Normalize Padding/Overlay align and width arguments.

    align -- 'left', 'center', 'right' or an (align type, amount) tuple
    width -- None, an integer column count or a (width type, amount) tuple
    err -- exception class to raise for invalid values

    Returns (align_type, align_amount, width_type, width_amount).
    Raises err for any value outside the documented forms.
    """
    try:
        if align in ('left','center','right'):
            align = (align,0)
        align_type, align_amount = align
        assert align_type in ('left','center','right','fixed left',
            'fixed right','relative')
    # narrowed from a bare except: unpacking raises TypeError or
    # ValueError, the validity check raises AssertionError
    except (AssertionError, ValueError, TypeError):
        raise err("align value %r is not one of 'left', 'center', "
            "'right', ('fixed left', columns), ('fixed right', "
            "columns), ('relative', percentage 0=left 100=right)"
            % (align,))

    try:
        if width is None:
            width = None, None
        elif type(width) == type(0):
            width = 'fixed', width
        width_type, width_amount = width
        assert width_type in ('fixed','fixed right','fixed left',
            'relative', None)
    except (AssertionError, ValueError, TypeError):
        raise err("width value %r is not one of ('fixed', columns "
            "width), ('fixed right', columns), ('relative', "
            "percentage of total width), None" % (width,))

    # 'fixed left'/'fixed right' widths anchor the opposite edge, so
    # they only make sense combined with the complementary fixed align
    if width_type == 'fixed left' and align_type != 'fixed right':
        raise err("fixed left width may only be used with fixed "
            "right align")
    if width_type == 'fixed right' and align_type != 'fixed left':
        raise err("fixed right width may only be used with fixed "
            "left align")

    return align_type, align_amount, width_type, width_amount
def decompose_valign_height( valign, height, err ):
    """Normalize Filler/Overlay valign and height arguments.

    valign -- 'top', 'middle', 'bottom' or a (valign type, amount) tuple
    height -- None, an integer row count or a (height type, amount) tuple
    err -- exception class to raise for invalid values

    Returns (valign_type, valign_amount, height_type, height_amount).
    Raises err for any value outside the documented forms.
    """
    try:
        if valign in ('top','middle','bottom'):
            valign = (valign,0)
        valign_type, valign_amount = valign
        assert valign_type in ('top','middle','bottom','fixed top','fixed bottom','relative')
    # narrowed from a bare except; raise err(...) and %r replace the
    # Python-2-only "raise err, msg" and backtick repr
    except (AssertionError, ValueError, TypeError):
        raise err("Invalid valign: %r" % (valign,))

    try:
        if height is None:
            height = None, None
        elif type(height) == type(0):
            height=('fixed',height)
        height_type, height_amount = height
        assert height_type in (None, 'fixed','fixed bottom','fixed top','relative')
    except (AssertionError, ValueError, TypeError):
        raise err("Invalid height: %r" % (height,))

    # fixed top/bottom heights anchor the opposite edge, so they only
    # make sense combined with the complementary fixed valign
    if height_type == 'fixed top' and valign_type != 'fixed bottom':
        raise err("fixed top height may only be used with fixed bottom valign")
    if height_type == 'fixed bottom' and valign_type != 'fixed top':
        raise err("fixed bottom height may only be used with fixed top valign")

    return valign_type, valign_amount, height_type, height_amount
def calculate_filler( valign_type, valign_amount, height_type, height_amount,
        min_height, maxrow ):
    """Return (top, bottom) filler rows for the given vertical alignment.

    The requested height is resolved first; the leftover rows are then
    split between top and bottom according to the valign arguments.
    """
    # Resolve the requested height in rows.
    if height_type == 'fixed':
        height = height_amount
    elif height_type == 'relative':
        height = int(height_amount*maxrow/100+.5)
        if min_height is not None:
            height = max(height, min_height)
    else:
        assert height_type in ('fixed bottom','fixed top')
        height = maxrow-height_amount-valign_amount
        if min_height is not None:
            height = max(height, min_height)

    if height >= maxrow:
        # content takes the whole space: no filler at all
        return 0, 0

    # Fixed alignments pin one edge and give the remainder to the other.
    if valign_type == 'fixed top':
        top = valign_amount
        if top+height <= maxrow:
            return top, maxrow-top-height
        return maxrow-height, 0  # shrink top to fit
    if valign_type == 'fixed bottom':
        bottom = valign_amount
        if bottom+height <= maxrow:
            return maxrow-bottom-height, bottom
        return 0, maxrow-height  # shrink bottom to fit

    # Remaining alignments place the content by a computed top offset.
    if valign_type == 'relative':
        top = int( (maxrow-height)*valign_amount/100+.5 )
    elif valign_type == 'bottom':
        top = maxrow-height
    elif valign_type == 'middle':
        top = int( (maxrow-height)/2 )
    else:  # 'top'
        top = 0

    # clamp the offset into the available range
    top = min(top, maxrow-height)
    top = max(top, 0)
    return top, maxrow-height-top
def calculate_padding( align_type, align_amount, width_type, width_amount,
        min_width, maxcol, clip=False ):
    """Return (left, right) padding columns for the given alignment.

    With clip=True a width wider than maxcol yields negative padding,
    which callers use to trim the rendered canvas instead of padding it.
    """
    # Resolve the requested width in columns.
    if width_type == 'fixed':
        width = width_amount
    elif width_type == 'relative':
        width = int(width_amount*maxcol/100+.5)
        if min_width is not None:
            width = max(width, min_width)
    else:
        assert width_type in ('fixed right', 'fixed left')
        width = maxcol-width_amount-align_amount
        if min_width is not None:
            width = max(width, min_width)

    if width == maxcol or (width > maxcol and not clip):
        # content fills (or overflows without clipping): no padding
        return 0, 0

    # Fixed alignments pin one edge and give the remainder to the other.
    if align_type == 'fixed left':
        left = align_amount
        if left+width <= maxcol:
            return left, maxcol-left-width
        return maxcol-width, 0  # shrink left to fit
    if align_type == 'fixed right':
        right = align_amount
        if right+width <= maxcol:
            return maxcol-right-width, right
        return 0, maxcol-width  # shrink right to fit

    # Remaining alignments place the content by a computed left offset.
    if align_type == 'relative':
        left = int( (maxcol-width)*align_amount/100+.5 )
    elif align_type == 'right':
        left = maxcol-width
    elif align_type == 'center':
        left = int( (maxcol-width)/2 )
    else:
        assert align_type == 'left'
        left = 0

    if width < maxcol:
        # clamp the offset into the available range
        left = max(0, min(left, maxcol-width))
    return left, maxcol-width-left
class Frame(BoxWidget):
    """Box widget that stacks a header and footer around a body widget."""
    def __init__(self, body, header=None, footer=None, focus_part='body'):
        """
        body -- a box widget for the body of the frame
        header -- a flow widget for above the body (or None)
        footer -- a flow widget for below the body (or None)
        focus_part -- 'header', 'footer' or 'body'
        """
        self.__super.__init__()

        self._header = header
        self._body = body
        self._footer = footer
        self.focus_part = focus_part

    def get_header(self):
        return self._header
    def set_header(self, header):
        self._header = header
        self._invalidate()
    # header/body/footer are properties so that assignment triggers
    # a redraw of the frame
    header = property(get_header, set_header)

    def get_body(self):
        return self._body
    def set_body(self, body):
        self._body = body
        self._invalidate()
    body = property(get_body, set_body)

    def get_footer(self):
        return self._footer
    def set_footer(self, footer):
        self._footer = footer
        self._invalidate()
    footer = property(get_footer, set_footer)

    def set_focus(self, part):
        """Set the part of the frame that is in focus.

        part -- 'header', 'footer' or 'body'
        """
        assert part in ('header', 'footer', 'body')
        self.focus_part = part
        self._invalidate()

    def frame_top_bottom(self, (maxcol,maxrow), focus):
        """Calculate the number of rows for the header and footer.

        Returns (head rows, foot rows),(orig head, orig foot).
        orig head/foot are from rows() calls.

        When there is not enough vertical space the part in focus is
        given priority; a focused body keeps at least one row when
        possible.
        """
        frows = hrows = 0

        if self.header:
            hrows = self.header.rows((maxcol,),
                self.focus_part=='header' and focus)

        if self.footer:
            frows = self.footer.rows((maxcol,),
                self.focus_part=='footer' and focus)

        remaining = maxrow

        if self.focus_part == 'footer':
            if frows >= remaining:
                return (0, remaining),(hrows, frows)

            remaining -= frows
            if hrows >= remaining:
                return (remaining, frows),(hrows, frows)

        elif self.focus_part == 'header':
            if hrows >= maxrow:
                return (remaining, 0),(hrows, frows)

            remaining -= hrows
            if frows >= remaining:
                return (hrows, remaining),(hrows, frows)

        elif hrows + frows >= remaining:
            # self.focus_part == 'body'
            # leave at least one row for the body when possible
            rless1 = max(0, remaining-1)
            if frows >= remaining-1:
                return (0, rless1),(hrows, frows)

            remaining -= frows
            rless1 = max(0, remaining-1)
            return (rless1,frows),(hrows, frows)

        return (hrows, frows),(hrows, frows)

    def render(self, (maxcol,maxrow), focus=False):
        """Render frame and return it."""
        (htrim, ftrim),(hrows, frows) = self.frame_top_bottom(
            (maxcol, maxrow), focus)

        combinelist = []
        # NOTE(review): depends_on is collected but never used below —
        # confirm whether it should be attached to the result canvas
        depends_on = []

        head = None
        if htrim and htrim < hrows:
            # header was allotted fewer rows than it wants: wrap it in
            # a Filler so it is trimmed to fit
            head = Filler(self.header, 'top').render(
                (maxcol, htrim),
                focus and self.focus_part == 'header')
        elif htrim:
            head = self.header.render((maxcol,),
                focus and self.focus_part == 'header')
            assert head.rows() == hrows, "rows, render mismatch"
        if head:
            combinelist.append((head, 'header',
                self.focus_part == 'header'))
            depends_on.append(self.header)

        if ftrim+htrim < maxrow:
            body = self.body.render((maxcol, maxrow-ftrim-htrim),
                focus and self.focus_part == 'body')
            combinelist.append((body, 'body',
                self.focus_part == 'body'))
            depends_on.append(self.body)

        foot = None
        if ftrim and ftrim < frows:
            # footer squeezed: trim it from the bottom with a Filler
            foot = Filler(self.footer, 'bottom').render(
                (maxcol, ftrim),
                focus and self.focus_part == 'footer')
        elif ftrim:
            foot = self.footer.render((maxcol,),
                focus and self.focus_part == 'footer')
            assert foot.rows() == frows, "rows, render mismatch"
        if foot:
            combinelist.append((foot, 'footer',
                self.focus_part == 'footer'))
            depends_on.append(self.footer)

        return CanvasCombine(combinelist)

    def keypress(self, (maxcol,maxrow), key):
        """Pass keypress to widget in focus."""

        if self.focus_part == 'header' and self.header is not None:
            if not self.header.selectable():
                return key
            return self.header.keypress((maxcol,),key)
        if self.focus_part == 'footer' and self.footer is not None:
            if not self.footer.selectable():
                return key
            return self.footer.keypress((maxcol,),key)
        if self.focus_part != 'body':
            return key

        # body gets whatever rows the header and footer leave over
        remaining = maxrow
        if self.header is not None:
            remaining -= self.header.rows((maxcol,))
        if self.footer is not None:
            remaining -= self.footer.rows((maxcol,))
        if remaining <= 0: return key

        if not self.body.selectable():
            return key
        return self.body.keypress( (maxcol, remaining), key )

    def mouse_event(self, (maxcol, maxrow), event, button, col, row, focus):
        """
        Pass mouse event to appropriate part of frame.
        Focus may be changed on button 1 press.
        """
        (htrim, ftrim),(hrows, frows) = self.frame_top_bottom(
            (maxcol, maxrow), focus)

        if row < htrim: # within header
            focus = focus and self.focus_part == 'header'
            if is_mouse_press(event) and button==1:
                if self.header.selectable():
                    self.set_focus('header')
            if not hasattr(self.header, 'mouse_event'):
                return False
            return self.header.mouse_event( (maxcol,), event,
                button, col, row, focus )

        if row >= maxrow-ftrim: # within footer
            focus = focus and self.focus_part == 'footer'
            if is_mouse_press(event) and button==1:
                if self.footer.selectable():
                    self.set_focus('footer')
            if not hasattr(self.footer, 'mouse_event'):
                return False
            return self.footer.mouse_event( (maxcol,), event,
                button, col, row-maxrow+frows, focus )

        # within body
        focus = focus and self.focus_part == 'body'
        if is_mouse_press(event) and button==1:
            if self.body.selectable():
                self.set_focus('body')

        if not hasattr(self.body, 'mouse_event'):
            return False
        return self.body.mouse_event( (maxcol, maxrow-htrim-ftrim),
            event, button, col, row-htrim, focus )
class AttrWrap(Widget):
    """
    AttrWrap is a decorator that changes the default attribute for a
    FlowWidget or BoxWidget
    """
    def __init__(self, w, attr, focus_attr = None):
        """
        w -- widget to wrap
        attr -- attribute to apply to w
        focus_attr -- attribute to apply when in focus, if None use attr

        This object will pass all function calls and variable references
        to the wrapped widget.
        """
        # NOTE(review): unlike the other widgets in this file, __init__
        # does not call self.__super.__init__() — confirm intentional.
        self._w = w
        self._attr = attr
        self._focus_attr = focus_attr

    def get_w(self):
        return self._w
    def set_w(self, w):
        self._w = w
        self._invalidate()
    # wrapped widget and both attributes are exposed as properties so
    # that assignment invalidates the cached canvas
    w = property(get_w, set_w)

    def get_attr(self):
        return self._attr
    def set_attr(self, attr):
        self._attr = attr
        self._invalidate()
    attr = property(get_attr, set_attr)

    def get_focus_attr(self):
        return self._focus_attr
    def set_focus_attr(self, focus_attr):
        self._focus_attr = focus_attr
        self._invalidate()
    focus_attr = property(get_focus_attr, set_focus_attr)

    def render(self, size, focus = False ):
        """Render self.w and apply attribute. Return canvas.

        size -- (maxcol,) if self.w contains a flow widget or
            (maxcol, maxrow) if it contains a box widget.
        """
        # use the focus attribute only when focused and one is set
        attr = self.attr
        if focus and self.focus_attr is not None:
            attr = self.focus_attr
        canv = self.w.render(size, focus=focus)
        canv = CompositeCanvas(canv)
        canv.fill_attr(attr)
        return canv

    def selectable(self):
        """Return the selectable value of the wrapped widget."""
        return self.w.selectable()

    def __getattr__(self,name):
        """Call getattr on wrapped widget."""
        # any attribute not found on the wrapper is delegated to the
        # wrapped widget
        return getattr(self.w, name)
class PileError(Exception):
    """Raised when Pile is given an invalid widget_list item."""
    pass
class Pile(Widget): # either FlowWidget or BoxWidget
def __init__(self, widget_list, focus_item=None):
"""
widget_list -- list of widgets
focus_item -- widget or integer index, if None the first
selectable widget will be chosen.
widget_list may also contain tuples such as:
('flow', widget) always treat widget as a flow widget
('fixed', height, widget) give this box widget a fixed height
('weight', weight, widget) if the pile is treated as a box
widget then treat widget as a box widget with a
height based on its relative weight value, otherwise
treat widget as a flow widget
widgets not in a tuple are the same as ('weight', 1, widget)
If the pile is treated as a box widget there must be at least
one 'weight' tuple in widget_list.
"""
self.__super.__init__()
self.widget_list = MonitoredList(widget_list)
self.item_types = []
for i in range(len(widget_list)):
w = widget_list[i]
if type(w) != type(()):
self.item_types.append(('weight',1))
elif w[0] == 'flow':
f, widget = w
self.widget_list[i] = widget
self.item_types.append((f,None))
w = widget
elif w[0] in ('fixed', 'weight'):
f, height, widget = w
self.widget_list[i] = widget
self.item_types.append((f,height))
w = widget
else:
raise PileError, "widget list item invalid %s" % `w`
if focus_item is None and w.selectable():
focus_item = i
self.widget_list.set_modified_callback(self._invalidate)
if focus_item is None:
focus_item = 0
self.set_focus(focus_item)
self.pref_col = None
def selectable(self):
"""Return True if the focus item is selectable."""
return self.focus_item.selectable()
def set_focus(self, item):
"""Set the item in focus.
item -- widget or integer index"""
if type(item) == type(0):
assert item>=0 and item<len(self.widget_list)
self.focus_item = self.widget_list[item]
else:
assert item in self.widget_list
self.focus_item = item
self._invalidate()
def get_focus(self):
"""Return the widget in focus."""
return self.focus_item
def get_pref_col(self, size):
"""Return the preferred column for the cursor, or None."""
if not self.selectable():
return None
self._update_pref_col_from_focus(size)
return self.pref_col
def get_item_size(self, size, i, focus, item_rows=None):
"""
Return a size appropriate for passing to self.widget_list[i]
"""
maxcol = size[0]
f, height = self.item_types[i]
if f=='fixed':
return (maxcol, height)
elif f=='weight' and len(size)==2:
if not item_rows:
item_rows = self.get_item_rows(size, focus)
return (maxcol, item_rows[i])
else:
return (maxcol,)
def get_item_rows(self, size, focus):
"""
Return a list of the number of rows used by each widget
in self.item_list.
"""
remaining = None
maxcol = size[0]
if len(size)==2:
remaining = size[1]
l = []
if remaining is None:
# pile is a flow widget
for (f, height), w in zip(
self.item_types, self.widget_list):
if f == 'fixed':
l.append( height )
else:
l.append( w.rows( (maxcol,), focus=focus
and self.focus_item == w ))
return l
# pile is a box widget
# do an extra pass to calculate rows for each widget
wtotal = 0
for (f, height), w in zip(self.item_types, self.widget_list):
if f == 'flow':
rows = w.rows((maxcol,), focus=focus and
self.focus_item == w )
l.append(rows)
remaining -= rows
elif f == 'fixed':
l.append(height)
remaining -= height
else:
l.append(None)
wtotal += height
if wtotal == 0:
raise PileError, "No weighted widgets found for Pile treated as a box widget"
if remaining < 0:
remaining = 0
i = 0
for (f, height), li in zip(self.item_types, l):
if li is None:
rows = int(float(remaining)*height
/wtotal+0.5)
l[i] = rows
remaining -= rows
wtotal -= height
i += 1
return l
def render(self, size, focus=False):
"""
Render all widgets in self.widget_list and return the results
stacked one on top of the next.
"""
maxcol = size[0]
item_rows = None
combinelist = []
i = 0
for (f, height), w in zip(self.item_types, self.widget_list):
item_focus = self.focus_item == w
canv = None
if f == 'fixed':
canv = w.render( (maxcol, height),
focus=focus and item_focus)
elif f == 'flow' or len(size)==1:
canv = w.render( (maxcol,),
focus=focus and item_focus)
else:
if item_rows is None:
item_rows = self.get_item_rows(size,
focus)
rows = item_rows[i]
if rows>0:
canv = w.render( (maxcol, rows),
focus=focus and item_focus )
if canv:
combinelist.append((canv, i, item_focus))
i+=1
return CanvasCombine(combinelist)
def get_cursor_coords(self, size):
"""Return the cursor coordinates of the focus widget."""
if not self.focus_item.selectable():
return None
if not hasattr(self.focus_item,'get_cursor_coords'):
return None
i = self.widget_list.index(self.focus_item)
f, height = self.item_types[i]
item_rows = None
maxcol = size[0]
if f == 'fixed' or (f=='weight' and len(size)==2):
if f == 'fixed':
maxrow = height
else:
if item_rows is None:
item_rows = self.get_item_rows(size,
focus=True)
maxrow = item_rows[i]
coords = self.focus_item.get_cursor_coords(
(maxcol,maxrow))
else:
coords = self.focus_item.get_cursor_coords((maxcol,))
if coords is None:
return None
x,y = coords
if i > 0:
if item_rows is None:
item_rows = self.get_item_rows(size, focus=True)
for r in item_rows[:i]:
y += r
return x, y
def rows(self, (maxcol,), focus=False ):
"""Return the number of rows required for this widget."""
return sum( self.get_item_rows( (maxcol,), focus ) )
def keypress(self, size, key ):
"""Pass the keypress to the widget in focus.
Unhandled 'up' and 'down' keys may cause a focus change."""
maxcol = size[0]
item_rows = None
if len(size)==2:
item_rows = self.get_item_rows( size, focus=True )
i = self.widget_list.index(self.focus_item)
f, height = self.item_types[i]
if self.focus_item.selectable():
tsize = self.get_item_size(size,i,True,item_rows)
key = self.focus_item.keypress( tsize, key )
if key not in ('up', 'down'):
return key
if key == 'up':
candidates = range(i-1, -1, -1) # count backwards to 0
else: # key == 'down'
candidates = range(i+1, len(self.widget_list))
if not item_rows:
item_rows = self.get_item_rows( size, focus=True )
for j in candidates:
if not self.widget_list[j].selectable():
continue
self._update_pref_col_from_focus(size)
old_focus = self.focus_item
self.set_focus(j)
if not hasattr(self.focus_item,'move_cursor_to_coords'):
return
f, height = self.item_types[j]
rows = item_rows[j]
if key=='up':
rowlist = range(rows-1, -1, -1)
else: # key == 'down'
rowlist = range(rows)
for row in rowlist:
tsize=self.get_item_size(size,j,True,item_rows)
if self.focus_item.move_cursor_to_coords(
tsize,self.pref_col,row):
break
return
# nothing to select
return key
def _update_pref_col_from_focus(self, size ):
"""Update self.pref_col from the focus widget."""
widget = self.focus_item
if not hasattr(widget,'get_pref_col'):
return
i = self.widget_list.index(widget)
tsize = self.get_item_size(size,i,True)
pref_col = widget.get_pref_col(tsize)
if pref_col is not None:
self.pref_col = pref_col
def move_cursor_to_coords(self, size, col, row):
"""Capture pref col and set new focus."""
self.pref_col = col
#FIXME guessing focus==True
focus=True
wrow = 0
item_rows = self.get_item_rows(size,focus)
for r,w in zip(item_rows, self.widget_list):
if wrow+r > row:
break
wrow += r
if not w.selectable():
return False
if hasattr(w,'move_cursor_to_coords'):
i = self.widget_list.index(w)
tsize = self.get_item_size(size, i, focus, item_rows)
rval = w.move_cursor_to_coords(tsize,col,row-wrow)
if rval is False:
return False
self.set_focus(w)
return True
def mouse_event(self, size, event, button, col, row, focus):
    """
    Pass the event to the contained widget.
    May change focus on button 1 press.
    """
    wrow = 0
    item_rows = self.get_item_rows(size,focus)
    # Locate the child widget whose vertical span contains `row`;
    # `wrow` accumulates the first row of that child.
    for r,w in zip(item_rows, self.widget_list):
        if wrow+r > row:
            break
        wrow += r
    # Only the child under the pointer can be considered in focus.
    focus = focus and self.focus_item == w
    if is_mouse_press(event) and button==1:
        if w.selectable():
            self.set_focus(w)
    if not hasattr(w,'mouse_event'):
        # Child cannot handle mouse input; report event unhandled.
        return False
    i = self.widget_list.index(w)
    tsize = self.get_item_size(size, i, focus, item_rows)
    # Forward with the row translated into the child's coordinates.
    return w.mouse_event(tsize, event, button, col, row-wrow,
        focus)
class ColumnsError(Exception):
    """Raised when a Columns widget is constructed with an invalid widget list."""
    pass
class Columns(Widget): # either FlowWidget or BoxWidget
    """Widgets arranged horizontally in columns from left to right."""

    def __init__(self, widget_list, dividechars=0, focus_column=None,
            min_width=1, box_columns=None):
        """
        widget_list -- list of flow widgets or list of box widgets
        dividechars -- blank characters between columns
        focus_column -- index into widget_list of column in focus,
            if None the first selectable widget will be chosen.
        min_width -- minimum width for each column before it is hidden
        box_columns -- a list of column indexes containing box widgets
            whose maxrow is set to the maximum of the rows
            required by columns not listed in box_columns.

        widget_list may also contain tuples such as:
        ('fixed', width, widget) give this column a fixed width
        ('weight', weight, widget) give this column a relative weight

        widgets not in a tuple are the same as ('weight', 1, widget)

        box_columns is ignored when this widget is being used as a
        box widget because in that case all columns are treated as box
        widgets.
        """
        self.__super.__init__()
        self.widget_list = MonitoredList(widget_list)
        self.column_types = []
        # Normalize widget_list entries: bare widgets become
        # ('weight', 1) columns, tuples are split into widget + type.
        for i in range(len(widget_list)):
            w = widget_list[i]
            if type(w) != type(()):
                self.column_types.append(('weight',1))
            elif w[0] in ('fixed', 'weight'):
                f,width,widget = w
                self.widget_list[i] = widget
                self.column_types.append((f,width))
                w = widget
            else:
                raise ColumnsError, "widget list item invalid: %s" % `w`
            # Default focus: first selectable column.
            if focus_column is None and w.selectable():
                focus_column = i
        self.widget_list.set_modified_callback(self._invalidate)
        self.dividechars = dividechars
        if focus_column is None:
            focus_column = 0
        self.focus_col = focus_column
        self.pref_col = None
        self.min_width = min_width
        self.box_columns = box_columns
        # Cached column_widths() result, keyed on the maxcol it was
        # computed for.
        self._cache_maxcol = None

    def _invalidate(self):
        # Drop the cached width calculation along with the canvas cache.
        self._cache_maxcol = None
        self.__super._invalidate()

    def set_focus_column( self, num ):
        """Set the column in focus by its index in self.widget_list."""
        self.focus_col = num
        self._invalidate()

    def get_focus_column( self ):
        """Return the focus column index."""
        return self.focus_col

    def set_focus(self, item):
        """Set the item in focus.

        item -- widget or integer index"""
        if type(item) == type(0):
            assert item>=0 and item<len(self.widget_list)
            position = item
        else:
            position = self.widget_list.index(item)
        self.focus_col = position
        self._invalidate()

    def get_focus(self):
        """Return the widget in focus."""
        return self.widget_list[self.focus_col]

    def column_widths( self, size ):
        """Return a list of column widths.

        size -- (maxcol,) if self.widget_list contains flow widgets or
            (maxcol, maxrow) if it contains box widgets.

        Columns that do not fit at their minimum width are dropped from
        the returned list entirely.
        """
        maxcol = size[0]
        if maxcol == self._cache_maxcol:
            return self._cache_column_widths
        col_types = self.column_types
        # hack to support old practice of editing self.widget_list
        # directly
        lwl, lct = len(self.widget_list), len(self.column_types)
        if lwl > lct:
            col_types = col_types + [('weight',1)] * (lwl-lct)
        widths=[]
        weighted = []
        shared = maxcol + self.dividechars
        growable = 0  # NOTE(review): unused local, kept unchanged
        i = 0
        # First pass: give every column its fixed or minimum width;
        # stop at the first column that no longer fits.
        for t, width in col_types:
            if t == 'fixed':
                static_w = width
            else:
                static_w = self.min_width
            if shared < static_w + self.dividechars:
                break
            widths.append( static_w )
            shared -= static_w + self.dividechars
            if t != 'fixed':
                weighted.append( (width,i) )
            i += 1
        if shared:
            # Second pass: divide up the remaining space between
            # weighted cols, smallest weight first so rounding is
            # re-balanced on each iteration.
            weighted.sort()
            wtotal = sum([weight for weight,i in weighted])
            grow = shared + len(weighted)*self.min_width
            for weight, i in weighted:
                width = int(float(grow) * weight / wtotal + 0.5)
                width = max(self.min_width, width)
                widths[i] = width
                grow -= width
                wtotal -= weight
        self._cache_maxcol = maxcol
        self._cache_column_widths = widths
        return widths

    def render(self, size, focus=False):
        """Render columns and return canvas.

        size -- (maxcol,) if self.widget_list contains flow widgets or
            (maxcol, maxrow) if it contains box widgets.
        """
        widths = self.column_widths( size )
        if not widths:
            # Nothing fits: fill the requested area with blanks.
            return SolidCanvas(" ", size[0], (size[1:]+(1,))[0])
        box_maxrow = None
        if len(size)==1 and self.box_columns:
            box_maxrow = 1
            # two-pass mode to determine maxrow for box columns
            for i in range(len(widths)):
                if i in self.box_columns:
                    continue
                mc = widths[i]
                w = self.widget_list[i]
                rows = w.rows( (mc,),
                    focus = focus and self.focus_col == i )
                box_maxrow = max(box_maxrow, rows)
        l = []
        for i in range(len(widths)):
            mc = widths[i]
            w = self.widget_list[i]
            if box_maxrow and i in self.box_columns:
                sub_size = (mc, box_maxrow)
            else:
                sub_size = (mc,) + size[1:]
            canv = w.render(sub_size,
                focus = focus and self.focus_col == i)
            # All but the last column carry the divider space on their
            # right-hand side.
            if i < len(widths)-1:
                mc += self.dividechars
            l.append((canv, i, self.focus_col == i, mc))
        canv = CanvasJoin(l)
        if canv.cols() < size[0]:
            canv.pad_trim_left_right(0, size[0]-canv.cols())
        return canv

    def get_cursor_coords(self, size):
        """Return the cursor coordinates from the focus widget."""
        w = self.widget_list[self.focus_col]
        if not w.selectable():
            return None
        if not hasattr(w, 'get_cursor_coords'):
            return None
        widths = self.column_widths( size )
        if len(widths) < self.focus_col+1:
            # Focus column is currently hidden (not enough space).
            return None
        colw = widths[self.focus_col]
        coords = w.get_cursor_coords( (colw,)+size[1:] )
        if coords is None:
            return None
        x,y = coords
        # Translate from column-local to Columns coordinates.
        x += self.focus_col * self.dividechars
        x += sum( widths[:self.focus_col] )
        return x, y

    def move_cursor_to_coords(self, size, col, row):
        """Choose a selectable column to focus based on the coords."""
        widths = self.column_widths(size)
        best = None
        x = 0
        # Scan left to right for the selectable column closest to `col`.
        for i in range(len(widths)):
            w = self.widget_list[i]
            end = x + widths[i]
            if w.selectable():
                if x > col and best is None:
                    # no other choice
                    best = i, x, end
                    break
                if x > col and col-best[2] < x-col:
                    # choose one on left
                    break
                best = i, x, end
                if col < end:
                    # choose this one
                    break
            x = end + self.dividechars
        if best is None:
            return False
        i, x, end = best
        w = self.widget_list[i]
        if hasattr(w,'move_cursor_to_coords'):
            if type(col)==type(0):
                # Clamp the column into the chosen widget's range.
                move_x = min(max(0,col-x),end-x-1)
            else:
                move_x = col
            rval = w.move_cursor_to_coords((end-x,)+size[1:],
                move_x, row)
            if rval is False:
                return False
        self.focus_col = i
        self.pref_col = col
        self._invalidate()
        return True

    def mouse_event(self, size, event, button, col, row, focus):
        """
        Send event to appropriate column.
        May change focus on button 1 press.
        """
        widths = self.column_widths(size)
        x = 0
        for i in range(len(widths)):
            if col < x:
                # Click landed on a divider between columns.
                return False
            w = self.widget_list[i]
            end = x + widths[i]
            if col >= end:
                x = end + self.dividechars
                continue
            # Event belongs to column i from here on.
            focus = focus and self.focus_col == i
            if is_mouse_press(event) and button == 1:
                if w.selectable():
                    self.set_focus(w)
            if not hasattr(w,'mouse_event'):
                return False
            # Forward with the column made relative to the child.
            return w.mouse_event((end-x,)+size[1:], event, button,
                col - x, row, focus)
        return False

    def get_pref_col(self, size):
        """Return the pref col from the column in focus."""
        maxcol = size[0]
        widths = self.column_widths( (maxcol,) )
        w = self.widget_list[self.focus_col]
        if len(widths) < self.focus_col+1:
            return 0
        col = None
        if hasattr(w,'get_pref_col'):
            col = w.get_pref_col((widths[self.focus_col],)+size[1:])
            if type(col)==type(0):
                # Translate to Columns coordinates.
                col += self.focus_col * self.dividechars
                col += sum( widths[:self.focus_col] )
        if col is None:
            col = self.pref_col
        if col is None and w.selectable():
            # Default to the middle of the focus column.
            col = widths[self.focus_col]/2
            col += self.focus_col * self.dividechars
            col += sum( widths[:self.focus_col] )
        return col

    def rows(self, (maxcol,), focus=0 ):
        """Return the number of rows required by the columns.
        Only makes sense if self.widget_list contains flow widgets."""
        widths = self.column_widths( (maxcol,) )
        rows = 1
        for i in range(len(widths)):
            # Box columns take whatever height they are given, so they
            # do not contribute to the row count.
            if self.box_columns and i in self.box_columns:
                continue
            mc = widths[i]
            w = self.widget_list[i]
            rows = max( rows, w.rows( (mc,),
                focus = focus and self.focus_col == i ) )
        return rows

    def keypress(self, size, key):
        """Pass keypress to the focus column.

        size -- (maxcol,) if self.widget_list contains flow widgets or
            (maxcol, maxrow) if it contains box widgets.
        """
        if self.focus_col is None: return key
        widths = self.column_widths( size )
        if self.focus_col < 0 or self.focus_col >= len(widths):
            return key
        i = self.focus_col
        mc = widths[i]
        w = self.widget_list[i]
        if key not in ('up','down','page up','page down'):
            # Any non-vertical key invalidates the remembered column.
            self.pref_col = None
        key = w.keypress( (mc,)+size[1:], key )
        if key not in ('left','right'):
            return key
        # Unhandled left/right: move focus to the nearest selectable
        # column in that direction.
        if key == 'left':
            candidates = range(i-1, -1, -1) # count backwards to 0
        else: # key == 'right'
            candidates = range(i+1, len(widths))
        for j in candidates:
            if not self.widget_list[j].selectable():
                continue
            self.set_focus_column( j )
            return
        return key

    def selectable(self):
        """Return the selectable value of the focus column."""
        return self.widget_list[self.focus_col].selectable()
class BoxAdapter(FlowWidget):
"""
Adapter for using a box widget where a flow widget would usually go
"""
no_cache = ["rows"]
def __init__(self, box_widget, height):
"""
Create a flow widget that contains a box widget
box_widget -- box widget
height -- number of rows for box widget
"""
self.__super.__init__()
self.height = height
self.box_widget = box_widget
def rows(self, (maxcol,), focus=False):
"""
Return self.height
"""
return self.height
def selectable(self):
"""
Return box widget's selectable value
"""
return self.box_widget.selectable()
def get_cursor_coords(self, (maxcol,)):
if not hasattr(self.box_widget,'get_cursor_coords'):
return None
return self.box_widget.get_cursor_coords((maxcol, self.height))
def get_pref_col(self, (maxcol,)):
if not hasattr(self.box_widget,'get_pref_col'):
return None
return self.box_widget.get_pref_col((maxcol, self.height))
def keypress(self, (maxcol,), key):
return self.box_widget.keypress((maxcol, self.height), key)
def move_cursor_to_coords(self, (maxcol,), col, row):
if not hasattr(self.box_widget,'move_cursor_to_coords'):
return True
return self.box_widget.move_cursor_to_coords((maxcol,
self.height), col, row )
def mouse_event(self, (maxcol,), event, button, col, row, focus):
if not hasattr(self.box_widget,'mouse_event'):
return False
return self.box_widget.mouse_event((maxcol, self.height),
event, button, col, row, focus)
def render(self, (maxcol,), focus=False):
canv = self.box_widget.render((maxcol, self.height), focus)
canv = CompositeCanvas(canv)
return canv
def __getattr__(self, name):
"""
Pass calls to box widget.
"""
return getattr(self.box_widget, name)
| gpl-2.0 | 6,931,457,597,435,678,000 | 26.361237 | 87 | 0.65247 | false |
timpalpant/calibre | src/calibre/db/tests/profiling.py | 14 | 1026 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, cProfile
from tempfile import gettempdir
from calibre.db.legacy import LibraryDatabase
db = None
def initdb(path):
    # Open the calibre library at `path` (with ~ expanded) and store the
    # handle in the module-level `db` global for use by main().
    global db
    db = LibraryDatabase(os.path.expanduser(path))
def show_stats(path):
    """Load profiling data from *path* and print the 30 most expensive
    entries sorted by cumulative time."""
    from pstats import Stats
    # Stats methods return self, so the calls chain.
    Stats(path).sort_stats('cumulative').print_stats(30)
def main():
    # Profile reading two composite custom columns for every book in a
    # local test library, then dump and print the collected statistics.
    stats = os.path.join(gettempdir(), 'read_db.stats')
    pr = cProfile.Profile()
    initdb('~/test library')
    all_ids = db.new_api.all_book_ids() # noqa
    pr.enable()
    for book_id in all_ids:
        db.new_api._composite_for('#isbn', book_id)
        db.new_api._composite_for('#formats', book_id)
    pr.disable()
    pr.dump_stats(stats)
    show_stats(stats)
    print ('Stats saved to', stats)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,184,697,396,481,422,300 | 24.65 | 68 | 0.635478 | false |
ewandor/home-assistant | homeassistant/components/cloud/http_api.py | 1 | 7208 | """The HTTP api to control the cloud integration."""
import asyncio
from functools import wraps
import logging
import voluptuous as vol
import async_timeout
from homeassistant.components.http import (
HomeAssistantView, RequestDataValidator)
from . import auth_api
from .const import DOMAIN, REQUEST_TIMEOUT
_LOGGER = logging.getLogger(__name__)
@asyncio.coroutine
def async_setup(hass):
"""Initialize the HTTP api."""
hass.http.register_view(CloudLoginView)
hass.http.register_view(CloudLogoutView)
hass.http.register_view(CloudAccountView)
hass.http.register_view(CloudRegisterView)
hass.http.register_view(CloudConfirmRegisterView)
hass.http.register_view(CloudForgotPasswordView)
hass.http.register_view(CloudConfirmForgotPasswordView)
# Map of exceptions raised by the cloud auth layer to the
# (HTTP status code, message) pair returned to the frontend.
_CLOUD_ERRORS = {
    auth_api.UserNotFound: (400, "User does not exist."),
    auth_api.UserNotConfirmed: (400, 'Email not confirmed.'),
    auth_api.Unauthenticated: (401, 'Authentication failed.'),
    auth_api.PasswordChangeRequired: (400, 'Password change required.'),
    auth_api.ExpiredCode: (400, 'Confirmation code has expired.'),
    auth_api.InvalidCode: (400, 'Invalid confirmation code.'),
    asyncio.TimeoutError: (502, 'Unable to reach the Home Assistant cloud.')
}
def _handle_cloud_errors(handler):
    """Helper method to handle auth errors.

    Decorator for view handlers: converts cloud/auth exceptions into
    JSON error responses using the _CLOUD_ERRORS mapping.
    """
    @asyncio.coroutine
    @wraps(handler)
    def error_handler(view, request, *args, **kwargs):
        """Handle exceptions that raise from the wrapped request handler."""
        try:
            result = yield from handler(view, request, *args, **kwargs)
            return result
        except (auth_api.CloudError, asyncio.TimeoutError) as err:
            # Known errors map to a specific status/message; anything
            # else becomes a 502 with the error text.
            err_info = _CLOUD_ERRORS.get(err.__class__)
            if err_info is None:
                err_info = (502, 'Unexpected error: {}'.format(err))
            status, msg = err_info
            return view.json_message(msg, status_code=status,
                                     message_code=err.__class__.__name__)
    return error_handler
class CloudLoginView(HomeAssistantView):
    """Login to Home Assistant cloud."""
    url = '/api/cloud/login'
    name = 'api:cloud:login'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
        vol.Required('password'): str,
    }))
    @asyncio.coroutine
    def post(self, request, data):
        """Handle login request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        # login is blocking, so run it in the executor with a timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            yield from hass.async_add_job(auth_api.login, cloud, data['email'],
                                          data['password'])
        hass.async_add_job(cloud.iot.connect)
        # Allow cloud to start connecting.
        yield from asyncio.sleep(0, loop=hass.loop)
        return self.json(_account_data(cloud))
class CloudLogoutView(HomeAssistantView):
    """Log out of the Home Assistant cloud."""
    url = '/api/cloud/logout'
    name = 'api:cloud:logout'

    @_handle_cloud_errors
    @asyncio.coroutine
    def post(self, request):
        """Handle logout request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        # Bound the remote logout so the request cannot hang forever.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            yield from cloud.logout()
        return self.json_message('ok')
class CloudAccountView(HomeAssistantView):
    """View to retrieve account info."""
    url = '/api/cloud/account'
    name = 'api:cloud:account'

    @asyncio.coroutine
    def get(self, request):
        """Get account info."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        if not cloud.is_logged_in:
            return self.json_message('Not logged in', 400)
        return self.json(_account_data(cloud))
class CloudRegisterView(HomeAssistantView):
    """Register on the Home Assistant cloud."""
    url = '/api/cloud/register'
    name = 'api:cloud:register'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
        # Cloud passwords must be at least 6 characters.
        vol.Required('password'): vol.All(str, vol.Length(min=6)),
    }))
    @asyncio.coroutine
    def post(self, request, data):
        """Handle registration request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        # register is blocking, so run it in the executor with a timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            yield from hass.async_add_job(
                auth_api.register, cloud, data['email'], data['password'])
        return self.json_message('ok')
class CloudConfirmRegisterView(HomeAssistantView):
    """Confirm registration on the Home Assistant cloud."""
    url = '/api/cloud/confirm_register'
    name = 'api:cloud:confirm_register'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('confirmation_code'): str,
        vol.Required('email'): str,
    }))
    @asyncio.coroutine
    def post(self, request, data):
        """Handle registration confirmation request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        # confirm_register is blocking, so offload it with a timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            yield from hass.async_add_job(
                auth_api.confirm_register, cloud, data['confirmation_code'],
                data['email'])
        return self.json_message('ok')
class CloudForgotPasswordView(HomeAssistantView):
    """View to start Forgot Password flow.."""
    url = '/api/cloud/forgot_password'
    name = 'api:cloud:forgot_password'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('email'): str,
    }))
    @asyncio.coroutine
    def post(self, request, data):
        """Handle forgot password request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        # forgot_password is blocking, so offload it with a timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            yield from hass.async_add_job(
                auth_api.forgot_password, cloud, data['email'])
        return self.json_message('ok')
class CloudConfirmForgotPasswordView(HomeAssistantView):
    """View to finish Forgot Password flow.."""
    url = '/api/cloud/confirm_forgot_password'
    name = 'api:cloud:confirm_forgot_password'

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({
        vol.Required('confirmation_code'): str,
        vol.Required('email'): str,
        # New passwords must be at least 6 characters.
        vol.Required('new_password'): vol.All(str, vol.Length(min=6))
    }))
    @asyncio.coroutine
    def post(self, request, data):
        """Handle forgot password confirm request."""
        hass = request.app['hass']
        cloud = hass.data[DOMAIN]
        # confirm_forgot_password is blocking, so offload it with a timeout.
        with async_timeout.timeout(REQUEST_TIMEOUT, loop=hass.loop):
            yield from hass.async_add_job(
                auth_api.confirm_forgot_password, cloud,
                data['confirmation_code'], data['email'],
                data['new_password'])
        return self.json_message('ok')
def _account_data(cloud):
"""Generate the auth data JSON response."""
claims = cloud.claims
return {
'email': claims['email'],
'sub_exp': claims.get('custom:sub-exp'),
'cloud': cloud.iot.state,
}
| apache-2.0 | 5,551,867,265,287,187,000 | 29.935622 | 79 | 0.631937 | false |
akretion/odoo | addons/sale_purchase/models/sale_order.py | 4 | 17030 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare
class SaleOrder(models.Model):
    _inherit = 'sale.order'

    # Number of distinct purchase orders generated from this SO's lines.
    purchase_order_count = fields.Integer("Number of Purchase Order", compute='_compute_purchase_order_count', groups='purchase.group_purchase_user')

    @api.depends('order_line.purchase_line_ids')
    def _compute_purchase_order_count(self):
        # Count distinct POs per sale order with a single grouped query.
        purchase_line_data = self.env['purchase.order.line'].read_group(
            [('sale_order_id', 'in', self.ids)],
            ['sale_order_id', 'purchase_order_count:count_distinct(order_id)'], ['sale_order_id']
        )
        purchase_count_map = {item['sale_order_id'][0]: item['purchase_order_count'] for item in purchase_line_data}
        for order in self:
            order.purchase_order_count = purchase_count_map.get(order.id, 0)

    @api.multi
    def _action_confirm(self):
        result = super(SaleOrder, self)._action_confirm()
        # On confirmation, spawn POs for service_to_purchase lines; sudo
        # because the salesperson may lack purchase access rights.
        for order in self:
            order.order_line.sudo()._purchase_service_generation()
        return result

    @api.multi
    def action_cancel(self):
        result = super(SaleOrder, self).action_cancel()
        # When a sale person cancel a SO, he might not have the rights to write
        # on PO. But we need the system to create an activity on the PO (so 'write'
        # access), hence the `sudo`.
        self.sudo()._activity_cancel_on_purchase()
        return result

    @api.multi
    def action_view_purchase(self):
        # Open the RFQ list filtered to POs generated from this SO.
        action = self.env.ref('purchase.purchase_rfq').read()[0]
        action['domain'] = [('id', 'in', self.mapped('order_line.purchase_line_ids.order_id').ids)]
        return action

    @api.multi
    def _activity_cancel_on_purchase(self):
        """ If some SO are cancelled, we need to put an activity on their generated purchase. If sale lines of
            differents sale orders impact different purchase, we only want one activity to be attached.
        """
        purchase_to_notify_map = {}  # map PO -> recordset of SOL as {purchase.order: set(sale.orde.liner)}
        purchase_order_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.mapped('order_line').ids), ('state', '!=', 'cancel')])
        for purchase_line in purchase_order_lines:
            purchase_to_notify_map.setdefault(purchase_line.order_id, self.env['sale.order.line'])
            purchase_to_notify_map[purchase_line.order_id] |= purchase_line.sale_line_id
        # One warning activity per impacted PO, listing all SO lines.
        for purchase_order, sale_order_lines in purchase_to_notify_map.items():
            purchase_order.activity_schedule_with_view('mail.mail_activity_data_warning',
                user_id=purchase_order.user_id.id or self.env.uid,
                views_or_xmlid='sale_purchase.exception_purchase_on_sale_cancellation',
                render_context={
                    'sale_orders': sale_order_lines.mapped('order_id'),
                    'sale_order_lines': sale_order_lines,
                })
class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'

    # Purchase lines spawned from this SO line (service_to_purchase flow).
    purchase_line_ids = fields.One2many('purchase.order.line', 'sale_line_id', string="Generated Purchase Lines", readonly=True, help="Purchase line generated by this Sales item on order confirmation, or when the quantity was increased.")
    purchase_line_count = fields.Integer("Number of generated purchase items", compute='_compute_purchase_count')

    @api.multi
    @api.depends('purchase_line_ids')
    def _compute_purchase_count(self):
        # Single grouped query; sudo since salespeople may not read POs.
        database_data = self.env['purchase.order.line'].sudo().read_group([('sale_line_id', 'in', self.ids)], ['sale_line_id'], ['sale_line_id'])
        mapped_data = dict([(db['sale_line_id'][0], db['sale_line_id_count']) for db in database_data])
        for line in self:
            line.purchase_line_count = mapped_data.get(line.id, 0)

    @api.onchange('product_uom_qty')
    def _onchange_service_product_uom_qty(self):
        # Warn when decreasing the ordered qty of a purchased service:
        # the linked purchase order is not updated automatically.
        if self.state == 'sale' and self.product_id.type == 'service' and self.product_id.service_to_purchase:
            if self.product_uom_qty < self._origin.product_uom_qty:
                if self.product_uom_qty < self.qty_delivered:
                    return {}
                warning_mess = {
                    'title': _('Ordered quantity decreased!'),
                    'message': _('You are decreasing the ordered quantity! Do not forget to manually update the purchase order if needed.'),
                }
                return {'warning': warning_mess}
        return {}

    # --------------------------
    # CRUD
    # --------------------------

    @api.model
    def create(self, values):
        line = super(SaleOrderLine, self).create(values)
        # Do not generate purchase when expense SO line since the product is already delivered
        if line.state == 'sale' and not line.is_expense:
            line.sudo()._purchase_service_generation()
        return line

    @api.multi
    def write(self, values):
        increased_lines = None
        decreased_lines = None
        increased_values = {}
        decreased_values = {}
        if 'product_uom_qty' in values:
            precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
            # Snapshot which lines grow/shrink (and their old qty)
            # before the write changes product_uom_qty.
            increased_lines = self.sudo().filtered(lambda r: r.product_id.service_to_purchase and r.purchase_line_count and float_compare(r.product_uom_qty, values['product_uom_qty'], precision_digits=precision) == -1)
            decreased_lines = self.sudo().filtered(lambda r: r.product_id.service_to_purchase and r.purchase_line_count and float_compare(r.product_uom_qty, values['product_uom_qty'], precision_digits=precision) == 1)
            increased_values = {line.id: line.product_uom_qty for line in increased_lines}
            decreased_values = {line.id: line.product_uom_qty for line in decreased_lines}
        result = super(SaleOrderLine, self).write(values)
        if increased_lines:
            increased_lines._purchase_increase_ordered_qty(values['product_uom_qty'], increased_values)
        if decreased_lines:
            decreased_lines._purchase_decrease_ordered_qty(values['product_uom_qty'], decreased_values)
        return result

    # --------------------------
    # Business Methods
    # --------------------------

    @api.multi
    def _purchase_decrease_ordered_qty(self, new_qty, origin_values):
        """ Decrease the quantity from SO line will add a next acitivities on the related purchase order

            :param new_qty: new quantity (lower than the current one on SO line), expressed
                in UoM of SO line.
            :param origin_values: map from sale line id to old value for the ordered quantity (dict)
        """
        purchase_to_notify_map = {}  # map PO -> set(SOL)
        last_purchase_lines = self.env['purchase.order.line'].search([('sale_line_id', 'in', self.ids)])
        for purchase_line in last_purchase_lines:
            purchase_to_notify_map.setdefault(purchase_line.order_id, self.env['sale.order.line'])
            purchase_to_notify_map[purchase_line.order_id] |= purchase_line.sale_line_id
        # create next activity
        for purchase_order, sale_lines in purchase_to_notify_map.items():
            render_context = {
                'sale_lines': sale_lines,
                'sale_orders': sale_lines.mapped('order_id'),
                'origin_values': origin_values,
            }
            purchase_order.activity_schedule_with_view('mail.mail_activity_data_warning',
                user_id=purchase_order.user_id.id or self.env.uid,
                views_or_xmlid='sale_purchase.exception_purchase_on_sale_quantity_decreased',
                render_context=render_context)

    @api.multi
    def _purchase_increase_ordered_qty(self, new_qty, origin_values):
        """ Increase the quantity on the related purchase lines

            :param new_qty: new quantity (higher than the current one on SO line), expressed
                in UoM of SO line.
            :param origin_values: map from sale line id to old value for the ordered quantity (dict)
        """
        for line in self:
            last_purchase_line = self.env['purchase.order.line'].search([('sale_line_id', '=', line.id)], order='create_date DESC', limit=1)
            if last_purchase_line.state in ['draft', 'sent', 'to approve']:  # update qty for draft PO lines
                quantity = line.product_uom._compute_quantity(new_qty, last_purchase_line.product_uom)
                last_purchase_line.write({'product_qty': quantity})
            elif last_purchase_line.state in ['purchase', 'done', 'cancel']:  # create new PO, by forcing the quantity as the difference from SO line
                quantity = line.product_uom._compute_quantity(new_qty - origin_values.get(line.id, 0.0), last_purchase_line.product_uom)
                line._purchase_service_create(quantity=quantity)

    @api.multi
    def _purchase_get_date_order(self, supplierinfo):
        """ return the ordered date for the purchase order, computed as : SO commitment date - supplier delay """
        commitment_date = fields.Datetime.from_string(self.order_id.commitment_date or fields.Datetime.now())
        return commitment_date - relativedelta(days=int(supplierinfo.delay))

    @api.multi
    def _purchase_service_prepare_order_values(self, supplierinfo):
        """ Returns the values to create the purchase order from the current SO line.

            :param supplierinfo: record of product.supplierinfo
            :rtype: dict
        """
        self.ensure_one()
        partner_supplier = supplierinfo.name
        fiscal_position_id = self.env['account.fiscal.position'].sudo().with_context(force_company=self.company_id.id).get_fiscal_position(partner_supplier.id)
        date_order = self._purchase_get_date_order(supplierinfo)
        return {
            'partner_id': partner_supplier.id,
            'partner_ref': partner_supplier.ref,
            'company_id': self.company_id.id,
            'currency_id': partner_supplier.property_purchase_currency_id.id or self.env.user.company_id.currency_id.id,
            'dest_address_id': self.order_id.partner_shipping_id.id,
            'origin': self.order_id.name,
            'payment_term_id': partner_supplier.property_supplier_payment_term_id.id,
            'date_order': date_order,
            'fiscal_position_id': fiscal_position_id,
        }

    @api.multi
    def _purchase_service_prepare_line_values(self, purchase_order, quantity=False):
        """ Returns the values to create the purchase order line from the current SO line.

            :param purchase_order: record of purchase.order
            :rtype: dict
            :param quantity: the quantity to force on the PO line, expressed in SO line UoM
        """
        self.ensure_one()
        # compute quantity from SO line UoM
        product_quantity = self.product_uom_qty
        if quantity:
            product_quantity = quantity
        purchase_qty_uom = self.product_uom._compute_quantity(product_quantity, self.product_id.uom_po_id)
        # determine vendor (real supplier, sharing the same partner as the one from the PO, but with more accurate informations like validity, quantity, ...)
        # Note: one partner can have multiple supplier info for the same product
        supplierinfo = self.product_id._select_seller(
            partner_id=purchase_order.partner_id,
            quantity=purchase_qty_uom,
            date=purchase_order.date_order and purchase_order.date_order.date(),  # and purchase_order.date_order[:10],
            uom_id=self.product_id.uom_po_id
        )
        fpos = purchase_order.fiscal_position_id
        taxes = fpos.map_tax(self.product_id.supplier_taxes_id) if fpos else self.product_id.supplier_taxes_id
        if taxes:
            taxes = taxes.filtered(lambda t: t.company_id.id == self.company_id.id)
        # compute unit price
        price_unit = 0.0
        if supplierinfo:
            price_unit = self.env['account.tax'].sudo()._fix_tax_included_price_company(supplierinfo.price, self.product_id.supplier_taxes_id, taxes, self.company_id)
            if purchase_order.currency_id and supplierinfo.currency_id != purchase_order.currency_id:
                price_unit = supplierinfo.currency_id.compute(price_unit, purchase_order.currency_id)
        # purchase line description in supplier lang
        product_in_supplier_lang = self.product_id.with_context({
            'lang': supplierinfo.name.lang,
            'partner_id': supplierinfo.name.id,
        })
        # NOTE(review): `name` built here is overwritten by the returned
        # dict's 'name' key below — presumably dead code; confirm intent.
        name = '[%s] %s' % (self.product_id.default_code, product_in_supplier_lang.display_name)
        if product_in_supplier_lang.description_purchase:
            name += '\n' + product_in_supplier_lang.description_purchase
        return {
            'name': '[%s] %s' % (self.product_id.default_code, self.name) if self.product_id.default_code else self.name,
            'product_qty': purchase_qty_uom,
            'product_id': self.product_id.id,
            'product_uom': self.product_id.uom_po_id.id,
            'price_unit': price_unit,
            'date_planned': fields.Date.from_string(purchase_order.date_order) + relativedelta(days=int(supplierinfo.delay)),
            'taxes_id': [(6, 0, taxes.ids)],
            'order_id': purchase_order.id,
            'sale_line_id': self.id,
        }

    @api.multi
    def _purchase_service_create(self, quantity=False):
        """ On Sales Order confirmation, some lines (services ones) can create a purchase order line and maybe a purchase order.
            If a line should create a RFQ, it will check for existing PO. If no one is find, the SO line will create one, then adds
            a new PO line. The created purchase order line will be linked to the SO line.

            :param quantity: the quantity to force on the PO line, expressed in SO line UoM
        """
        PurchaseOrder = self.env['purchase.order']
        supplier_po_map = {}
        sale_line_purchase_map = {}
        for line in self:
            # determine vendor of the order (take the first matching company and product)
            suppliers = line.product_id.with_context(force_company=line.company_id.id)._select_seller(
                quantity=line.product_uom_qty, uom_id=line.product_uom)
            if not suppliers:
                raise UserError(_("There is no vendor associated to the product %s. Please define a vendor for this product.") % (line.product_id.display_name,))
            supplierinfo = suppliers[0]
            partner_supplier = supplierinfo.name  # yes, this field is not explicit .... it is a res.partner !
            # determine (or create) PO
            purchase_order = supplier_po_map.get(partner_supplier.id)
            if not purchase_order:
                # Reuse an existing draft PO for the same vendor/company.
                purchase_order = PurchaseOrder.search([
                    ('partner_id', '=', partner_supplier.id),
                    ('state', '=', 'draft'),
                    ('company_id', '=', line.company_id.id),
                ], limit=1)
            if not purchase_order:
                values = line._purchase_service_prepare_order_values(supplierinfo)
                purchase_order = PurchaseOrder.create(values)
            else:  # update origin of existing PO
                so_name = line.order_id.name
                origins = []
                if purchase_order.origin:
                    origins = purchase_order.origin.split(', ') + origins
                if so_name not in origins:
                    origins += [so_name]
                    purchase_order.write({
                        'origin': ', '.join(origins)
                    })
            supplier_po_map[partner_supplier.id] = purchase_order
            # add a PO line to the PO
            values = line._purchase_service_prepare_line_values(purchase_order, quantity=quantity)
            purchase_line = self.env['purchase.order.line'].create(values)
            # link the generated purchase to the SO line
            sale_line_purchase_map.setdefault(line, self.env['purchase.order.line'])
            sale_line_purchase_map[line] |= purchase_line
        return sale_line_purchase_map

    @api.multi
    def _purchase_service_generation(self):
        """ Create a Purchase for the first time from the sale line. If the SO line already created a PO, it
            will not create a second one.
        """
        sale_line_purchase_map = {}
        for line in self:
            # Do not regenerate PO line if the SO line has already created one in the past (SO cancel/reconfirmation case)
            if line.product_id.service_to_purchase and not line.purchase_line_count:
                result = line._purchase_service_create()
                sale_line_purchase_map.update(result)
        return sale_line_purchase_map
| agpl-3.0 | -7,981,069,912,607,275,000 | 52.05296 | 238 | 0.623782 | false |
Cloudzero/cloudzero-reactor-aws | reactor/aws/decorators.py | 1 | 1114 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.
# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.
import functools

from botocore.exceptions import ClientError
from retrying import retry
def provisioned_throughput_retry(max_attempts=7):
    """Decorator factory that retries a function with exponential backoff
    when DynamoDB raises ``ProvisionedThroughputExceededException``.

    :param max_attempts: maximum number of attempts before giving up; the
        final exception is wrapped in ``retrying.RetryError``
        (``wrap_exception=True``).
    """
    def wrapper(func):
        # NOTE(review): with ``wrap_exception=True`` the retrying library
        # re-raises *any* final exception as RetryError, which is likely why
        # this "seems to retry on all exceptions" (original TODO) — confirm
        # whether non-throughput errors should propagate unwrapped instead.
        def retry_if_provisioned_throughput_error(exception):
            """Return True if we should retry (in this case on a ProvisionedThroughputException), False otherwise"""
            return isinstance(exception, ClientError) and "ProvisionedThroughputExceededException" in str(exception)

        @retry(retry_on_exception=retry_if_provisioned_throughput_error,
               wait_exponential_multiplier=1000,
               wait_exponential_max=10000,
               stop_max_attempt_number=max_attempts,
               wrap_exception=True)
        @functools.wraps(func)  # preserve the wrapped function's name/docstring
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner
    return wrapper
| bsd-3-clause | -1,154,636,253,028,406,500 | 41.846154 | 116 | 0.689408 | false |
sublime1809/django | django/middleware/common.py | 28 | 6967 | import hashlib
import logging
import re
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed by
              appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """

    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    logger.warning('Forbidden (User agent): %s', request.path,
                        extra={
                            'status_code': 403,
                            'request': request
                        }
                    )
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        # old_url/new_url are [host, path] pairs; a redirect is issued only
        # if either component actually changes below.
        host = request.get_host()
        old_url = [host, request.path]
        new_url = old_url[:]

        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]

        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
                    urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
                new_url[1] = new_url[1] + '/'
                # Redirecting a POST would silently drop the form data, so
                # fail loudly in DEBUG instead.
                if settings.DEBUG and request.method == 'POST':
                    raise RuntimeError((""
                        "You called this URL via POST, but the URL doesn't end "
                        "in a slash and you have APPEND_SLASH set. Django can't "
                        "redirect to the slash URL while maintaining POST data. "
                        "Change your form to point to %s%s (note the trailing "
                        "slash), or set APPEND_SLASH=False in your Django "
                        "settings.") % (new_url[0], new_url[1]))

        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0]:
            newurl = "%s://%s%s" % (
                request.scheme,
                new_url[0], urlquote(new_url[1]))
        else:
            newurl = urlquote(new_url[1])
        if request.META.get('QUERY_STRING', ''):
            if six.PY3:
                newurl += '?' + request.META['QUERY_STRING']
            else:
                # `query_string` is a bytestring. Appending it to the unicode
                # string `newurl` will fail if it isn't ASCII-only. This isn't
                # allowed; only broken software generates such query strings.
                # Better drop the invalid query string than crash (#15152).
                try:
                    newurl += '?' + request.META['QUERY_STRING'].decode()
                except UnicodeDecodeError:
                    pass
        return http.HttpResponsePermanentRedirect(newurl)

    def process_response(self, request, response):
        """
        Calculate the ETag, if needed.

        Streaming responses are skipped (their content cannot be hashed
        without consuming it); a matching If-None-Match header turns the
        response into a 304 Not Modified.
        """
        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                etag = response['ETag']
            elif response.streaming:
                etag = None
            else:
                etag = '"%s"' % hashlib.md5(response.content).hexdigest()
            if etag is not None:
                if (200 <= response.status_code < 300
                        and request.META.get('HTTP_IF_NONE_MATCH') == etag):
                    # Keep cookies set earlier in the response cycle on the 304.
                    cookies = response.cookies
                    response = http.HttpResponseNotModified()
                    response.cookies = cookies
                else:
                    response['ETag'] = etag
        return response
class BrokenLinkEmailsMiddleware(object):

    def process_response(self, request, response):
        """
        Send broken link emails for relevant 404 NOT FOUND responses.
        """
        if response.status_code != 404 or settings.DEBUG:
            return response

        domain = request.get_host()
        path = request.get_full_path()
        referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')

        if not self.is_ignorable_request(request, path, domain, referer):
            ua = request.META.get('HTTP_USER_AGENT', '<none>')
            ip = request.META.get('REMOTE_ADDR', '<none>')
            prefix = 'INTERNAL ' if self.is_internal_request(domain, referer) else ''
            mail_managers(
                "Broken %slink on %s" % (prefix, domain),
                "Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
                "IP address: %s\n" % (referer, path, ua, ip),
                fail_silently=True)
        return response

    def is_internal_request(self, domain, referer):
        """
        Returns True if the referring URL is the same domain as the current request.
        """
        # Different subdomains are treated as different domains.
        return bool(re.match("^https?://%s/" % re.escape(domain), referer))

    def is_ignorable_request(self, request, uri, domain, referer):
        """
        Returns True if the given request *shouldn't* notify the site managers.
        """
        if not referer:
            return True
        # '?' in referer is identified as search engine source
        if not self.is_internal_request(domain, referer) and '?' in referer:
            return True
        return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| bsd-3-clause | 5,269,062,605,172,859,000 | 40.718563 | 91 | 0.559064 | false |
layuplist/layup-list | apps/analytics/tasks.py | 1 | 2942 | from datetime import datetime, timedelta
from celery import shared_task
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db.models import Q
from django.template.loader import get_template
from django.template import Context
from apps.web.models import CourseOffering, Review, Vote
from lib import constants, task_utils, terms
@shared_task
@task_utils.email_if_fails
def send_analytics_email_update(lookback=timedelta(days=7)):
    """Render and send the weekly analytics summary to the site admins.

    :param lookback: window defining what counts as "new" activity.
    """
    template = get_template('analytics_email.txt')
    body = template.render(Context(_get_analytics_email_context(lookback)))
    recipients = [address for _, address in settings.ADMINS]
    send_mail(
        'Layup List Weekly Update',
        body,
        constants.SUPPORT_EMAIL,
        recipients,
        fail_silently=False,
    )
def _get_analytics_email_context(lookback):
    """Build the template context for the analytics email.

    :param lookback: timedelta window defining what counts as "new".
    :returns: dict of user, vote and review querysets, split into
        all-time and recent buckets.
    """
    cutoff = datetime.now() - lookback
    created_recently = Q(created_at__gte=cutoff)

    all_users = User.objects.all()
    quality_votes = Vote.objects.filter(category=Vote.CATEGORIES.QUALITY)
    difficulty_votes = Vote.objects.filter(category=Vote.CATEGORIES.DIFFICULTY)

    quality_up = quality_votes.filter(value=1)
    quality_down = quality_votes.filter(value=-1)
    difficulty_up = difficulty_votes.filter(value=1)
    difficulty_down = difficulty_votes.filter(value=-1)

    return {
        'users': {
            'all': all_users,
            'new': all_users.filter(date_joined__gte=cutoff),
            'unique_recent_logins': all_users.filter(
                last_login__gte=cutoff),
        },
        'votes': {
            'all_quality_upvotes': quality_up,
            'all_quality_downvotes': quality_down,
            'all_difficulty_upvotes': difficulty_up,
            'all_difficulty_downvotes': difficulty_down,
            'new_quality_upvotes': quality_up.filter(created_recently),
            'new_quality_downvotes': quality_down.filter(created_recently),
            'new_difficulty_upvotes': difficulty_up.filter(created_recently),
            'new_difficulty_downvotes': difficulty_down.filter(created_recently),
        },
        'reviews': {
            'all': Review.objects.all(),
            'new': Review.objects.filter(created_recently),
        },
    }
@shared_task
@task_utils.email_if_fails
def possibly_request_term_update():
    """Email the admins when the next term has enough course offerings,
    suggesting that the CURRENT_TERM setting may need to be advanced.

    :returns: the number of offerings found for the next term.
    """
    upcoming = terms.get_next_term(constants.CURRENT_TERM)
    offering_count = CourseOffering.objects.filter(term=upcoming).count()
    if offering_count >= constants.OFFERINGS_THRESHOLD_FOR_TERM_UPDATE:
        send_mail(
            'Term may be out of date ({} offerings with term {})'.format(
                offering_count, upcoming),
            'Consider modifying the environment variable.',
            constants.SUPPORT_EMAIL,
            [email for _, email in settings.ADMINS],
            fail_silently=False,
        )
    return offering_count
| gpl-3.0 | 2,592,868,231,928,218,600 | 37.207792 | 79 | 0.662475 | false |
spandanb/horizon | openstack_dashboard/api/sahara.py | 1 | 10667 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from openstack_dashboard.api import base
from saharaclient import client as api_client
LOG = logging.getLogger(__name__)
# "type" of Sahara service registered in keystone
SAHARA_SERVICE = 'data_processing'
SAHARA_AUTO_IP_ALLOCATION_ENABLED = getattr(settings,
'SAHARA_AUTO_IP_ALLOCATION_ENABLED',
False)
VERSIONS = base.APIVersionManager(SAHARA_SERVICE,
preferred_version=getattr(settings,
'OPENSTACK_API_VERSIONS',
{}).get(SAHARA_SERVICE, 1.1))
VERSIONS.load_supported_version(1.1, {"client": api_client,
"version": 1.1})
def client(request):
    """Instantiate a Sahara API client for the active API version.

    The endpoint is resolved via ``base.url_for`` for the registered
    ``data_processing`` service; the current user's project id and token
    are reused for authentication.
    """
    return api_client.Client(VERSIONS.get_active_version()["version"],
                             sahara_url=base.url_for(request, SAHARA_SERVICE),
                             service_type=SAHARA_SERVICE,
                             project_id=request.user.project_id,
                             input_auth_token=request.user.token.id)
def image_list(request):
return client(request).images.list()
def image_get(request, image_id):
return client(request).images.get(image_id)
def image_unregister(request, image_id):
client(request).images.unregister_image(image_id)
def image_update(request, image_id, user_name, desc):
client(request).images.update_image(image_id, user_name, desc)
def image_tags_update(request, image_id, image_tags):
client(request).images.update_tags(image_id, image_tags)
def plugin_list(request):
return client(request).plugins.list()
def plugin_get(request, plugin_name):
return client(request).plugins.get(plugin_name)
def plugin_get_version_details(request, plugin_name, hadoop_version):
return client(request).plugins.get_version_details(plugin_name,
hadoop_version)
def plugin_convert_to_template(request, plugin_name, hadoop_version,
template_name, file_content):
return client(request).plugins.convert_to_cluster_template(plugin_name,
hadoop_version,
template_name,
file_content)
def nodegroup_template_create(request, name, plugin_name, hadoop_version,
flavor_id, description=None,
volumes_per_node=None, volumes_size=None,
node_processes=None, node_configs=None,
floating_ip_pool=None):
return client(request).node_group_templates.create(name, plugin_name,
hadoop_version,
flavor_id, description,
volumes_per_node,
volumes_size,
node_processes,
node_configs,
floating_ip_pool)
def nodegroup_template_list(request):
return client(request).node_group_templates.list()
def nodegroup_template_get(request, ngt_id):
return client(request).node_group_templates.get(ngt_id)
def nodegroup_template_delete(request, ngt_id):
client(request).node_group_templates.delete(ngt_id)
def nodegroup_template_update(request, ngt_id, name, plugin_name,
hadoop_version, flavor_id,
description=None, volumes_per_node=None,
volumes_size=None, node_processes=None,
node_configs=None, floating_ip_pool=None):
return client(request).node_group_templates.update(ngt_id, name,
plugin_name,
hadoop_version,
flavor_id,
description,
volumes_per_node,
volumes_size,
node_processes,
node_configs,
floating_ip_pool)
def cluster_template_create(request, name, plugin_name, hadoop_version,
description=None, cluster_configs=None,
node_groups=None, anti_affinity=None,
net_id=None):
return client(request).cluster_templates.create(name, plugin_name,
hadoop_version,
description,
cluster_configs,
node_groups,
anti_affinity,
net_id)
def cluster_template_list(request):
return client(request).cluster_templates.list()
def cluster_template_get(request, ct_id):
return client(request).cluster_templates.get(ct_id)
def cluster_template_delete(request, ct_id):
client(request).cluster_templates.delete(ct_id)
def cluster_template_update(request, ct_id, name, plugin_name,
hadoop_version, description=None,
cluster_configs=None, node_groups=None,
anti_affinity=None, net_id=None):
return client(request).cluster_templates.update(ct_id, name,
plugin_name,
hadoop_version,
description,
cluster_configs,
node_groups,
anti_affinity,
net_id)
def cluster_create(request, name, plugin_name, hadoop_version,
cluster_template_id=None, default_image_id=None,
is_transient=None, description=None, cluster_configs=None,
node_groups=None, user_keypair_id=None,
anti_affinity=None, net_id=None):
return client(request).clusters.create(name, plugin_name, hadoop_version,
cluster_template_id,
default_image_id,
is_transient, description,
cluster_configs, node_groups,
user_keypair_id, anti_affinity,
net_id)
def cluster_scale(request, cluster_id, scale_object):
return client(request).clusters.scale(cluster_id, scale_object)
def cluster_list(request):
return client(request).clusters.list()
def cluster_get(request, cluster_id):
return client(request).clusters.get(cluster_id)
def cluster_delete(request, cluster_id):
client(request).clusters.delete(cluster_id)
def data_source_create(request, name, description, ds_type, url,
credential_user=None, credential_pass=None):
return client(request).data_sources.create(name, description, ds_type,
url, credential_user,
credential_pass)
def data_source_list(request):
return client(request).data_sources.list()
def data_source_get(request, ds_id):
return client(request).data_sources.get(ds_id)
def data_source_delete(request, ds_id):
client(request).data_sources.delete(ds_id)
def job_binary_create(request, name, url, description, extra):
return client(request).job_binaries.create(name, url, description, extra)
def job_binary_list(request):
return client(request).job_binaries.list()
def job_binary_get(request, jb_id):
return client(request).job_binaries.get(jb_id)
def job_binary_delete(request, jb_id):
client(request).job_binaries.delete(jb_id)
def job_binary_get_file(request, jb_id):
return client(request).job_binaries.get_file(jb_id)
def job_binary_internal_create(request, name, data):
return client(request).job_binary_internals.create(name, data)
def job_binary_internal_list(request):
return client(request).job_binary_internals.list()
def job_binary_internal_get(request, jbi_id):
return client(request).job_binary_internals.get(jbi_id)
def job_binary_internal_delete(request, jbi_id):
client(request).job_binary_internals.delete(jbi_id)
def job_create(request, name, j_type, mains, libs, description):
return client(request).jobs.create(name, j_type, mains, libs, description)
def job_list(request):
return client(request).jobs.list()
def job_get(request, job_id):
return client(request).jobs.get(job_id)
def job_delete(request, job_id):
client(request).jobs.delete(job_id)
def job_get_configs(request, job_type):
return client(request).jobs.get_configs(job_type)
def job_execution_create(request, job_id, cluster_id,
input_id, output_id, configs):
return client(request).job_executions.create(job_id, cluster_id,
input_id, output_id,
configs)
def job_execution_list(request):
return client(request).job_executions.list()
def job_execution_get(request, jex_id):
return client(request).job_executions.get(jex_id)
def job_execution_delete(request, jex_id):
client(request).job_executions.delete(jex_id)
| apache-2.0 | -8,144,537,474,681,682,000 | 35.406143 | 78 | 0.541764 | false |
ManiacalLabs/BiblioPixel | bibliopixel/colors/gamma.py | 1 | 1733 | import numpy as np
class Gamma(object):
    """
    Precomputed 256-entry gamma-correction lookup table.
    """

    def __init__(self, gamma=1.0, offset=0, lower_bound=0):
        """
        :param float gamma: the root for gamma computation
        :param float offset: an offset added to the result
        :param int lower_bound: The lowest possible output value - the highest
            is always 255.
        """
        self.gamma = gamma
        self.offset = offset
        self.lower_bound = lower_bound
        span = 255 - lower_bound
        # Map each 8-bit input through the gamma curve into
        # [lower_bound, 255].
        self.table = tuple(
            int(lower_bound + pow(i / 255, gamma) * span + offset)
            for i in range(256)
        )
        self.np_table = np.array(self.table, dtype='uint8')

    def get(self, i):
        """
        :returns: The gamma table entry
        :param int i: the index into the table (clamped to [0, 255])
        """
        index = int(i)
        if index < 0:
            index = 0
        elif index > 255:
            index = 255
        return self.table[index]

    def batch_correct(self, uncorrected):
        """Batch apply gamma correction to a numpy array

        :param ndarray uncorrected: uncorrected channel values, must be ints in [0,255]
        :returns: corrected channel values, in the same shape as the input.
        """
        return self.np_table[uncorrected]
# Predefined per-chipset gamma tables (values taken from the sources below).

# From https://github.com/scottjgibson/PixelPi/blob/master/pixelpi.py
APA102 = Gamma(gamma=2.5, offset=0.5)
WS2801 = SM16716 = Gamma(gamma=2.5)
NONE = DEFAULT = Gamma()  # For when you need no correction

# From http://rgb-123.com/ws2812-color-output/
WS2812B = NEOPIXEL = WS2812 = Gamma(gamma=1 / 0.45)

# Color calculations from
# http://learn.adafruit.com/light-painting-with-raspberry-pi
# lower_bound=128: presumably the LPD8806 uses the high bit for framing,
# leaving a 7-bit brightness range — confirm against the datasheet.
LPD8806 = Gamma(gamma=2.5, offset=0.5, lower_bound=128)
| mit | -54,613,049,807,669,090 | 31.698113 | 87 | 0.620312 | false |
TheSimoms/Felleshoelet | spotifyconnector/venv/lib/python3.6/site-packages/pip/_internal/models/selection_prefs.py | 21 | 1908 | from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional
from pip._internal.models.format_control import FormatControl
class SelectionPreferences(object):

    """
    Encapsulates the candidate selection preferences for downloading
    and installing files.
    """

    # `allow_yanked` deliberately has no default value: every call site
    # must decide explicitly whether yanked releases (PEP 592) are
    # acceptable, which keeps that decision visible in the calling code.
    def __init__(
        self,
        allow_yanked,  # type: bool
        allow_all_prereleases=False,  # type: bool
        format_control=None,  # type: Optional[FormatControl]
        prefer_binary=False,  # type: bool
        ignore_requires_python=None,  # type: Optional[bool]
    ):
        # type: (...) -> None
        """Create a SelectionPreferences object.

        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param format_control: A FormatControl object or None. Used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param prefer_binary: Whether to prefer an old, but valid, binary
            dist over a new source dist.
        :param ignore_requires_python: Whether to ignore incompatible
            "Requires-Python" values in links. Defaults to False.
        """
        self.allow_yanked = allow_yanked
        self.allow_all_prereleases = allow_all_prereleases
        self.format_control = format_control
        self.prefer_binary = prefer_binary
        # Treat "not specified" the same as "do not ignore".
        self.ignore_requires_python = (
            False if ignore_requires_python is None else ignore_requires_python
        )
| gpl-2.0 | -3,967,020,638,732,614,000 | 39.595745 | 78 | 0.662998 | false |
adityacs/ansible | lib/ansible/modules/storage/zfs/zfs_facts.py | 13 | 8717 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
# Module documentation consumed by Ansible's doc tooling. Fixes: misspelled
# "Specifiies recurion", the non-standard "alias:" key (Ansible expects
# "aliases:"), and a copy-pasted alias list under the `type` option.
DOCUMENTATION = '''
---
module: zfs_facts
short_description: Gather facts about ZFS datasets.
description:
    - Gather facts from ZFS dataset properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
    name:
        description:
            - ZFS dataset name.
        aliases: [ "ds", "dataset" ]
        type: str
        required: yes
    recurse:
        description:
            - Specifies if properties for any children should be recursively
              displayed.
        type: bool
        default: False
        required: false
    parsable:
        description:
            - Specifies if property values should be displayed in machine
              friendly format.
        type: bool
        default: False
        required: false
    properties:
        description:
            - Specifies which dataset properties should be queried in comma-separated format.
              For more information about dataset properties, check zfs(1M) man page.
        aliases: [ "props" ]
        type: str
        default: all
        required: false
    type:
        description:
            - Specifies which dataset types to display. Multiple values have to be
              provided in comma-separated form.
        type: str
        default: all
        choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
        required: false
    depth:
        description:
            - Specifies recursion depth.
        type: int
        default: None
        required: false
'''
EXAMPLES = '''
- name: Gather facts about ZFS dataset rpool/export/home
zfs_facts:
dataset: rpool/export/home
- name: Report space usage on ZFS filesystems under data/home
zfs_facts:
name: data/home
recurse: yes
type: filesystem
- debug:
msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
with_items: '{{ ansible_zfs_datasets }}'
'''
RETURN = '''
name:
description: ZFS dataset name
returned: always
type: string
sample: rpool/var/spool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
recurse:
description: if we should recurse over ZFS dataset
returned: if 'recurse' is set to True
type: boolean
sample: True
zfs_datasets:
description: ZFS dataset facts
returned: always
type: string
sample:
{
"aclinherit": "restricted",
"aclmode": "discard",
"atime": "on",
"available": "43.8G",
"canmount": "on",
"casesensitivity": "sensitive",
"checksum": "on",
"compression": "off",
"compressratio": "1.00x",
"copies": "1",
"creation": "Thu Jun 16 11:37 2016",
"dedup": "off",
"devices": "on",
"exec": "on",
"filesystem_count": "none",
"filesystem_limit": "none",
"logbias": "latency",
"logicalreferenced": "18.5K",
"logicalused": "3.45G",
"mlslabel": "none",
"mounted": "yes",
"mountpoint": "/rpool",
"name": "rpool",
"nbmand": "off",
"normalization": "none",
"org.openindiana.caiman:install": "ready",
"primarycache": "all",
"quota": "none",
"readonly": "off",
"recordsize": "128K",
"redundant_metadata": "all",
"refcompressratio": "1.00x",
"referenced": "29.5K",
"refquota": "none",
"refreservation": "none",
"reservation": "none",
"secondarycache": "all",
"setuid": "on",
"sharenfs": "off",
"sharesmb": "off",
"snapdir": "hidden",
"snapshot_count": "none",
"snapshot_limit": "none",
"sync": "standard",
"type": "filesystem",
"used": "4.41G",
"usedbychildren": "4.41G",
"usedbydataset": "29.5K",
"usedbyrefreservation": "0",
"usedbysnapshots": "0",
"utf8only": "off",
"version": "5",
"vscan": "off",
"written": "29.5K",
"xattr": "on",
"zoned": "off"
}
'''
import os
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
class ZFSFacts(object):
    """Thin wrapper around the ``zfs`` CLI that collects dataset facts."""

    def __init__(self, module):
        # AnsibleModule instance; params were validated by argument_spec.
        self.module = module

        self.name = module.params['name']
        self.recurse = module.params['recurse']
        self.parsable = module.params['parsable']
        self.properties = module.params['properties']
        self.type = module.params['type']
        self.depth = module.params['depth']

        # property/value pairs keyed by dataset name, filled by get_facts()
        self._datasets = defaultdict(dict)
        self.facts = []

    def dataset_exists(self):
        """Return True if ``zfs list <name>`` exits successfully."""
        cmd = [self.module.get_bin_path('zfs')]

        cmd.append('list')
        cmd.append(self.name)

        (rc, out, err) = self.module.run_command(cmd)

        if rc == 0:
            return True
        else:
            return False

    def get_facts(self):
        """Run ``zfs get`` and parse its tab-separated output into facts.

        :returns: ``{'ansible_zfs_datasets': [...]}`` on success; on failure
            exits the module via ``fail_json`` (does not return).
        """
        cmd = [self.module.get_bin_path('zfs')]

        cmd.append('get')
        cmd.append('-H')  # scripting mode: no headers, tab-separated columns
        if self.parsable:
            cmd.append('-p')
        if self.recurse:
            cmd.append('-r')
        if int(self.depth) != 0:
            cmd.append('-d')
            cmd.append('%s' % self.depth)
        if self.type:
            cmd.append('-t')
            cmd.append(self.type)
        cmd.append('-o')
        cmd.append('name,property,value')
        cmd.append(self.properties)
        cmd.append(self.name)

        (rc, out, err) = self.module.run_command(cmd)

        if rc == 0:
            # Each output line is "<dataset>\t<property>\t<value>".
            for line in out.splitlines():
                dataset, property, value = line.split('\t')

                self._datasets[dataset].update({property: value})

            for k, v in iteritems(self._datasets):
                v.update({'name': k})
                self.facts.append(v)

            return {'ansible_zfs_datasets': self.facts}
        else:
            self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
                                  stderr=err,
                                  rc=rc)
def main():
    """Ansible module entry point: validate parameters and emit ZFS facts."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
            recurse=dict(required=False, default=False, type='bool'),
            parsable=dict(required=False, default=False, type='bool'),
            properties=dict(required=False, default='all', type='str'),
            type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
            depth=dict(required=False, default=0, type='int')
        ),
        supports_check_mode=True
    )

    zfs_facts = ZFSFacts(module)

    result = {}
    # Fact gathering never changes system state.
    result['changed'] = False
    result['name'] = zfs_facts.name

    if zfs_facts.parsable:
        result['parsable'] = zfs_facts.parsable

    if zfs_facts.recurse:
        result['recurse'] = zfs_facts.recurse

    if zfs_facts.dataset_exists():
        result['ansible_facts'] = zfs_facts.get_facts()
    else:
        module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 | -2,548,795,044,996,286,000 | 29.472028 | 106 | 0.537235 | false |
neithere/timetra.diary | timetra/diary/utils.py | 1 | 10788 | # -*- coding: utf-8 -*-
#
# Timetra is a time tracking application and library.
# Copyright © 2010-2014 Andrey Mikhaylenko
#
# This file is part of Timetra.
#
# Timetra is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timetra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Timer. If not, see <http://gnu.org/licenses/>.
#
"""
Utility functions
=================
"""
from datetime import date, datetime, time, timedelta
import re
# Python 2/3 compatibility shim: ``basestring`` exists only on Python 2,
# so fall back to ``str`` for isinstance checks on Python 3.
try:
    basestring
except NameError:
    # Python3
    basestring = str
def to_date(obj):
    """Return *obj* as a plain :class:`datetime.date`.

    A ``datetime`` is truncated to its date part; a ``date`` is returned
    unchanged.

    :raises TypeError: for any other type.
    """
    if not isinstance(obj, date):
        raise TypeError('expected date or datetime, got {0}'.format(obj))
    # datetime is a subclass of date, so check it explicitly.
    return obj.date() if isinstance(obj, datetime) else obj
def to_datetime(obj):
    """Return *obj* as a :class:`datetime.datetime`.

    A ``datetime`` passes through unchanged; a ``date`` becomes midnight
    of that day.

    :raises TypeError: for any other type.
    """
    if not isinstance(obj, date):
        raise TypeError('expected date or datetime, got {0}'.format(obj))
    if isinstance(obj, datetime):
        return obj
    return datetime.combine(obj, time(0))
# TODO: use https://bitbucket.org/russellballestrini/ago/src
# or https://github.com/tantalor/pretty_timedelta
def format_delta(delta, fmt='{hours}:{minutes}'):
    """ Formats timedelta. Allowed variable names are: `days`, `hours`,
    `minutes`, `seconds`.

    Note: only ``delta.seconds`` is decomposed, so hours are within a day;
    whole days are exposed separately via ``{days}``.
    """
    total_minutes, seconds = divmod(delta.seconds, 60)
    hours, minutes = divmod(total_minutes, 60)
    return fmt.format(days=delta.days, hours=hours, minutes=minutes,
                      seconds=seconds)
def split_time(string):
    """ Returns a pair of integers `(hours, minutes)` for given string::

        >>> split_time('02:15')
        (2, 15)
        >>> split_time('2:15')
        (2, 15)
        >>> split_time(':15')
        (0, 15)
        >>> split_time('15')
        (0, 15)
        >>> split_time('150')
        (1, 50)

    """
    # NOTE: doctests previously referenced a nonexistent ``_split_time``
    # name, so they could never run; fixed to use the public name.
    def _split(s):
        if ':' in s:
            return s.split(':')
        if len(s) <= 2:
            # "35" -> 00:35
            return 0, s
        # "150" -> 01:50 (last two digits are minutes)
        return s[:-2], s[-2:]
    return tuple(int(x or 0) for x in _split(string))
def parse_date(string):
    """
    Expects a date string in ``YYYY-MM-DD`` format.
    Returns a corresponding `datetime.date` object.

    :raises ValueError: if the string does not match the format.
    """
    parsed = datetime.strptime(string, '%Y-%m-%d')
    return parsed.date()
def parse_time(string):
    """
    Parses a time spec and returns a ``(datetime.time, substract)`` pair,
    where `substract` tells whether the time should be subtracted from the
    start time (i.e. the spec began with a minus)::

        >>> parse_time('20')
        (datetime.time(0, 20), False)
        >>> parse_time('-1:35')
        (datetime.time(1, 35), True)

    The special value ``'now'`` yields the current wall-clock time, and
    ``'now-SPEC'`` yields the current time minus the given H:MM offset;
    both return `substract` as False.

    (The previous docstring examples showed bare time objects, but the
    function has always returned a tuple.)
    """
    substract = False

    if string == 'now':
        return datetime.now().time(), substract

    if string.startswith('now-'):
        # e.g. "now-150" → current time minus 1:50
        now = datetime.now()
        _, substring = string.split('-')
        delta, _ = parse_time(substring)
        start = now - timedelta(hours=delta.hour, minutes=delta.minute)
        return start.time(), substract

    if string.startswith('-'):
        # e.g. "-2:58" → subtract 2:58 from the start time
        substract = True
        string = string[1:]

    hours, minutes = split_time(string)
    return time(hours, minutes), substract
def parse_time_to_datetime(string, relative_to=None, ensure_past_time=True):
    """ Parses string to a datetime, relative to given date (or current one):

    CURRENT FORMAT:
        12:05 = DATE, at 12:05
    TODO:
        1205 = DATE, at 12:05
         205 = DATE, at 02:05
          05 = DATE, at 00:05
           5 = DATE, at 00:05
          -5 = DATE - 5 minutes

    With `ensure_past_time`, a result later than the reference moment is
    pushed back one day (a time "in the future" is assumed to belong to
    yesterday). Empty input yields ``None``.
    """
    if not string:
        return
    base = relative_to or datetime.now()
    parsed, _ = parse_time(string)
    candidate = datetime.combine(base, parsed)
    # microseconds are not important but may break the comparison below
    base = base.replace(microsecond=0)
    candidate = candidate.replace(microsecond=0)
    if ensure_past_time and base < candidate:
        return candidate - timedelta(days=1)
    return candidate
def parse_delta(string):
    """Parse an ``H:MM``-style string into a :class:`datetime.timedelta`.

    Empty or ``None`` input yields ``None``.
    """
    if not string:
        return None
    hours, minutes = split_time(string)
    return timedelta(hours=hours, minutes=minutes)
def extract_date_time_bounds(spec):
    """Split a time-bounds spec like ``"12:30..13:45"`` into its parts.

    :returns: dict with whichever of the ``since``/``until`` string
        components were present (empty dict for ``""`` or ``".."``).
    :raises ValueError: if the spec matches no known form.
    """
    spec = spec.strip()
    component_time = r'[0-9]{0,2}:?[0-9]{1,2}'
    component_rel = r'[+-]\d+'
    component = r'{time}|{rel}'.format(time=component_time, rel=component_rel)
    separator = r'\.\.'
    candidates = [
        # all normal cases: both bounds given explicitly
        r'^(?P<since>{component}){sep}(?P<until>{component})$'.format(
            component=component, sep=separator),
        # since last until given
        r'^\.\.(?P<until>{component})$'.format(component=component),
        # since given until now
        r'^(?P<since>{component})\.\.$'.format(component=component),
        # since last until now
        r'^(\.\.|)$',
        # ultrashortcut "1230+5"
        r'^(?P<since>{time})(?P<until>\+\d+)$'.format(time=component_time),
        # ultrashortcut "+5" / "-5"
        r'^(?P<since>{rel})$'.format(rel=component_rel),
    ]
    for candidate in candidates:
        match = re.match(candidate, spec)
        if match:
            return match.groupdict()
    raise ValueError(u'Could not parse "{}" to time bounds '.format(spec))
def string_to_time_or_delta(value):
    """Parse a raw bound string into a ``time`` or a ``timedelta``.

    "+H:MM" / "-H:MM" become a positive / negative ``timedelta``;
    a plain "H:MM" becomes a wall-clock ``time``.  ``None`` passes
    through unchanged.
    """
    if value is None:
        return None
    # Accept both str and (on Python 2) unicode; `basestring` no longer
    # exists on Python 3, so fall back to plain `str` there.
    try:
        text_types = (str, basestring)  # noqa: F821 -- Python 2
    except NameError:
        text_types = (str,)            # Python 3
    assert isinstance(value, text_types)
    if value.startswith(('+', '-')):
        # relative offset: sign decides the direction of the delta
        hours, minutes = split_time(value[1:])
        assert minutes <= 60
        delta = timedelta(hours=hours, minutes=minutes)
        return delta if value[0] == '+' else -delta
    else:
        hours, minutes = split_time(value)
        return time(hour=hours, minute=minutes)
def round_fwd(time):
    """Round *time* forward (up) to the nearest half-minute boundary.

    A value already sitting exactly on a :00/:30-second boundary with no
    microseconds is returned unchanged.
    """
    if not time.second and not time.microsecond:
        return time
    if time.microsecond:
        # bump up to the next whole second first
        time += timedelta(seconds=1, microseconds=-time.microsecond)
    seconds = time.second
    if seconds:
        # then up to :30 or to the next full minute
        boundary = 30 if seconds <= 30 else 60
        time += timedelta(seconds=boundary - seconds)
    return time
def _normalize_since(last, since, now):
    """Resolve a raw ``since`` bound to a datetime, or keep it relative.

    A datetime passes through untouched.  A wall-clock ``time`` is
    anchored to today or yesterday -- whichever puts it in the past.
    A ``timedelta`` is relative: a negative one refers to ``until``
    (not yet known, so it is returned unchanged and combined later in
    ``normalize_group``); a non-negative one refers to ``last``.
    """
    if isinstance(since, datetime):
        return since
    if isinstance(since, time):
        if since < now.time():
            # e.g. since 20:00, now is 20:30, makes sense
            reftime = round_fwd(now)
        else:
            # e.g. since 20:50, now is 20:30 → can't be today;
            # probably yesterday (allowing earlier dates can be confusing)
            reftime = round_fwd(now) - timedelta(days=1)
        return reftime.replace(hour=since.hour, minute=since.minute, second=0, microsecond=0)
    if isinstance(since, timedelta):
        # relative...
        if since.total_seconds() < 0:
            # ...to `until`
            #
            # "-5..until" → "{until-5}..until"; `until` is not normalized yet
            return since # unchanged timedelta
        else:
            # ...to `last`
            #
            # "+5.." → "{last+5}.."; `last` is already known
            return round_fwd(last) + since
    raise TypeError('since')
def _normalize_until(last, until, now):
    """Resolve a raw ``until`` bound to a datetime, or keep it relative.

    Mirrors ``_normalize_since``: datetimes pass through; wall-clock
    times are anchored to today or yesterday so they lie in the past;
    timedeltas are relative -- a negative one to ``now`` (resolved
    here), a non-negative one to ``since`` (returned unchanged and
    combined later in ``normalize_group``).
    """
    if isinstance(until, datetime):
        return until
    if isinstance(until, time):
        if until < now.time():
            # e.g. until 20:00, now is 20:30, makes sense
            reftime = round_fwd(now)
        else:
            # e.g. until 20:50, now is 20:30 → can't be today;
            # probably yesterday (allowing earlier dates can be confusing)
            reftime = round_fwd(now) - timedelta(days=1)
        return reftime.replace(hour=until.hour, minute=until.minute, second=0, microsecond=0)
    if isinstance(until, timedelta):
        # relative...
        if until.total_seconds() < 0:
            # ...to `now`
            #
            # "since..-5" → "since..{now-5}"; `now` is already known
            return round_fwd(now) + until
        else:
            # ...to `since`
            #
            # "since..+5" → "since..{since+5}"; `since` is not normalized yet
            # (or it is but we want to keep the code refactoring-friendly)
            return until # unchanged timedelta
    raise TypeError('until')
def normalize_group(last, since, until, now):
    """Resolve raw since/until bounds into a concrete (since, until) pair.

    A missing ``since`` defaults to ``last`` (rounded forward); a missing
    ``until`` defaults to ``now``.  Bounds the per-bound helpers left as
    timedeltas (because each referred to the *other* bound) are combined
    here.  Raises ``AssertionError`` when the result is inconsistent
    (absolute ``until`` in the future, or ``since`` not strictly before
    ``until``).
    """
    assert since or last
    assert until or now
    if since is None:
        # NOTE: "if not since" won't work for "00:00"
        # because `bool(time(0,0)) == False`
        since = round_fwd(last)
    if until is None:
        until = now
    # since
    since = _normalize_since(last, since, now)
    #if isinstance(since, datetime):
    #    # XXX TODO this should be only raised in some special circumstances
    #    # it's not a good idea to prevent adding facts between existing ones
    #    # so an overlapping check would be a good idea (but on a later stage)
    #    assert last <= since, 'since ({}) must be ≥ last ({})'.format(since, last)
    # until
    until = _normalize_until(last, until, now)
    if isinstance(until, datetime):
        assert until <= now, 'until ({}) must be ≤ now ({})'.format(until, now)
    # some components could not be normalized individually
    if isinstance(since, timedelta) and isinstance(until, timedelta):
        # "-10..+5" → "{now-10}..{since+5}"
        assert since.total_seconds() < 0
        assert until.total_seconds() >= 0
        since = round_fwd(now + since) # actually: now -since
        until = since + until
    elif isinstance(since, timedelta):
        # "-5..21:30" → "{until-5}..21:30"
        assert since.total_seconds() < 0
        since = round_fwd(until + since) # actually: until -since
    elif isinstance(until, timedelta):
        # "21:30..+5" → "21:30..{since+5}"
        assert until.total_seconds() >= 0
        until = since + until
    assert since < until, 'since ({}) must be < until ({})'.format(since, until)
    return since, until
def parse_date_time_bounds(spec, last, now=None):
    """Parse a bounds *spec* into a concrete (since, until) datetime pair.

    Extracts the raw components from *spec*, parses each into a time or a
    relative delta, and normalizes them against *last* and *now*
    (defaulting *now* to the current moment).
    """
    bounds = extract_date_time_bounds(spec)
    since = string_to_time_or_delta(bounds.get('since'))
    until = string_to_time_or_delta(bounds.get('until'))
    return normalize_group(last, since, until, now or datetime.now())
| lgpl-3.0 | -3,627,545,172,862,945,300 | 29.495751 | 93 | 0.584022 | false |
UpstandingHackers/hammer | src/bindings/python/hammer_tests.py | 2 | 19188 | from __future__ import absolute_import, division, print_function
import unittest
import hammer as h
class TestTokenParser(unittest.TestCase):
    """h.token: match an exact byte string in full; a prefix fails."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.token(b"95\xa2")
    def test_success(self):
        self.assertEqual(self.parser.parse(b"95\xa2"), b"95\xa2")
    def test_partial_fails(self):
        self.assertEqual(self.parser.parse(b"95"), None)
class TestChParser(unittest.TestCase):
    """h.ch: match one byte; accepts an int or a 1-byte bytes object,
    and the parse result mirrors the argument type."""
    @classmethod
    def setUpClass(cls):
        cls.parser_int = h.ch(0xa2)
        cls.parser_chr = h.ch(b"\xa2")
    def test_success(self):
        self.assertEqual(self.parser_int.parse(b"\xa2"), 0xa2)
        self.assertEqual(self.parser_chr.parse(b"\xa2"), b"\xa2")
    def test_failure(self):
        self.assertEqual(self.parser_int.parse(b"\xa3"), None)
        self.assertEqual(self.parser_chr.parse(b"\xa3"), None)
class TestChRange(unittest.TestCase):
    """h.ch_range: match any single byte in an inclusive range."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.ch_range(b"a", b"c")
    def test_success(self):
        self.assertEqual(self.parser.parse(b"b"), b"b")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"d"), None)
class TestInt64(unittest.TestCase):
    """h.int64: parse exactly 8 bytes as a signed big-endian integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.int64()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\xff\xff\xff\xfe\x00\x00\x00\x00"), -0x200000000)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\xff\xff\xff\xfe\x00\x00\x00"), None)
class TestInt32(unittest.TestCase):
    """h.int32: parse exactly 4 bytes as a signed big-endian integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.int32()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\xff\xfe\x00\x00"), -0x20000)
        self.assertEqual(self.parser.parse(b"\x00\x02\x00\x00"), 0x20000)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\xff\xfe\x00"), None)
        self.assertEqual(self.parser.parse(b"\x00\x02\x00"), None)
class TestInt16(unittest.TestCase):
    """h.int16: parse exactly 2 bytes as a signed big-endian integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.int16()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\xfe\x00"), -0x200)
        self.assertEqual(self.parser.parse(b"\x02\x00"), 0x200)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\xfe"), None)
        self.assertEqual(self.parser.parse(b"\x02"), None)
class TestInt8(unittest.TestCase):
    """h.int8: parse exactly 1 byte as a signed integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.int8()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\x88"), -0x78)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b""), None)
class TestUint64(unittest.TestCase):
    """h.uint64: parse exactly 8 bytes as an unsigned big-endian integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.uint64()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\x00\x00\x00\x02\x00\x00\x00\x00"), 0x200000000)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\x00\x00\x00\x02\x00\x00\x00"), None)
class TestUint32(unittest.TestCase):
    """h.uint32: parse exactly 4 bytes as an unsigned big-endian integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.uint32()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\x00\x02\x00\x00"), 0x20000)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\x00\x02\x00"), None)
class TestUint16(unittest.TestCase):
    """h.uint16: parse exactly 2 bytes as an unsigned big-endian integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.uint16()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\x02\x00"), 0x200)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\x02"), None)
class TestUint8(unittest.TestCase):
    """h.uint8: parse exactly 1 byte as an unsigned integer."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.uint8()
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\x78"), 0x78)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b""), None)
class TestIntRange(unittest.TestCase):
    """h.int_range: constrain a numeric parser's value to [low, high]."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.int_range(h.uint8(), 3, 10)
    def test_success(self):
        self.assertEqual(self.parser.parse(b"\x05"), 5)
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"\x0b"), None)
class TestWhitespace(unittest.TestCase):
    """h.whitespace: skip any leading whitespace before the inner parser."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.whitespace(h.ch(b"a"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), b"a")
        self.assertEqual(self.parser.parse(b" a"), b"a")
        self.assertEqual(self.parser.parse(b"  a"), b"a")
        self.assertEqual(self.parser.parse(b"\ta"), b"a")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"_a"), None)
class TestWhitespaceEnd(unittest.TestCase):
    """h.whitespace wrapping h.end_p: whitespace-only input is accepted."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.whitespace(h.end_p())
    def test_success(self):
        self.assertEqual(self.parser.parse(b""), None)  # empty string
        self.assertEqual(self.parser.parse(b"  "), None)  # empty string
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"  x"), None)
class TestLeft(unittest.TestCase):
    """h.left: run both parsers, keep only the left result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.left(h.ch(b"a"), h.ch(b" "))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a "), b"a")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
        self.assertEqual(self.parser.parse(b" "), None)
        self.assertEqual(self.parser.parse(b"ab"), None)
class TestRight(unittest.TestCase):
    """h.right: run both parsers, keep only the right result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.right(h.ch(b" "), h.ch(b"a"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b" a"), b"a")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
        self.assertEqual(self.parser.parse(b" "), None)
        self.assertEqual(self.parser.parse(b"ba"), None)
class TestMiddle(unittest.TestCase):
    """h.middle: run three parsers, keep only the middle result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.middle(h.ch(b" "), h.ch(b"a"), h.ch(b" "))
    def test_success(self):
        self.assertEqual(self.parser.parse(b" a "), b"a")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
        self.assertEqual(self.parser.parse(b" "), None)
        self.assertEqual(self.parser.parse(b" a"), None)
        self.assertEqual(self.parser.parse(b"a "), None)
        self.assertEqual(self.parser.parse(b" b "), None)
        self.assertEqual(self.parser.parse(b"ba "), None)
        self.assertEqual(self.parser.parse(b" ab"), None)
class TestAction(unittest.TestCase):
    """h.action: post-process the parse result with a callback."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.action(h.sequence(h.choice(h.ch(b"a"), h.ch(b"A")),
                                         h.choice(h.ch(b"b"), h.ch(b"B"))),
                              lambda x: [y.upper() for y in x])
    def test_success(self):
        self.assertEqual(self.parser.parse(b"ab"), [b"A", b"B"])
        self.assertEqual(self.parser.parse(b"AB"), [b"A", b"B"])
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"XX"), None)
class TestIn(unittest.TestCase):
    """h.in_: match any single byte contained in the given set."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.in_(b"abc")
    def test_success(self):
        self.assertEqual(self.parser.parse(b"b"), b"b")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"d"), None)
class TestNotIn(unittest.TestCase):
    """h.not_in: match any single byte NOT contained in the given set."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.not_in(b"abc")
    def test_success(self):
        self.assertEqual(self.parser.parse(b"d"), b"d")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
class TestEndP(unittest.TestCase):
    """h.end_p: succeed only when the end of input has been reached."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.end_p())
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), (b"a",))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"aa"), None)
class TestNothingP(unittest.TestCase):
    """h.nothing_p: a parser that never succeeds."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.nothing_p()
    def test_success(self):
        pass
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
class TestSequence(unittest.TestCase):
    """h.sequence: run parsers in order, collect results into a tuple."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.ch(b"b"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"ab"), (b"a", b"b"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
        self.assertEqual(self.parser.parse(b"b"), None)
class TestSequenceWhitespace(unittest.TestCase):
    """h.sequence with h.whitespace: inter-token whitespace is skipped."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.whitespace(h.ch(b"b")))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"ab"), (b"a", b"b"))
        self.assertEqual(self.parser.parse(b"a b"), (b"a", b"b"))
        self.assertEqual(self.parser.parse(b"a  b"), (b"a", b"b"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a  c"), None)
class TestChoice(unittest.TestCase):
    """h.choice: try alternatives in order, return the first success."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.choice(h.ch(b"a"), h.ch(b"b"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), b"a")
        self.assertEqual(self.parser.parse(b"b"), b"b")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"c"), None)
class TestButNot(unittest.TestCase):
    """h.butnot: succeed with p1 only where p2 does not also match."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.butnot(h.ch(b"a"), h.token(b"ab"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), b"a")
        self.assertEqual(self.parser.parse(b"aa"), b"a")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"ab"), None)
class TestButNotRange(unittest.TestCase):
    """h.butnot: carve an exception out of a character range."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.butnot(h.ch_range(b"0", b"9"), h.ch(b"6"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"4"), b"4")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"6"), None)
class TestDifference(unittest.TestCase):
    """h.difference: p1 minus p2; longer p1 matches survive."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.difference(h.token(b"ab"), h.ch(b"a"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"ab"), b"ab")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a"), None)
class TestXor(unittest.TestCase):
    """h.xor: exactly one of the two parsers may match; overlap fails."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.xor(h.ch_range(b"0", b"6"), h.ch_range(b"5", b"9"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"0"), b"0")
        self.assertEqual(self.parser.parse(b"9"), b"9")
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"5"), None)
        self.assertEqual(self.parser.parse(b"a"), None)
class TestMany(unittest.TestCase):
    """h.many: zero or more repetitions; empty input is a success."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.many(h.choice(h.ch(b"a"), h.ch(b"b")))
    def test_success(self):
        self.assertEqual(self.parser.parse(b""), ())
        self.assertEqual(self.parser.parse(b"a"), (b"a",))
        self.assertEqual(self.parser.parse(b"b"), (b"b",))
        self.assertEqual(self.parser.parse(b"aabbaba"), (b"a", b"a", b"b", b"b", b"a", b"b", b"a"))
    def test_failure(self):
        pass
class TestMany1(unittest.TestCase):
    """h.many1: one or more repetitions; empty input fails."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.many1(h.choice(h.ch(b"a"), h.ch(b"b")))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), (b"a",))
        self.assertEqual(self.parser.parse(b"b"), (b"b",))
        self.assertEqual(self.parser.parse(b"aabbaba"), (b"a", b"a", b"b", b"b", b"a", b"b", b"a"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b""), None)
        self.assertEqual(self.parser.parse(b"daabbabadef"), None)
class TestRepeatN(unittest.TestCase):
    """h.repeat_n: exactly n repetitions of the inner parser."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.repeat_n(h.choice(h.ch(b"a"), h.ch(b"b")), 2)
    def test_success(self):
        self.assertEqual(self.parser.parse(b"abdef"), (b"a", b"b"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"adef"), None)
        self.assertEqual(self.parser.parse(b"dabdef"), None)
class TestOptional(unittest.TestCase):
    """h.optional: a missing element yields an h.Placeholder in the result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.optional(h.choice(h.ch(b"b"), h.ch(b"c"))), h.ch(b"d"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"abd"), (b"a", b"b", b"d"))
        self.assertEqual(self.parser.parse(b"acd"), (b"a", b"c", b"d"))
        self.assertEqual(self.parser.parse(b"ad"), (b"a", h.Placeholder(), b"d"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"aed"), None)
        self.assertEqual(self.parser.parse(b"ab"), None)
        self.assertEqual(self.parser.parse(b"ac"), None)
class TestIgnore(unittest.TestCase):
    """h.ignore: the inner parser must match but leaves no result behind."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.ignore(h.ch(b"b")), h.ch(b"c"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"abc"), (b"a",b"c"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"ac"), None)
class TestSepBy(unittest.TestCase):
    """h.sepBy: zero or more elements separated by a delimiter."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sepBy(h.choice(h.ch(b"1"), h.ch(b"2"), h.ch(b"3")), h.ch(b","))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"1,2,3"), (b"1", b"2", b"3"))
        self.assertEqual(self.parser.parse(b"1,3,2"), (b"1", b"3", b"2"))
        self.assertEqual(self.parser.parse(b"1,3"), (b"1", b"3"))
        self.assertEqual(self.parser.parse(b"3"), (b"3",))
        self.assertEqual(self.parser.parse(b""), ())
    def test_failure(self):
        pass
class TestSepBy1(unittest.TestCase):
    """h.sepBy1: one or more elements separated by a delimiter."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sepBy1(h.choice(h.ch(b"1"), h.ch(b"2"), h.ch(b"3")), h.ch(b","))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"1,2,3"), (b"1", b"2", b"3"))
        self.assertEqual(self.parser.parse(b"1,3,2"), (b"1", b"3", b"2"))
        self.assertEqual(self.parser.parse(b"1,3"), (b"1", b"3"))
        self.assertEqual(self.parser.parse(b"3"), (b"3",))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b""), None)
class TestEpsilonP1(unittest.TestCase):
    """h.epsilon_p between two parsers: matches nothing, adds no result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.epsilon_p(), h.ch(b"b"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"ab"), (b"a", b"b"))
    def test_failure(self):
        pass
class TestEpsilonP2(unittest.TestCase):
    """h.epsilon_p at the start of a sequence: transparent no-op."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.epsilon_p(), h.ch(b"a"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), (b"a",))
    def test_failure(self):
        pass
class TestEpsilonP3(unittest.TestCase):
    """h.epsilon_p at the end of a sequence: transparent no-op."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.epsilon_p())
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), (b"a",))
    def test_failure(self):
        pass
class TestAttrBool(unittest.TestCase):
    """h.attr_bool: accept a parse only if the predicate holds on its result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.attr_bool(h.many1(h.choice(h.ch(b"a"), h.ch(b"b"))),
                                 lambda x: x[0] == x[1])
    def test_success(self):
        self.assertEqual(self.parser.parse(b"aa"), (b"a", b"a"))
        self.assertEqual(self.parser.parse(b"bb"), (b"b", b"b"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"ab"), None)
class TestAnd1(unittest.TestCase):
    """h.and_: positive lookahead; matches without consuming input."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.and_(h.ch(b"0")), h.ch(b"0"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"0"), (b"0",))
    def test_failure(self):
        pass
class TestAnd2(unittest.TestCase):
    """h.and_: the lookahead must agree with what actually follows."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.and_(h.ch(b"0")), h.ch(b"1"))
    def test_success(self):
        pass
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"0"), None)
class TestAnd3(unittest.TestCase):
    """h.and_ at the end of a sequence: peeks ahead, adds no result."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"1"), h.and_(h.ch(b"2")))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"12"), (b"1",))
    def test_failure(self):
        pass
class TestNot1(unittest.TestCase):
    """Without negative lookahead, choice commits to "+" and "a++b" fails."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"),
                                h.choice(h.ch(b"+"), h.token(b"++")),
                                h.ch(b"b"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a+b"), (b"a", b"+", b"b"))
    def test_failure(self):
        self.assertEqual(self.parser.parse(b"a++b"), None)
class TestNot2(unittest.TestCase):
    """h.not_: negative lookahead lets "+" and "++" coexist in a choice."""
    @classmethod
    def setUpClass(cls):
        cls.parser = h.sequence(h.ch(b"a"), h.choice(h.sequence(h.ch(b"+"), h.not_(h.ch(b"+"))),
                                                     h.token(b"++")),
                                h.ch(b"b"))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a+b"), (b"a", (b"+",), b"b"))
        self.assertEqual(self.parser.parse(b"a++b"), (b"a", b"++", b"b"))
    def test_failure(self):
        pass
# ### this is commented out for packrat in C ...
# #class TestLeftrec(unittest.TestCase):
# # @classmethod
# # def setUpClass(cls):
# # cls.parser = h.indirect()
# # a = h.ch(b"a")
# # h.bind_indirect(cls.parser, h.choice(h.sequence(cls.parser, a), a))
# # def test_success(self):
# # self.assertEqual(self.parser.parse(b"a"), b"a")
# # self.assertEqual(self.parser.parse(b"aa"), [b"a", b"a"])
# # self.assertEqual(self.parser.parse(b"aaa"), [b"a", b"a", b"a"])
# # def test_failure(self):
# # pass
class TestRightrec(unittest.TestCase):
    """Right recursion via h.indirect: parser = choice(seq('a', parser), ε),
    producing right-nested tuples."""
    @classmethod
    def setUpClass(cls):
        #raise unittest.SkipTest(b"Bind doesn't work right now")
        cls.parser = h.indirect()
        a = h.ch(b"a")
        cls.parser.bind(h.choice(h.sequence(a, cls.parser),
                                 h.epsilon_p()))
    def test_success(self):
        self.assertEqual(self.parser.parse(b"a"), (b"a",))
        self.assertEqual(self.parser.parse(b"aa"), (b"a", (b"a",)))
        self.assertEqual(self.parser.parse(b"aaa"), (b"a", (b"a", (b"a",))))
    def test_failure(self):
        pass
# ### this is just for GLR
# #class TestAmbiguous(unittest.TestCase):
# # @classmethod
# # def setUpClass(cls):
# # cls.parser = h.indirect()
# # d = h.ch(b"d")
# # p = h.ch(b"+")
# # h.bind_indirect(cls.parser, h.choice(h.sequence(cls.parser, p, cls.parser), d))
# # # this is supposed to be flattened
# # def test_success(self):
# # self.assertEqual(self.parser.parse(b"d"), [b"d"])
# # self.assertEqual(self.parser.parse(b"d+d"), [b"d", b"+", b"d"])
# # self.assertEqual(self.parser.parse(b"d+d+d"), [b"d", b"+", b"d", b"+", b"d"])
# # def test_failure(self):
# # self.assertEqual(self.parser.parse(b"d+"), None)
| gpl-2.0 | 4,513,962,877,326,974,000 | 36.549902 | 101 | 0.608714 | false |
ghisvail/vispy | vispy/app/backends/_sdl2.py | 18 | 15132 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy backend for sdl2.
"""
from __future__ import division
import atexit
import ctypes
from time import sleep
import warnings
import gc
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...util import keys, logger
from ...util.ptime import time
from ... import config
USE_EGL = config['gl_backend'].lower().startswith('es')
# -------------------------------------------------------------------- init ---
try:
    with warnings.catch_warnings(record=True): # can throw warnings
        import sdl2
        import sdl2.ext
    # Map native keys to vispy keys
    KEYMAP = {
        # http://www.ginkgobitter.org/sdl/?SDL_Keycode
        sdl2.SDLK_LSHIFT: keys.SHIFT,
        sdl2.SDLK_RSHIFT: keys.SHIFT,
        sdl2.SDLK_LCTRL: keys.CONTROL,
        sdl2.SDLK_RCTRL: keys.CONTROL,
        sdl2.SDLK_LALT: keys.ALT,
        sdl2.SDLK_RALT: keys.ALT,
        sdl2.SDLK_LGUI: keys.META,
        sdl2.SDLK_RGUI: keys.META,
        sdl2.SDLK_LEFT: keys.LEFT,
        sdl2.SDLK_UP: keys.UP,
        sdl2.SDLK_RIGHT: keys.RIGHT,
        sdl2.SDLK_DOWN: keys.DOWN,
        sdl2.SDLK_PAGEUP: keys.PAGEUP,
        sdl2.SDLK_PAGEDOWN: keys.PAGEDOWN,
        sdl2.SDLK_INSERT: keys.INSERT,
        sdl2.SDLK_DELETE: keys.DELETE,
        sdl2.SDLK_HOME: keys.HOME,
        sdl2.SDLK_END: keys.END,
        sdl2.SDLK_ESCAPE: keys.ESCAPE,
        sdl2.SDLK_BACKSPACE: keys.BACKSPACE,
        sdl2.SDLK_F1: keys.F1,
        sdl2.SDLK_F2: keys.F2,
        sdl2.SDLK_F3: keys.F3,
        sdl2.SDLK_F4: keys.F4,
        sdl2.SDLK_F5: keys.F5,
        sdl2.SDLK_F6: keys.F6,
        sdl2.SDLK_F7: keys.F7,
        sdl2.SDLK_F8: keys.F8,
        sdl2.SDLK_F9: keys.F9,
        sdl2.SDLK_F10: keys.F10,
        sdl2.SDLK_F11: keys.F11,
        sdl2.SDLK_F12: keys.F12,
        sdl2.SDLK_SPACE: keys.SPACE,
        sdl2.SDLK_RETURN: keys.ENTER,
        sdl2.SDLK_TAB: keys.TAB,
    }
    # SDL mouse buttons -> vispy button numbers (1=left, 2=middle, 3=right)
    BUTTONMAP = {sdl2.SDL_BUTTON_LEFT: 1,
                 sdl2.SDL_BUTTON_MIDDLE: 2,
                 sdl2.SDL_BUTTON_RIGHT: 3
                 }
except Exception as exp:
    # PySDL2 is missing or broken: report this backend as unavailable,
    # keeping the import error text as the reason.
    available, testable, why_not, which = False, False, str(exp), None
else:
    if USE_EGL:
        available, testable, why_not = False, False, 'EGL not supported'
    else:
        available, testable, why_not = True, True, None
    which = 'sdl2 %d.%d.%d' % sdl2.version_info[:3]
# Set once _vispy_get_native_app has run sdl2.ext.init().
_SDL2_INITIALIZED = False
# SDL window id -> CanvasBackend; used to route SDL events to canvases.
_VP_SDL2_ALL_WINDOWS = {}
def _get_sdl2_windows():
    # Snapshot as a list (not a view) so callers may close windows --
    # which removes them from the dict -- while iterating.
    return list(_VP_SDL2_ALL_WINDOWS.values())
# -------------------------------------------------------------- capability ---
# Feature flags advertised to vispy's backend-selection machinery.
capability = dict( # things that can be set by the backend
    title=True,
    size=True,
    position=True,
    show=True,
    vsync=True,
    resizable=True,
    decorate=True,
    fullscreen=True,
    context=True,
    multi_window=True,
    scroll=True,
    parent=False,
    always_on_top=False,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration for SDL2"""
func = sdl2.SDL_GL_SetAttribute
func(sdl2.SDL_GL_RED_SIZE, c['red_size'])
func(sdl2.SDL_GL_GREEN_SIZE, c['green_size'])
func(sdl2.SDL_GL_BLUE_SIZE, c['blue_size'])
func(sdl2.SDL_GL_ALPHA_SIZE, c['alpha_size'])
func(sdl2.SDL_GL_DEPTH_SIZE, c['depth_size'])
func(sdl2.SDL_GL_STENCIL_SIZE, c['stencil_size'])
func(sdl2.SDL_GL_DOUBLEBUFFER, 1 if c['double_buffer'] else 0)
samps = c['samples']
func(sdl2.SDL_GL_MULTISAMPLEBUFFERS, 1 if samps > 0 else 0)
func(sdl2.SDL_GL_MULTISAMPLESAMPLES, samps if samps > 0 else 0)
func(sdl2.SDL_GL_STEREO, c['stereo'])
# ------------------------------------------------------------- application ---
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._timers = list()
def _add_timer(self, timer):
if timer not in self._timers:
self._timers.append(timer)
def _vispy_get_backend_name(self):
return 'SDL2'
def _vispy_process_events(self):
events = sdl2.ext.get_events()
while len(events) > 0:
for event in events:
_id = event.window.windowID
if _id in _VP_SDL2_ALL_WINDOWS:
win = _VP_SDL2_ALL_WINDOWS[_id]
win._on_event(event)
events = sdl2.ext.get_events()
for timer in self._timers:
timer._tick()
wins = _get_sdl2_windows()
for win in wins:
if win._needs_draw:
win._needs_draw = False
win._on_draw()
def _vispy_run(self):
wins = _get_sdl2_windows()
while all(w._id is not None for w in wins):
self._vispy_process_events()
self._vispy_quit() # to clean up
def _vispy_quit(self):
# Close windows
wins = _get_sdl2_windows()
for win in wins:
win._vispy_close()
# tear down timers
for timer in self._timers:
timer._vispy_stop()
self._timers = []
def _vispy_get_native_app(self):
global _SDL2_INITIALIZED
if not _SDL2_INITIALIZED:
sdl2.ext.init()
atexit.register(sdl2.ext.quit)
_SDL2_INITIALIZED = True
return sdl2
# ------------------------------------------------------------------ canvas ---
class CanvasBackend(BaseCanvasBackend):
""" SDL2 backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
self._initialized = False
# Deal with config
_set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('sdl2', self)
if p.context.shared.ref is self:
share = None
else:
other = p.context.shared.ref
share = other._id.window, other._native_context
sdl2.SDL_GL_MakeCurrent(*share)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_SHARE_WITH_CURRENT_CONTEXT, 1)
sdl2.SDL_GL_SetSwapInterval(1 if p.vsync else 0)
flags = sdl2.SDL_WINDOW_OPENGL
flags |= sdl2.SDL_WINDOW_SHOWN # start out shown
flags |= sdl2.SDL_WINDOW_ALLOW_HIGHDPI
flags |= sdl2.SDL_WINDOW_RESIZABLE if p.resizable else 0
flags |= sdl2.SDL_WINDOW_BORDERLESS if not p.decorate else 0
if p.fullscreen is not False:
self._fullscreen = True
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for SDL2 '
'fullscreen, using default')
flags |= sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP
else:
self._fullscreen = False
self._mods = list()
if p.position is None:
position = [sdl2.SDL_WINDOWPOS_UNDEFINED] * 2
else:
position = None
self._id = sdl2.ext.Window(p.title, p.size, position, flags)
if not self._id.window:
raise RuntimeError('Could not create window')
if share is None:
self._native_context = sdl2.SDL_GL_CreateContext(self._id.window)
else:
self._native_context = sdl2.SDL_GL_CreateContext(share[0])
self._sdl_id = sdl2.SDL_GetWindowID(self._id.window)
_VP_SDL2_ALL_WINDOWS[self._sdl_id] = self
# Init
self._initialized = True
self._needs_draw = False
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
if not p.show:
self._vispy_set_visible(False)
def _vispy_warmup(self):
etime = time() + 0.1
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_set_current(self):
if self._id is None:
return
# Make this the current context
sdl2.SDL_GL_MakeCurrent(self._id.window, self._native_context)
def _vispy_swap_buffers(self):
if self._id is None:
return
# Swap front and back buffer
sdl2.SDL_GL_SwapWindow(self._id.window)
def _vispy_set_title(self, title):
if self._id is None:
return
# Set the window title. Has no effect for widgets
sdl2.SDL_SetWindowTitle(self._id.window, title.encode('UTF-8'))
def _vispy_set_size(self, w, h):
if self._id is None:
return
# Set size of the widget or window
sdl2.SDL_SetWindowSize(self._id.window, w, h)
def _vispy_set_position(self, x, y):
if self._id is None:
return
# Set position of the widget or window. May have no effect for widgets
sdl2.SDL_SetWindowPosition(self._id.window, x, y)
def _vispy_set_visible(self, visible):
# Show or hide the window or widget
if self._id is None:
return
if visible:
self._id.show()
# this ensures that the show takes effect
self._vispy_update()
else:
self._id.hide()
def _vispy_update(self):
# Invoke a redraw, passing it on to the canvas
if self._vispy_canvas is None or self._id is None:
return
# Mark that this window wants to be drawn on the next loop iter
self._needs_draw = True
def _vispy_close(self):
# Force the window or widget to shut down
if self._id is not None:
_id = self._id.window
self._vispy_canvas = None
self._id = None
sdl2.SDL_DestroyWindow(_id)
del _VP_SDL2_ALL_WINDOWS[self._sdl_id]
self._sdl_id = None
gc.collect() # enforce gc to help context get destroyed
def _vispy_get_size(self):
if self._id is None:
return
w, h = ctypes.c_int(), ctypes.c_int()
sdl2.SDL_GetWindowSize(self._id.window,
ctypes.byref(w), ctypes.byref(h))
w, h = w.value, h.value
return w, h
def _vispy_get_fullscreen(self):
return self._fullscreen
def _vispy_set_fullscreen(self, fullscreen):
self._fullscreen = bool(fullscreen)
flags = sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP if self._fullscreen else 0
sdl2.SDL_SetWindowFullscreen(self._id.window, flags)
def _vispy_get_position(self):
if self._id is None:
return
x, y = ctypes.c_int(), ctypes.c_int()
sdl2.SDL_GetWindowPosition(self._id.window,
ctypes.byref(x), ctypes.byref(y))
x, y = x.value, y.value
return x, y
##########################################
# Notify vispy of events triggered by SDL2
def _get_mouse_position(self):
if self._id is None:
return (0, 0)
x, y = ctypes.c_int(), ctypes.c_int()
sdl2.SDL_GetMouseState(ctypes.byref(x), ctypes.byref(y))
return x.value, y.value
def _on_draw(self):
if self._vispy_canvas is None or self._id is None:
return
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None) # (0, 0, w, h))
def _on_event(self, event):
if self._vispy_canvas is None:
return
# triage event to proper handler
if event.type == sdl2.SDL_QUIT:
self._vispy_canvas.close()
elif event.type == sdl2.SDL_WINDOWEVENT:
if event.window.event == sdl2.SDL_WINDOWEVENT_RESIZED:
w, h = event.window.data1, event.window.data2
self._vispy_canvas.events.resize(size=(w, h))
elif event.window.event == sdl2.SDL_WINDOWEVENT_CLOSE:
self._vispy_canvas.close()
elif event.type == sdl2.SDL_MOUSEMOTION:
x, y = event.motion.x, event.motion.y
self._vispy_mouse_move(pos=(x, y), modifiers=self._mods)
elif event.type in (sdl2.SDL_MOUSEBUTTONDOWN,
sdl2.SDL_MOUSEBUTTONUP):
x, y = event.button.x, event.button.y
button = event.button.button
if button in BUTTONMAP:
button = BUTTONMAP.get(button, 0)
if event.type == sdl2.SDL_MOUSEBUTTONDOWN:
func = self._vispy_mouse_press
else:
func = self._vispy_mouse_release
func(pos=(x, y), button=button, modifiers=self._mods)
elif event.type == sdl2.SDL_MOUSEWHEEL:
pos = self._get_mouse_position()
delta = float(event.wheel.x), float(event.wheel.y)
self._vispy_canvas.events.mouse_wheel(pos=pos, delta=delta,
modifiers=self._mods)
elif event.type in (sdl2.SDL_KEYDOWN, sdl2.SDL_KEYUP):
down = (event.type == sdl2.SDL_KEYDOWN)
keysym = event.key.keysym
mods = keysym.mod
key = keysym.sym
self._process_mod(mods, down)
if key in KEYMAP:
key, text = KEYMAP[key], ''
elif key >= 32 and key <= 127:
key, text = keys.Key(chr(key)), chr(key)
else:
key, text = None, ''
if down:
fun = self._vispy_canvas.events.key_press
else:
fun = self._vispy_canvas.events.key_release
fun(key=key, text=text, modifiers=self._mods)
def _process_mod(self, key, down):
_modifiers = list()
if key & (sdl2.SDLK_LSHIFT | sdl2.SDLK_RSHIFT):
_modifiers.append(keys.SHIFT)
if key & (sdl2.SDLK_LCTRL | sdl2.SDLK_RCTRL):
_modifiers.append(keys.CONTROL)
if key & (sdl2.SDLK_LALT | sdl2.SDLK_RALT):
_modifiers.append(keys.ALT)
if key & (sdl2.SDLK_LGUI | sdl2.SDLK_RGUI):
_modifiers.append(keys.META)
for mod in _modifiers:
if mod not in self._mods:
if down:
self._mods.append(mod)
elif not down:
self._mods.pop(self._mods.index(mod))
# ------------------------------------------------------------------- timer ---
# XXX should probably use SDL_Timer (and SDL_INIT_TIMER)
class TimerBackend(BaseTimerBackend):
    """Poll-driven timer: the app backend calls _tick() from its event loop
    and the timeout fires whenever the stored deadline has passed."""

    def __init__(self, vispy_timer):
        BaseTimerBackend.__init__(self, vispy_timer)
        # Register with the application backend so _tick() gets polled.
        vispy_timer._app._backend._add_timer(self)
        self._vispy_stop()

    def _vispy_start(self, interval):
        self._interval = interval
        self._next_time = time() + interval

    def _vispy_stop(self):
        # An infinite deadline means _tick() never fires.
        self._next_time = float('inf')

    def _tick(self):
        # Guard clause: nothing to do until the deadline has been reached.
        if time() < self._next_time:
            return
        self._vispy_timer._timeout()
        self._next_time = time() + self._interval
| bsd-3-clause | -4,155,578,784,518,633,500 | 32.776786 | 79 | 0.551018 | false |
RedHatQE/cfme_tests | docs/conf.py | 1 | 10239 | # -*- coding: utf-8 -*-
#
# cfme_tests documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 21 09:32:22 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from cfme.utils import on_rtd # noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../"))  # repo root, so project modules import
if on_rtd:
    clean_autogenerated_docs = True
    # Read the Docs builders have no local configuration, so seed the YAML
    # config files required at import time from their bundled templates.
    # Context managers guarantee the handles are closed even on I/O errors
    # (the previous code left files open if read()/write() raised).
    for _conf_name in ("env", "credentials"):
        with open("../conf/%s.yaml.template" % _conf_name, "r") as _template:
            _content = _template.read()
        with open("../conf/%s.yaml" % _conf_name, "w") as _target:
            _target.write(_content)
else:
    # Local builds: prefer the Read the Docs theme when it is installed,
    # falling back to the stock Sphinx theme otherwise.
    try:
        import sphinx_rtd_theme
        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        html_theme = "default"
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions; the last two entries are project-local extensions.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "cfme.fixtures.nelson",
    "cfme.utils.apidoc",
]

# Cross-project link targets for sphinx.ext.intersphinx.
intersphinx_mapping = {
    "boto": ("http://boto.readthedocs.org/en/latest/", None),
    "python": ("http://docs.python.org/2.7", None),
    "pytest": ("http://pytest.org/latest/", None),
    "selenium": ("http://selenium-python.readthedocs.org/", None),
    "sqlalchemy": ("http://docs.sqlalchemy.org/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = u"cfme_tests"
copyright = u"2013, RedHat QE"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0"
# The full version, including alpha/beta/rc tags.
# NOTE(review): version/release are pinned to "0" -- confirm this is intended.
release = "0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "dockerbot"]  # skipped when discovering sources
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"  # syntax-highlighting style for code samples
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = ['cfme.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s documentation" % project  # page/window title of the HTML docs
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]  # custom static assets copied into the build
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "cfme_testsdoc"  # output base name for the HTML help builder
# -- Options for LaTeX output --------------------------------------------------
# LaTeX build knobs; all defaults retained (override here if ever needed).
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ("index", "cfme_tests.tex", u"cfme\\_tests Documentation", u"RedHat QE", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One man page built from the dev guide; final tuple element is section 1.
man_pages = [("guides/dev_guide", "cfme_tests", u"cfme_tests Documentation", [u"RedHat QE"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [  # single Texinfo document covering the whole tree
    (
        "index",
        "cfme_tests",
        "cfme_tests Documentation",
        u"RedHat QE",
        "cfme_tests",
        "cfme_tests Documentation",
        "Miscellaneous",
    )
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Document all members of autodoc'ed objects by default.
autodoc_default_flags = ["members"]

# Modules unavailable at docs-build time (heavy or platform-specific
# dependencies); autodoc mocks them so documented modules can be imported.
autodoc_mock_imports = [
    "git",
    "pycurl",
    "gevent",
    # BUG FIX: a missing comma previously fused the next two entries into a
    # single bogus string ("novaclient.exceptions.OverLimitovirt-engine-sdk-
    # python"), so neither module was actually mocked.
    "novaclient.exceptions.OverLimit",
    "ovirt-engine-sdk-python",
    "ovirtsdk",
    "ovirtsdk.xml",
    "ovirtsdk.infrastructure",
    "ovirtsdk.infrastructure.errors",
    "wrapanapi.utils",
    "wrapanapi.VmState",
    "wrapanapi.base",
    "wrapanapi.exceptions",
    "wrapanapi.ec2",
    "wrapanapi.openstack",
    "wrapanapi.systems.rhevm.Error",
    "wrapanapi.scvmm",
    "wrapanapi.virtualcenter",
    "wrapanapi.kubernetes",
    "wrapanapi.openshift",
    "wrapanapi.rest_client",
    "wrapanapi.openstack_infra",
    "wrapanapi.hawkular",
    "wrapanapi.msazure",
    "wrapanapi.google",
    "wrapanapi.systems.container",
    "wrapanapi.containers.providers",
    "wrapanapi.containers.providers.rhkubernetes",
    "wrapanapi.containers.volume",
    "wrapanapi.containers.template",
    "wrapanapi.containers.service",
    "wrapanapi.containers.route",
    "wrapanapi.containers.pod",
    "wrapanapi.containers.project",
    "wrapanapi.containers.replicator",
    "wrapanapi.containers.deployment_config",
    "wrapanapi.containers.node",
    "wrapanapi.containers.image",
    "wrapanapi.containers.image_registry",
    "wrapanapi.containers.container",
    "wrapanapi.vcloud",
    "wrapanapi.nuage",
    "wrapanapi.lenovo",
]
| gpl-2.0 | -9,025,254,045,384,877,000 | 30.601852 | 96 | 0.683465 | false |
e0ne/cinder | cinder/openstack/common/versionutils.py | 11 | 4960 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import pkg_resources
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
    """A decorator to mark callables as deprecated.

    Each call of the decorated callable logs a deprecation message naming
    the release that deprecated it, the release in which it may be removed
    and, optionally, its replacement.

    Examples:

    1. Specifying the required deprecated release

        >>> @deprecated(as_of=deprecated.ICEHOUSE)
        ... def a(): pass

    2. Specifying a replacement:

        >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
        ... def b(): pass

    3. Specifying the release where the functionality may be removed:

        >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
        ... def c(): pass

    """
    # Single-letter release codes usable as ``as_of`` arguments.
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'

    _RELEASES = {
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
    }

    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')

    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')

    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator

        :param as_of: the release deprecating the callable. Constants
            are define in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)

        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what

    def __call__(self, func):
        # Fall back to the callable's own name when none was supplied.
        self.what = self.what or func.__name__ + '()'

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            message, details = self._build_message()
            LOG.deprecated(message, details)
            return func(*args, **kwargs)

        return wrapped

    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        # when we get to the X release because once we get to the Y
        # release, what is Y+2?
        candidate = chr(ord(release) + self.remove_in)
        # Future releases that are not in the map yet are reported by
        # their raw letter code.
        return self._RELEASES.get(candidate, candidate)

    def _build_message(self):
        details = dict(
            what=self.what,
            as_of=self._RELEASES[self.as_of],
            remove_in=self._get_safe_to_remove_release(self.as_of))

        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            template = self._deprecated_msg_with_alternative
        else:
            template = self._deprecated_msg_no_alternative
        return template, details
def is_compatible(requested_version, current_version, same_major=True):
    """Determine whether `requested_version` is satisfied by
    `current_version`; in other words, `current_version` is >=
    `requested_version`.

    :param requested_version: version to check for compatibility
    :param current_version: version to check against
    :param same_major: if True, the major version must be identical between
        `requested_version` and `current_version`. This is used when a
        major-version difference indicates incompatibility between the two
        versions. Since this is the common-case in practice, the default is
        True.

    :returns: True if compatible, False if not
    """
    # BUG FIX: the previous code indexed parse_version()'s result
    # (parts[0]); modern setuptools returns a packaging Version object
    # that is not subscriptable, raising TypeError.  Compare the major
    # components from the raw strings instead.
    if same_major and _major_version(requested_version) != \
            _major_version(current_version):
        return False

    # parse_version() orders dotted versions correctly ("1.10" > "1.9"),
    # which plain string comparison would get wrong.
    requested_parts = pkg_resources.parse_version(requested_version)
    current_parts = pkg_resources.parse_version(current_version)
    return current_parts >= requested_parts


def _major_version(version):
    """Return the normalized major component of a dotted version string.

    Leading zeros are stripped so that e.g. '02.1' and '2.0' compare as the
    same major version, matching the zero-padded tuples produced by older
    pkg_resources.parse_version() implementations.
    """
    return version.split('.')[0].lstrip('0') or '0'
| apache-2.0 | 2,030,077,365,344,251,600 | 32.513514 | 79 | 0.639718 | false |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/gslib/commands/cat.py | 32 | 4890 | # -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
# Copyright 2011, Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like cat command for cloud storage providers."""
from __future__ import absolute_import
import re
from gslib.cat_helper import CatHelper
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.util import NO_MAX
_SYNOPSIS = """
gsutil cat [-h] url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The cat command outputs the contents of one or more URLs to stdout.
It is equivalent to doing:
gsutil cp url... -
(The final '-' causes gsutil to stream the output to stdout.)
<B>WARNING: DATA INTEGRITY CHECKING NOT DONE</B>
The gsutil cat command does not compute a checksum of the downloaded data.
Therefore, we recommend that users either perform their own validation of the
output of gsutil cat or use gsutil cp or rsync (both of which perform
integrity checking automatically).
<B>OPTIONS</B>
-h Prints short header for each object. For example:
gsutil cat -h gs://bucket/meeting_notes/2012_Feb/*.txt
This would print a header with the object name before the contents
of each text object that matched the wildcard.
-r range Causes gsutil to output just the specified byte range of the
object. Ranges are can be of these forms:
start-end (e.g., -r 256-5939)
start- (e.g., -r 256-)
-numbytes (e.g., -r -5)
where offsets start at 0, start-end means to return bytes start
through end (inclusive), start- means to return bytes start
through the end of the object, and -numbytes means to return the
last numbytes of the object. For example:
gsutil cat -r 256-939 gs://bucket/object
returns bytes 256 through 939, while:
gsutil cat -r -5 gs://bucket/object
returns the final 5 bytes of the object.
""")
class CatCommand(Command):
    """Implementation of gsutil cat command."""

    # Command specification. See base class for documentation.
    command_spec = Command.CreateCommandSpec(
        'cat',
        command_name_aliases=[],
        usage_synopsis=_SYNOPSIS,
        min_args=1,
        max_args=NO_MAX,
        supported_sub_args='hr:',
        file_url_ok=False,
        provider_url_ok=False,
        urls_start_arg=0,
        gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
        gs_default_api=ApiSelector.JSON,
        argparse_arguments=[CommandArgument.MakeZeroOrMoreCloudURLsArgument()])

    # Help specification. See help_provider.py for documentation.
    help_spec = Command.HelpSpec(
        help_name='cat',
        help_name_aliases=[],
        help_type='command_help',
        help_one_line_summary='Concatenate object content to stdout',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={})

    # Pattern accepted by -r: "start-end", "start-", or "-numbytes".
    _RANGE_PATTERN = '^(?P<start>[0-9]+)-(?P<end>[0-9]*)$|^(?P<endslice>-[0-9]+)$'

    # Command entry point.
    def RunCommand(self):
        """Command entry point for the cat command."""
        show_header = False
        start_byte = 0
        end_byte = None

        for flag, value in self.sub_opts or []:
            if flag == '-h':
                show_header = True
            elif flag == '-r':
                request_range = value.strip()
                range_match = re.match(self._RANGE_PATTERN, request_range)
                if not range_match:
                    raise CommandException('Invalid range (%s)' % request_range)
                if range_match.group('start'):
                    start_byte = long(range_match.group('start'))
                if range_match.group('end'):
                    end_byte = long(range_match.group('end'))
                if range_match.group('endslice'):
                    # Negative offset: return the trailing numbytes.
                    start_byte = long(range_match.group('endslice'))
            else:
                self.RaiseInvalidArgumentException()

        return CatHelper(self).CatUrlStrings(self.args,
                                             show_header=show_header,
                                             start_byte=start_byte,
                                             end_byte=end_byte)
| bsd-3-clause | -9,204,638,240,251,082,000 | 32.958333 | 80 | 0.631697 | false |
gszpura/flask-ratelimiter | flask_ratelimiter/backends/flaskcacheredis_backend.py | 1 | 1595 | # -*- coding: utf-8 -*-
##
## This file is part of Flask-RateLimiter
## Copyright (C) 2014 CERN.
##
## Flask-RateLimiter is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Flask-RateLimiter is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Flask-RateLimiter; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
from __future__ import absolute_import
from .simpleredis_backend import SimpleRedisBackend
class FlaskCacheRedisBackend(SimpleRedisBackend):
    """
    Backend which uses Flask-Cache to store keys in Redis.
    """
    # Expiration window (seconds) applied to rate-limit keys; exact
    # semantics are defined by SimpleRedisBackend -- confirm there.
    expiration_window = 10

    def __init__(self, cache=None, **kwargs):
        """Wire the rate limiter to an existing Flask-Cache Redis client.

        :param cache: a Flask-Cache ``Cache`` instance configured with a
            Redis backend; its underlying redis client is reused.
        :raises ValueError: if *cache* is not a Flask-Cache ``Cache`` object.
        """
        self.cache = cache
        # Duck-typed check by class name: avoids making flask-cache a hard
        # import dependency of this module.
        if self.cache.__class__.__name__ != 'Cache':
            raise ValueError('Incorrect cache was passed as an argument')
        # Reuse the redis connection owned by Flask-Cache for pipelining.
        self.pipeline = self.cache.cache._client.pipeline()
        # NOTE(review): super() is given SimpleRedisBackend on purpose, so
        # SimpleRedisBackend.__init__ (which would set up its own client /
        # pipeline) is skipped and only the grandparent initializer runs --
        # confirm against upstream intent before changing.
        super(SimpleRedisBackend, self).__init__(**kwargs)
Heappl/scripts | oil_and_energy.py | 1 | 6416 |
import matplotlib.pyplot as plt
import numpy as np
import math
def diff_serie(data):
    """Percent change between each pair of consecutive values in *data*."""
    pairs = zip(data, data[1:])
    return [(float(curr) / float(prev) - 1.0) * 100 for prev, curr in pairs]

def average_change(data):
    """Mean of the consecutive percent changes of *data*."""
    changes = diff_serie(data)
    return sum(changes) / (len(data) - 1)

def prediction(data, years):
    """*data* extended by ``years - 1`` points of constant average growth."""
    rate = 1.0 + average_change(data) / 100.0
    extrapolated = [pow(rate, step) * data[-1] for step in range(1, years)]
    return data + extrapolated

def doubling_time(data):
    """Years needed to double at the series' average growth rate."""
    growth_factor = 1 + average_change(data) / 100.0
    return math.log(2, growth_factor)
#1965-
production_tonnes = [1567.9, 1702.3, 1826.6, 1992.8, 2143.4, 2358.0, 2496.2, 2640.6, 2871.3, 2879.4, 2738.2, 2972.9, 3077.1, 3106.8, 3237.3, 3091.9, 2913.9, 2799.7, 2763.0, 2818.7, 2796.8, 2941.6, 2952.5, 3074.7, 3108.6, 3175.4, 3165.7, 3195.3, 3194.5, 3244.0, 3286.1, 3384.2, 3485.9, 3550.8, 3486.9, 3620.4, 3620.3, 3604.5, 3737.5, 3909.6, 3947.5, 3968.7, 3955.3, 3993.2, 3890.9, 3979.3, 4010.6, 4117.4, 4130.2]
consumption_tonnes = [1529.7, 1645.4, 1763.7, 1914.3, 2079.0, 2257.2, 2379.8, 2563.4, 2763.6, 2724.0, 2692.6, 2867.9, 2966.8, 3047.7, 3097.6, 2975.3, 2867.6, 2777.2, 2754.4, 2815.0, 2817.0, 2900.6, 2956.3, 3056.3, 3108.9, 3162.5, 3161.6, 3215.3, 3185.8, 3251.5, 3293.4, 3368.4, 3457.3, 3480.5, 3548.9, 3583.7, 3610.9, 3641.3, 3725.2, 3869.1, 3919.3, 3959.3, 4018.4, 4000.2, 3924.6, 4040.2, 4085.1, 4138.9, 4185.1]
#thousand of barrels daily
production_barrels = [31798, 34563, 37113, 40430, 43627, 48056, 50839, 53662, 58460, 58613, 55822, 60410, 62716, 63338, 66061, 62959, 59547, 57312, 56615, 57696, 57459, 60435, 60745, 63111, 64002, 65385, 65204, 65716, 65978, 67073, 67990, 69845, 72101, 73457, 72293, 74983, 75213, 74991, 77639, 81054, 82107, 82593, 82383, 82955, 81262, 83296, 84049, 86204, 86754]
consumption_barrels = [30811, 33158, 35541, 38455, 41825, 45355, 47880, 51427, 55563, 54792, 54329, 57693, 59889, 62741, 63879, 61244, 59399, 57814, 57591, 58865, 59249, 60995, 62293, 64247, 65578, 66761, 66908, 67972, 67677, 69204, 70364, 71853, 74044, 74577, 76269, 76902, 77607, 78499, 80216, 83055, 84389, 85325, 86754, 86147, 85111, 87801, 88934, 89931, 91331]
production_barrels = [x * 365 for x in production_barrels]
consumption_barrels = [x * 365 for x in consumption_barrels]
energy_consumption = [8796, 8853, 8864, 8956, 9033, 9225, 9460, 9550, 9608, 9808, 10066, 10146, 10347, 10703, 11228, 11520, 11830, 12110, 12268, 12209, 12891, 13101, 13330, 13583]
gas_consumption = [2060, 2114, 2107, 2140, 2146, 2199, 2291, 2321, 2346, 2424, 2509, 2528, 2614, 2699, 2787, 2858, 2926, 3058, 3149, 3074, 3323, 3381, 3488, 3529]
#1980-
#thousand million barrels
reserves_barrels = [683.4, 696.5, 725.6, 737.3, 774.4, 802.6, 907.7, 938.9, 1026.7, 1027.3, 1027.5, 1032.7, 1039.3, 1041.4, 1055.6, 1065.9, 1088.7, 1107.4, 1092.9, 1237.9, 1258.1, 1266.8, 1321.5, 1334.1, 1343.7, 1353.1, 1363.9, 1399.3, 1471.6, 1513.2, 1621.6, 1661.8, 1687.3, 1687.9]
reserves_barrels = [x * 1000000 for x in reserves_barrels]
# Summary statistics: average yearly growth (%) and implied doubling time.
print("average consumption growth: ", average_change(consumption_barrels), " doubling time: ", doubling_time(consumption_barrels))
print("average reserves change: ", average_change(reserves_barrels), " doubling time: ", doubling_time(reserves_barrels))
print("average energy consumption growth: ", average_change(energy_consumption), " doubling time: ", doubling_time(energy_consumption))
#1980-
#how many years will the reserves last at given year consumption
# (series are aligned on their final years via the negative slice)
enough_for_years = [r / c for (r, c) in zip(reserves_barrels, consumption_barrels[-len(reserves_barrels):])]
#1960-2013
world_population = [3036532274, 3077628660, 3130560798, 3195417729, 3260822806, 3327569763, 3397499965, 3467103790, 3537483987, 3611954924, 3686967186, 3763995029, 3840300260, 3915619481, 3991662329, 4066294816, 4139216505, 4212110303, 4286351034, 4362172135, 4438370556, 4515996446, 4596367792, 4677144145, 4757614414, 4839988721, 4925285182, 5012839430, 5101281852, 5189681453, 5278917295, 5365433943, 5448304921, 5531856268, 5614434365, 5697982639, 5780020061, 5861978511, 5942982003, 6023027888, 6101963950, 6179984867, 6257502234, 6334799169, 6412222945, 6489886972, 6567781100, 6645686556, 6724770576, 6804045819, 6884007764, 6964618177, 7043181414, 7125096708]
world_fertility_rate = [4.97948236106441, 4.99685819530925, 5.01113676413739, 5.02191448801587, 5.02037956002923, 4.98999330794235, 4.93157441902233, 4.90693398924276, 4.85108066647119, 4.7838782325247, 4.70957818648593, 4.61218187570381, 4.49410609136349, 4.37544795176232, 4.26246755669855, 4.13826180878561, 4.02672994836869, 3.92932863735946, 3.83936487980546, 3.7736079113279, 3.71631798567279, 3.66392458807635, 3.63705804409513, 3.60623086822255, 3.57588217413512, 3.54839385963458, 3.51367909702325, 3.47111004443242, 3.41597838676048, 3.34521436416391, 3.27108095399287, 3.18149132330502, 3.0946032808544, 3.00573988523987, 2.9315151235276, 2.85926711725989, 2.80269227214385, 2.75019290398177, 2.70876432055345, 2.6701970301134, 2.64511099191879, 2.61393717543333, 2.59120804593079, 2.57341523485324, 2.55846596192828, 2.53920620757993, 2.53091593790034, 2.52411013716623, 2.51595136404155, 2.50060806166854, 2.48883793331158, 2.47573037924979, 2.47085830381361, 2.45989740588603]
# Barrels per person per year, over the years both series cover.
oil_consumption_per_capita = [c / float(p) for (c, p) in zip(consumption_barrels, world_population[-len(consumption_barrels):])]
print(oil_consumption_per_capita)
def myplot(data, descr, endyear=2014):
    """Plot *data* as a yearly series whose last point is endyear - 1."""
    first_year = endyear - len(data)
    plt.plot(range(first_year, endyear), data)
    # Pad the axes slightly so the curve does not touch the frame.
    plt.axis([first_year - 1, endyear, min(data) * 0.9, max(data) * 1.07])
    plt.xlabel(descr)
    plt.show()
# One chart per run; uncomment a line below to plot a different series.
myplot(oil_consumption_per_capita, "world oil consumption per capita")
#myplot(prediction(consumption_barrels, 10), 'world oil consumption with prediction assuming constant growth at current average', 2024)
#myplot(diff_serie(consumption_barrels), 'consumption_growth rate of world oil consumption in percents')
#myplot(consumption_barrels, 'world oil consumption (yearly in thousand of barrels)');
#myplot(world_population, "world population")
#myplot(world_fertility_rate, "world fertility rate")
#myplot(reserves_barrels, 'world oil proven reserves');
#myplot(enough_for_years, 'world reserves will last for y years at given year consumption');
#myplot(energy_consumption, 'world energy consumption in Mtoe');
#myplot(prediction(energy_consumption, 20), 'world energy consumption in Mtoe and prediction', 2034);
#myplot(gas_consumption, "world natural gas consumption (bcm)")
xerxes2/gpodder | src/gpodder/gtkui/frmntl/episodes.py | 2 | 11045 | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import hildon
import gpodder
_ = gpodder.gettext
from gpodder import util
from gpodder.gtkui.interface.common import BuilderWidget
from gpodder.gtkui.model import EpisodeListModel
from gpodder.gtkui.model import PodcastChannelProxy
from gpodder.gtkui.frmntl.episodeactions import gPodderEpisodeActions
class gPodderEpisodes(BuilderWidget):
    def new(self):
        """Build the episode-list window (Hildon/Maemo 5 UI).

        Sets up the empty-list label styling, the per-episode actions
        dialog, the tap-and-hold context menu and the window's app menu.
        """
        # No channel selected until the caller assigns one.
        self.channel = None

        # Styling for the label that appears when the list is empty
        hildon.hildon_helper_set_logical_font(self.empty_label, \
                'LargeSystemFont')
        hildon.hildon_helper_set_logical_color(self.empty_label, \
                gtk.RC_FG, gtk.STATE_NORMAL, 'SecondaryTextColor')

        # Per-episode actions dialog; all behaviour is delegated back to
        # callbacks on this object.
        self.episode_actions = gPodderEpisodeActions(self.main_window, \
                episode_list_status_changed=self.episode_list_status_changed, \
                episode_is_downloading=self.episode_is_downloading, \
                show_episode_shownotes=self.show_episode_shownotes, \
                playback_episodes=self.playback_episodes, \
                download_episode_list=self.download_episode_list, \
                show_episode_in_download_manager=self.show_episode_in_download_manager, \
                add_download_task_monitor=self.add_download_task_monitor, \
                remove_download_task_monitor=self.remove_download_task_monitor, \
                for_each_episode_set_task_status=self.for_each_episode_set_task_status, \
                delete_episode_list=self.delete_episode_list)

        # Tap-and-hold (aka "long press") context menu
        self.touched_episode = None
        self.context_menu = gtk.Menu()
        # "Emulate" hildon_gtk_menu_new
        self.context_menu.set_name('hildon-context-sensitive-menu')
        self.context_menu.append(self.action_shownotes.create_menu_item())
        self.context_menu.append(self.action_download.create_menu_item())
        self.context_menu.append(self.action_delete.create_menu_item())
        self.context_menu.append(gtk.SeparatorMenuItem())
        self.context_menu.append(self.action_keep.create_menu_item())
        self.context_menu.append(self.action_mark_as_old.create_menu_item())
        self.context_menu.show_all()
        self.treeview.tap_and_hold_setup(self.context_menu)

        # Workaround for Maemo bug XXX
        self.button_search_episodes_clear.set_name('HildonButton-thumb')

        # App menu: one button per channel-level action.
        appmenu = hildon.AppMenu()
        for action in (self.action_update, \
                self.action_rename, \
                self.action_login, \
                self.action_website, \
                self.action_unsubscribe, \
                self.action_check_for_new_episodes, \
                self.action_delete_episodes):
            button = gtk.Button()
            action.connect_proxy(button)
            appmenu.append(button)

        self.pause_sub_button = hildon.CheckButton(gtk.HILDON_SIZE_FINGER_HEIGHT)
        self.pause_sub_button.set_label(_('Pause subscription'))
        self.pause_sub_button.connect('toggled', self.on_pause_subscription_button_toggled)
        appmenu.append(self.pause_sub_button)

        self.keep_episodes_button = hildon.CheckButton(gtk.HILDON_SIZE_FINGER_HEIGHT)
        self.keep_episodes_button.set_label(_('Keep episodes'))
        self.keep_episodes_button.connect('toggled', self.on_keep_episodes_button_toggled)
        appmenu.append(self.keep_episodes_button)

        # NOTE(review): "filter" shadows the builtin; harmless here, but
        # worth renaming if this loop is ever touched.
        for filter in (self.item_view_episodes_all, \
                self.item_view_episodes_undeleted, \
                self.item_view_episodes_downloaded):
            button = gtk.ToggleButton()
            filter.connect_proxy(button)
            appmenu.add_filter(button)

        appmenu.show_all()
        self.main_window.set_app_menu(appmenu)
def on_pause_subscription_button_toggled(self, widget):
new_value = not widget.get_active()
if new_value != self.channel.feed_update_enabled:
self.channel.feed_update_enabled = new_value
self.cover_downloader.reload_cover_from_disk(self.channel)
self.channel.save()
self.update_podcast_list_model(urls=[self.channel.url])
def on_keep_episodes_button_toggled(self, widget):
new_value = widget.get_active()
if new_value != self.channel.channel_is_locked:
self.channel.channel_is_locked = new_value
self.channel.update_channel_lock()
for episode in self.channel.get_all_episodes():
episode.mark(is_locked=self.channel.channel_is_locked)
self.update_podcast_list_model(urls=[self.channel.url])
self.episode_list_status_changed(self.channel.get_all_episodes())
    def on_rename_button_clicked(self, widget):
        """Prompt the user for a new podcast title and apply it.

        No-op if no channel is selected or the title did not change.
        """
        if self.channel is None:
            return
        new_title = self.show_text_edit_dialog(_('Rename podcast'), \
                _('New name:'), self.channel.title, \
                affirmative_text=_('Rename'))
        if new_title is not None and new_title != self.channel.title:
            self.channel.set_custom_title(new_title)
            self.main_window.set_title(self.channel.title)
            self.channel.save()
            self.show_message(_('Podcast renamed: %s') % new_title)
            self.update_podcast_list_model(urls=[self.channel.url])
    def on_login_button_clicked(self, widget):
        """Show the login dialog and persist the entered credentials."""
        accept, auth_data = self.show_login_dialog(_('Login to %s') % \
                self.channel.title, '', \
                self.channel.username, \
                self.channel.password)
        if accept:
            self.channel.username, self.channel.password = auth_data
            self.channel.save()
    def on_website_button_clicked(self, widget):
        """Open the podcast's website in the system browser, if any."""
        if self.channel is not None:
            util.open_website(self.channel.link)
    def on_update_button_clicked(self, widget):
        """Delegate to the main 'update this channel' menu action."""
        self.on_itemUpdateChannel_activate()
    def on_unsubscribe_button_clicked(self, widget):
        """Close this window, then start channel removal in the main UI."""
        self.on_delete_event(widget, None)
        self.on_itemRemoveChannel_activate(widget)
    def on_episode_selected(self, treeview, path, column):
        """Row-activated handler: resolve the row to an episode and show
        the episode actions for it."""
        model = treeview.get_model()
        episode = model.get_value(model.get_iter(path), \
                EpisodeListModel.C_EPISODE)
        self.episode_actions.show_episode(episode)
    def on_delete_event(self, widget, event):
        """Hide (not destroy) the window and reset its state.

        Returns True to stop the default delete-event handling so the
        window can be reused later.
        """
        self.main_window.hide()
        self.channel = None
        self.hide_episode_search()
        return True
    def on_treeview_button_press(self, widget, event):
        """Prepare the tap-and-hold context menu for the touched episode.

        Resolves the tapped coordinates to an episode, adjusts which
        actions (delete/keep/download) are visible for it, and remembers
        it in self.touched_episode for the on_*_button_clicked callbacks.
        """
        result = self.treeview.get_path_at_pos(int(event.x), int(event.y))
        if result is not None:
            path, column, x, y = result
            model = self.treeview.get_model()
            episode = model.get_value(model.get_iter(path), \
                    EpisodeListModel.C_EPISODE)

            self.action_delete.set_property('visible', not episode.is_locked)

            if episode.was_downloaded():
                self.action_keep.set_property('visible', True)
                self.action_download.set_property('visible', not episode.was_downloaded(and_exists=True))
            else:
                self.action_keep.set_property('visible', False)
                self.action_download.set_property('visible', not self.episode_is_downloading(episode))

            # Cleared first, presumably so the set_active() calls below do
            # not fire the toggle callbacks (they no-op on None) for the
            # previously touched episode -- confirm against the handlers.
            self.touched_episode = None

            self.action_keep.set_active(episode.is_locked)
            self.action_mark_as_old.set_active(not episode.is_played)

            self.touched_episode = episode
        else:
            self.touched_episode = None
    def on_shownotes_button_clicked(self, widget):
        """Show the shownotes of the episode last touched in the list."""
        if self.touched_episode is not None:
            self.show_episode_shownotes(self.touched_episode)
    def on_download_button_clicked(self, widget):
        """Queue a download for the episode last touched in the list."""
        if self.touched_episode is not None:
            self.show_message(_('Downloading episode'))
            self.download_episode_list([self.touched_episode])
    def on_delete_button_clicked(self, widget):
        """Delete the episode last touched in the list."""
        if self.touched_episode is not None:
            self.delete_episode_list([self.touched_episode])
    def on_keep_button_clicked(self, widget):
        """Toggle the archive lock of the touched episode and refresh UI."""
        if self.touched_episode is not None:
            self.touched_episode.mark(is_locked=not self.touched_episode.is_locked)
            self.episode_list_status_changed([self.touched_episode])
    def on_mark_as_old_button_clicked(self, widget):
        """Toggle the played/new state of the touched episode."""
        if self.touched_episode is not None:
            self.touched_episode.mark(is_played=not self.touched_episode.is_played)
            self.episode_list_status_changed([self.touched_episode])
    def on_check_for_new_episodes_button_clicked(self, widget):
        """Trigger a feed update for all podcasts (from the app menu)."""
        self.show_message(_('Checking for new episodes...'))
        self.on_itemUpdate_activate(widget)
    def on_delete_episodes_button_clicked(self, widget):
        """Open the delete-episodes window, scoped to the current channel
        unless the "all episodes" pseudo-channel is being shown."""
        all_episodes = isinstance(self.channel, PodcastChannelProxy)
        if all_episodes:
            self.show_delete_episodes_window()
        else:
            self.show_delete_episodes_window(self.channel)
    def show(self):
        """Show the episode-list window, adapting menu items to the channel.

        When self.channel is the special "all episodes" pseudo-channel
        (PodcastChannelProxy), the per-podcast actions (rename, login,
        unsubscribe, update) and the pause/keep check buttons are hidden;
        otherwise the check buttons are synced with the channel state.
        """
        # Check if we are displaying the "all episodes" view
        all_episodes = isinstance(self.channel, PodcastChannelProxy)
        for action in (self.action_rename, \
                self.action_login, \
                self.action_unsubscribe, \
                self.action_update):
            action.set_visible(not all_episodes)

        self.action_check_for_new_episodes.set_visible(all_episodes)
        self.action_delete_episodes.set_visible(True)
        self.action_website.set_visible(True)

        if all_episodes:
            self.pause_sub_button.hide()
            self.keep_episodes_button.hide()
        else:
            self.pause_sub_button.show()
            self.keep_episodes_button.show()
            # Button is "Pause subscription" -> inverse of update-enabled.
            self.pause_sub_button.set_active(\
                    not self.channel.feed_update_enabled)
            self.keep_episodes_button.set_active(\
                    self.channel.channel_is_locked)

        self.main_window.set_title(self.channel.title)
        self.main_window.show()
        self.treeview.grab_focus()
| gpl-3.0 | 5,182,616,534,033,793,000 | 41.644788 | 105 | 0.632413 | false |
benjamin9999/pika | setup.py | 5 | 2198 | from setuptools import setup
import os
# Conditionally include additional modules for docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
requirements = list()
if on_rtd:
requirements.append('tornado')
requirements.append('twisted')
#requirements.append('pyev')
long_description = ('Pika is a pure-Python implementation of the AMQP 0-9-1 '
'protocol that tries to stay fairly independent of the '
'underlying network support library. Pika was developed '
'primarily for use with RabbitMQ, but should also work '
'with other AMQP 0-9-1 brokers.')
setup(name='pika',
version='0.10.0',
description='Pika Python AMQP Client Library',
long_description=open('README.rst').read(),
maintainer='Gavin M. Roy',
maintainer_email='gavinmroy@gmail.com',
url='https://pika.readthedocs.org ',
packages=['pika', 'pika.adapters'],
license='BSD',
install_requires=requirements,
package_data={'': ['LICENSE', 'README.rst']},
extras_require={'tornado': ['tornado'],
'twisted': ['twisted'],
'libev': ['pyev']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Communications',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking'],
zip_safe=True)
| bsd-3-clause | -1,157,360,849,307,871,700 | 42.098039 | 78 | 0.588717 | false |
OpenNingia/l5r-character-manager-3 | l5r/util/log.py | 1 | 2083 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
__author__ = 'Daniele'
import os
import sys
import logging
import logging.handlers
import l5r.api as api
def log_setup(base_path, base_name):
    """Configure root logging for the application.

    Creates *base_path* if it does not exist, sets the root logger to
    DEBUG, attaches a midnight-rotating file handler (15 backups) writing
    to *base_name* inside *base_path*, and -- for non-frozen builds --
    also mirrors output to the console.
    """
    # ensure the log directory exists
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)

    # one shared formatter for file and console output
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')

    # rotate the log file at midnight, keeping 15 old files
    file_handler = logging.handlers.TimedRotatingFileHandler(
        filename=os.path.join(base_path, base_name),
        when='midnight',
        backupCount=15)
    file_handler.setFormatter(formatter)
    root_logger.addHandler(file_handler)

    # console output only for non-frozen (non-bundled) builds
    if not hasattr(sys, "frozen"):
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        root_logger.addHandler(console_handler)
# Initialize application-wide logging as soon as this module is imported.
log_path = api.get_user_data_path('logs')
log_setup(log_path, 'l5rcm.log')

# Named loggers used throughout the code base.
app = logging.getLogger('app')
ui = logging.getLogger('ui')
model = logging.getLogger('model')
# NOTE(review): this rebinding shadows the `l5r.api` module imported at the
# top of the file; any later use of `api.*` in this module would hit the
# logger instead of the package -- confirm this is intentional.
api = logging.getLogger('api')
rules = logging.getLogger('rules')
| gpl-3.0 | 1,630,793,402,676,458,200 | 29.188406 | 70 | 0.68507 | false |
lookout/dd-agent | checks/system/unix.py | 4 | 35157 | """
Unix system checks.
"""
# stdlib
import operator
import platform
import re
import sys
import time
# 3rd party
import uptime
try:
import psutil
except ImportError:
psutil = None
# project
from checks import Check
from util import get_hostname
from utils.platform import Platform
from utils.subprocess_output import get_subprocess_output
def to_float(s):
    """Locale-resilient float conversion: accepts ',' as the decimal
    separator (e.g. iostat output under European locales)."""
    return float(s.replace(",", "."))
class IO(Check):
    """Collect per-device disk I/O statistics by shelling out to ``iostat``.

    Parsing is platform specific (Linux, OS X, SunOS, FreeBSD); SunOS and
    FreeBSD column names are translated to the Linux iostat vocabulary by
    :meth:`xlate`.
    """

    def __init__(self, logger):
        Check.__init__(self, logger)
        # Pre-compiled patterns used to parse iostat output:
        # header names, device names, and decimal values respectively.
        self.header_re = re.compile(r'([%\\/\-_a-zA-Z0-9]+)[\s+]?')
        self.item_re = re.compile(r'^([a-zA-Z0-9\/]+)')
        self.value_re = re.compile(r'\d+\.\d+')

    def _parse_linux2(self, output):
        """Parse Linux ``iostat -x`` output into {device: {column: value}}.

        Only the second sample block (current interval, not since-boot)
        is used: split('Device:')[2].
        """
        recentStats = output.split('Device:')[2].split('\n')
        header = recentStats[0]
        headerNames = re.findall(self.header_re, header)
        device = None

        ioStats = {}

        for statsIndex in range(1, len(recentStats)):
            row = recentStats[statsIndex]

            if not row:
                # Ignore blank lines.
                continue

            deviceMatch = self.item_re.match(row)

            if deviceMatch is not None:
                # Sometimes device names span two lines.
                device = deviceMatch.groups()[0]
            else:
                continue

            values = re.findall(self.value_re, row)

            if not values:
                # Sometimes values are on the next line so we encounter
                # instances of [].
                continue

            ioStats[device] = {}

            for headerIndex in range(len(headerNames)):
                headerName = headerNames[headerIndex]
                ioStats[device][headerName] = values[headerIndex]

        return ioStats

    def _parse_darwin(self, output):
        """Parse OS X ``iostat`` output: first line lists disks, last line
        holds the latest sample (3 columns per disk: KB/t, tps, MB/s)."""
        lines = [l.split() for l in output.split("\n") if len(l) > 0]
        disks = lines[0]
        lastline = lines[-1]
        io = {}
        for idx, disk in enumerate(disks):
            kb_t, tps, mb_s = map(float, lastline[(3 * idx):(3 * idx) + 3])  # 3 cols at a time
            io[disk] = {
                'system.io.bytes_per_s': mb_s * 2**20,
            }
        return io

    def xlate(self, metric_name, os_name):
        """Standardize on linux metric names"""
        # NOTE(review): if os_name is neither "sunos" nor "freebsd",
        # `names` is unbound and this raises NameError -- callers below
        # only ever pass those two values, so this is latent only.
        if os_name == "sunos":
            names = {
                "wait": "await",
                "svc_t": "svctm",
                "%b": "%util",
                "kr/s": "rkB/s",
                "kw/s": "wkB/s",
                "actv": "avgqu-sz",
            }
        elif os_name == "freebsd":
            names = {
                "svc_t": "await",
                "%b": "%util",
                "kr/s": "rkB/s",
                "kw/s": "wkB/s",
                "wait": "avgqu-sz",
            }
        # translate if possible
        return names.get(metric_name, metric_name)

    def check(self, agentConfig):
        """Capture io stats.

        @rtype dict
        @return {"device": {"metric": value, "metric": value}, ...}
        """
        io = {}
        try:
            if Platform.is_linux():
                stdout, _, _ = get_subprocess_output(['iostat', '-d', '1', '2', '-x', '-k'], self.logger)

                #                 Linux 2.6.32-343-ec2 (ip-10-35-95-10)   12/11/2012      _x86_64_        (2 CPU)
                #
                # Device:         rrqm/s   wrqm/s     r/s     w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util
                # sda1              0.00    17.61    0.26   32.63     4.23   201.04    12.48     0.16    4.81   0.53   1.73
                # sdb               0.00     2.68    0.19    3.84     5.79    26.07    15.82     0.02    4.93   0.22   0.09
                # sdg               0.00     0.13    2.29    3.84   100.53    30.61    42.78     0.05    8.41   0.88   0.54
                # sdf               0.00     0.13    2.30    3.84   100.54    30.61    42.78     0.06    9.12   0.90   0.55
                # md0               0.00     0.00    0.05    3.37     1.41    30.01    18.35     0.00    0.00   0.00   0.00
                #
                # Device:         rrqm/s   wrqm/s     r/s     w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util
                # sda1              0.00     0.00    0.00   10.89     0.00    43.56     8.00     0.03    2.73   2.73   2.97
                # sdb               0.00     0.00    0.00    2.97     0.00    11.88     8.00     0.00    0.00   0.00   0.00
                # sdg               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
                # sdf               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
                # md0               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
                io.update(self._parse_linux2(stdout))

            elif sys.platform == "sunos5":
                output, _, _ = get_subprocess_output(["iostat", "-x", "-d", "1", "2"], self.logger)
                iostat = output.splitlines()

                #                   extended device statistics <-- since boot
                # device      r/s    w/s   kr/s   kw/s wait actv  svc_t  %w  %b
                # ramdisk1    0.0    0.0    0.1    0.1  0.0  0.0    0.0   0   0
                # sd0         0.0    0.0    0.0    0.0  0.0  0.0    0.0   0   0
                # sd1        79.9  149.9 1237.6 6737.9  0.0  0.5    2.3   0  11
                #                   extended device statistics <-- past second
                # device      r/s    w/s   kr/s   kw/s wait actv  svc_t  %w  %b
                # ramdisk1    0.0    0.0    0.0    0.0  0.0  0.0    0.0   0   0
                # sd0         0.0    0.0    0.0    0.0  0.0  0.0    0.0   0   0
                # sd1         0.0  139.0    0.0 1850.6  0.0  0.0    0.1   0   1

                # discard the first half of the display (stats since boot)
                lines = [l for l in iostat if len(l) > 0]
                lines = lines[len(lines)/2:]

                assert "extended device statistics" in lines[0]
                headers = lines[1].split()
                assert "device" in headers
                for l in lines[2:]:
                    cols = l.split()
                    # cols[0] is the device
                    # cols[1:] are the values
                    io[cols[0]] = {}
                    for i in range(1, len(cols)):
                        io[cols[0]][self.xlate(headers[i], "sunos")] = cols[i]

            elif sys.platform.startswith("freebsd"):
                output, _, _ = get_subprocess_output(["iostat", "-x", "-d", "1", "2"], self.logger)
                iostat = output.splitlines()

                # Be careful!
                # It looks like SunOS, but some columms (wait, svc_t) have different meaning
                #                        extended device statistics
                # device     r/s   w/s    kr/s    kw/s wait svc_t  %b
                # ad0        3.1   1.3    49.9    18.8    0   0.7   0
                #                        extended device statistics
                # device     r/s   w/s    kr/s    kw/s wait svc_t  %b
                # ad0        0.0   2.0     0.0    31.8    0   0.2   0

                # discard the first half of the display (stats since boot)
                lines = [l for l in iostat if len(l) > 0]
                lines = lines[len(lines)/2:]

                assert "extended device statistics" in lines[0]
                headers = lines[1].split()
                assert "device" in headers
                for l in lines[2:]:
                    cols = l.split()
                    # cols[0] is the device
                    # cols[1:] are the values
                    io[cols[0]] = {}
                    for i in range(1, len(cols)):
                        io[cols[0]][self.xlate(headers[i], "freebsd")] = cols[i]

            elif sys.platform == 'darwin':
                iostat, _, _ = get_subprocess_output(['iostat', '-d', '-c', '2', '-w', '1'], self.logger)
                #          disk0           disk1          <-- number of disks
                #    KB/t tps  MB/s     KB/t tps  MB/s
                #   21.11  23  0.47    20.01   0  0.00
                #    6.67   3  0.02     0.00   0  0.00    <-- line of interest
                io = self._parse_darwin(iostat)

            else:
                return False

            # If we filter devices, do it know.
            device_blacklist_re = agentConfig.get('device_blacklist_re', None)
            if device_blacklist_re:
                filtered_io = {}
                for device, stats in io.iteritems():
                    if not device_blacklist_re.match(device):
                        filtered_io[device] = stats
            else:
                filtered_io = io
            return filtered_io

        except Exception:
            self.logger.exception("Cannot extract IO statistics")
            return False
class Load(Check):
    """Collect 1/5/15-minute load averages, plus per-core normalized
    variants when the core count is available in agentConfig."""

    def check(self, agentConfig):
        if Platform.is_linux():
            try:
                with open('/proc/loadavg', 'r') as load_avg:
                    # Note: this local shadows the module-level `uptime`
                    # import inside this method only.
                    uptime = load_avg.readline().strip()
            except Exception:
                self.logger.exception('Cannot extract load')
                return False

        elif sys.platform in ('darwin', 'sunos5') or sys.platform.startswith("freebsd"):
            # Get output from uptime
            try:
                uptime, _, _ = get_subprocess_output(['uptime'], self.logger)
            except Exception:
                self.logger.exception('Cannot extract load')
                return False

        # NOTE(review): on any other platform neither branch above binds
        # `uptime`, so the next line raises an uncaught NameError -- verify
        # the supported-platform assumption upstream.
        # Split out the 3 load average values
        load = [res.replace(',', '.') for res in re.findall(r'([0-9]+[\.,]\d+)', uptime)]

        # Normalize load by number of cores
        try:
            cores = int(agentConfig.get('system_stats').get('cpuCores'))
            assert cores >= 1, "Cannot determine number of cores"
            # Compute a normalized load, named .load.norm to make it easy to find next to .load
            return {'system.load.1': float(load[0]),
                    'system.load.5': float(load[1]),
                    'system.load.15': float(load[2]),
                    'system.load.norm.1': float(load[0])/cores,
                    'system.load.norm.5': float(load[1])/cores,
                    'system.load.norm.15': float(load[2])/cores,
                    }
        except Exception:
            # No normalized load available
            return {'system.load.1': float(load[0]),
                    'system.load.5': float(load[1]),
                    'system.load.15': float(load[2])}
class Memory(Check):
    """Collect physical-memory and swap statistics.

    Sources per platform: /proc/meminfo (Linux), psutil (OS X),
    sysctl/swapinfo (FreeBSD), kstat zone_memory_cap (SunOS zones).
    All values are reported in MB (see FIXME notes below).
    """

    def __init__(self, logger):
        Check.__init__(self, logger)
        macV = None
        if sys.platform == 'darwin':
            macV = platform.mac_ver()
            macV_minor_version = int(re.match(r'10\.(\d+)\.?.*', macV[0]).group(1))

        # Output from top is slightly modified on OS X 10.6 (case #28239) and greater
        if macV and (macV_minor_version >= 6):
            self.topIndex = 6
        else:
            self.topIndex = 5

        # Page size in bytes, needed to convert SunOS page counts.
        self.pagesize = 0
        if sys.platform == 'sunos5':
            try:
                pgsz, _, _ = get_subprocess_output(['pagesize'], self.logger)
                self.pagesize = int(pgsz.strip())
            except Exception:
                # No page size available
                pass

    def check(self, agentConfig):
        """Return a dict of memory/swap metrics (MB), or False on failure."""
        if Platform.is_linux():
            try:
                with open('/proc/meminfo', 'r') as mem_info:
                    lines = mem_info.readlines()
            except Exception:
                self.logger.exception('Cannot get memory metrics from /proc/meminfo')
                return False

            # NOTE: not all of the stats below are present on all systems as
            # not all kernel versions report all of them.
            #
            # $ cat /proc/meminfo
            # MemTotal:        7995360 kB
            # MemFree:         1045120 kB
            # MemAvailable:    1253920 kB
            # Buffers:          226284 kB
            # Cached:           775516 kB
            # SwapCached:       248868 kB
            # Active:          1004816 kB
            # Inactive:        1011948 kB
            # Active(anon):     455152 kB
            # Inactive(anon):   584664 kB
            # Active(file):     549664 kB
            # Inactive(file):   427284 kB
            # Unevictable:     4392476 kB
            # Mlocked:         4392476 kB
            # SwapTotal:      11120632 kB
            # SwapFree:       10555044 kB
            # Dirty:              2948 kB
            # Writeback:             0 kB
            # AnonPages:       5203560 kB
            # Mapped:            50520 kB
            # Shmem:             10108 kB
            # Slab:             161300 kB
            # SReclaimable:     136108 kB
            # SUnreclaim:        25192 kB
            # KernelStack:        3160 kB
            # PageTables:        26776 kB
            # NFS_Unstable:          0 kB
            # Bounce:                0 kB
            # WritebackTmp:          0 kB
            # CommitLimit:    15118312 kB
            # Committed_AS:    6703508 kB
            # VmallocTotal:   34359738367 kB
            # VmallocUsed:      400668 kB
            # VmallocChunk:   34359329524 kB
            # HardwareCorrupted:     0 kB
            # HugePages_Total:       0
            # HugePages_Free:        0
            # HugePages_Rsvd:        0
            # HugePages_Surp:        0
            # Hugepagesize:       2048 kB
            # DirectMap4k:       10112 kB
            # DirectMap2M:     8243200 kB

            regexp = re.compile(r'^(\w+):\s+([0-9]+)')  # We run this several times so one-time compile now
            meminfo = {}
            for line in lines:
                try:
                    match = re.search(regexp, line)
                    if match is not None:
                        meminfo[match.group(1)] = match.group(2)
                except Exception:
                    self.logger.exception("Cannot parse /proc/meminfo")

            memData = {}

            # Physical memory
            # FIXME units are in MB, we should use bytes instead
            try:
                memData['physTotal'] = int(meminfo.get('MemTotal', 0)) / 1024
                memData['physFree'] = int(meminfo.get('MemFree', 0)) / 1024
                memData['physBuffers'] = int(meminfo.get('Buffers', 0)) / 1024
                memData['physCached'] = int(meminfo.get('Cached', 0)) / 1024
                memData['physShared'] = int(meminfo.get('Shmem', 0)) / 1024
                memData['physSlab'] = int(meminfo.get('Slab', 0)) / 1024
                memData['physPageTables'] = int(meminfo.get('PageTables', 0)) / 1024
                memData['physUsed'] = memData['physTotal'] - memData['physFree']

                if 'MemAvailable' in meminfo:
                    memData['physUsable'] = int(meminfo.get('MemAvailable', 0)) / 1024
                else:
                    # Usable is relative since cached and buffers are actually used to speed things up.
                    memData['physUsable'] = memData['physFree'] + memData['physBuffers'] + memData['physCached']

                if memData['physTotal'] > 0:
                    memData['physPctUsable'] = float(memData['physUsable']) / float(memData['physTotal'])
            except Exception:
                self.logger.exception('Cannot compute stats from /proc/meminfo')

            # Swap
            # FIXME units are in MB, we should use bytes instead
            try:
                memData['swapTotal'] = int(meminfo.get('SwapTotal', 0)) / 1024
                memData['swapFree'] = int(meminfo.get('SwapFree', 0)) / 1024
                memData['swapCached'] = int(meminfo.get('SwapCached', 0)) / 1024

                memData['swapUsed'] = memData['swapTotal'] - memData['swapFree']

                if memData['swapTotal'] > 0:
                    memData['swapPctFree'] = float(memData['swapFree']) / float(memData['swapTotal'])
            except Exception:
                self.logger.exception('Cannot compute swap stats')

            return memData

        elif sys.platform == 'darwin':
            if psutil is None:
                self.logger.error("psutil must be installed on MacOS to collect memory metrics")
                return False

            phys_memory = psutil.virtual_memory()
            swap = psutil.swap_memory()
            return {'physUsed': phys_memory.used / float(1024**2),
                    'physFree': phys_memory.free / float(1024**2),
                    'physUsable': phys_memory.available / float(1024**2),
                    'physPctUsable': (100 - phys_memory.percent) / 100.0,
                    'swapUsed': swap.used / float(1024**2),
                    'swapFree': swap.free / float(1024**2)}

        elif sys.platform.startswith("freebsd"):
            try:
                output, _, _ = get_subprocess_output(['sysctl', 'vm.stats.vm'], self.logger)
                sysctl = output.splitlines()
            except Exception:
                self.logger.exception('getMemoryUsage')
                return False

            # ...
            # vm.stats.vm.v_page_size: 4096
            # vm.stats.vm.v_page_count: 759884
            # vm.stats.vm.v_wire_count: 122726
            # vm.stats.vm.v_active_count: 109350
            # vm.stats.vm.v_cache_count: 17437
            # vm.stats.vm.v_inactive_count: 479673
            # vm.stats.vm.v_free_count: 30542
            # ...

            # We run this several times so one-time compile now
            regexp = re.compile(r'^vm\.stats\.vm\.(\w+):\s+([0-9]+)')
            meminfo = {}
            for line in sysctl:
                try:
                    match = re.search(regexp, line)
                    if match is not None:
                        meminfo[match.group(1)] = match.group(2)
                except Exception:
                    self.logger.exception("Cannot parse sysctl vm.stats.vm output")

            memData = {}

            # Physical memory
            try:
                pageSize = int(meminfo.get('v_page_size'))

                memData['physTotal'] = (int(meminfo.get('v_page_count', 0))
                                        * pageSize) / 1048576
                memData['physFree'] = (int(meminfo.get('v_free_count', 0))
                                       * pageSize) / 1048576
                memData['physCached'] = (int(meminfo.get('v_cache_count', 0))
                                         * pageSize) / 1048576
                # BUGFIX: the default 0 was previously passed as int()'s
                # *base* argument -- int(meminfo.get('v_active_count'), 0)
                # -- which raised TypeError whenever the key was missing.
                memData['physUsed'] = ((int(meminfo.get('v_active_count', 0)) +
                                        int(meminfo.get('v_wire_count', 0)))
                                       * pageSize) / 1048576
                memData['physUsable'] = ((int(meminfo.get('v_free_count', 0)) +
                                          int(meminfo.get('v_cache_count', 0)) +
                                          int(meminfo.get('v_inactive_count', 0))) *
                                         pageSize) / 1048576

                if memData['physTotal'] > 0:
                    memData['physPctUsable'] = float(memData['physUsable']) / float(memData['physTotal'])
            except Exception:
                self.logger.exception('Cannot compute stats from /proc/meminfo')

            # Swap
            try:
                output, _, _ = get_subprocess_output(['swapinfo', '-m'], self.logger)
                sysctl = output.splitlines()
            except Exception:
                self.logger.exception('getMemoryUsage')
                return False

            # ...
            # Device          1M-blocks     Used    Avail Capacity
            # /dev/ad0s1b           570        0      570     0%
            # ...

            assert "Device" in sysctl[0]

            try:
                memData['swapTotal'] = 0
                memData['swapFree'] = 0
                memData['swapUsed'] = 0
                for line in sysctl[1:]:
                    if len(line) > 0:
                        line = line.split()
                        memData['swapTotal'] += int(line[1])
                        memData['swapFree'] += int(line[3])
                        memData['swapUsed'] += int(line[2])
            except Exception:
                self.logger.exception('Cannot compute stats from swapinfo')

            return memData

        elif sys.platform == 'sunos5':
            try:
                memData = {}
                cmd = ["kstat", "-m", "memory_cap", "-c", "zone_memory_cap", "-p"]
                output, _, _ = get_subprocess_output(cmd, self.logger)
                kmem = output.splitlines()

                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:anon_alloc_fail   0
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:anonpgin  0
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:class     zone_memory_cap
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:crtime    16359935.0680834
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:execpgin  185
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:fspgin    2556
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:n_pf_throttle     0
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:n_pf_throttle_usec        0
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:nover     0
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:pagedout  0
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:pgpgin    2741
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:physcap   536870912  <--
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:rss       115544064  <--
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:snaptime  16787393.9439095
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:swap      91828224   <--
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:swapcap   1073741824 <--
                # memory_cap:360:53aa9b7e-48ba-4152-a52b-a6368c:zonename  53aa9b7e-48ba-4152-a52b-a6368c3d9e7c

                # turn memory_cap:360:zone_name:key value
                # into { "key": value, ...}
                kv = [l.strip().split() for l in kmem if len(l) > 0]
                entries = dict([(k.split(":")[-1], v) for (k, v) in kv])

                # extract rss, physcap, swap, swapcap, turn into MB
                convert = lambda v: int(long(v))/2**20
                memData["physTotal"] = convert(entries["physcap"])
                memData["physUsed"] = convert(entries["rss"])
                memData["physFree"] = memData["physTotal"] - memData["physUsed"]
                memData["swapTotal"] = convert(entries["swapcap"])
                memData["swapUsed"] = convert(entries["swap"])
                memData["swapFree"] = memData["swapTotal"] - memData["swapUsed"]

                if memData['swapTotal'] > 0:
                    memData['swapPctFree'] = float(memData['swapFree']) / float(memData['swapTotal'])
                return memData
            except Exception:
                self.logger.exception("Cannot compute mem stats from kstat -c zone_memory_cap")
                return False

        else:
            return False
class Processes(Check):
    """Snapshot of running processes, collected by shelling out to ``ps``."""

    def check(self, agentConfig):
        """Return {'processes': [...], 'apiKey': ..., 'host': ...}.

        Each process is the whitespace-split, stripped fields of one
        ``ps aux``/``auxww`` line (max 11 fields, command kept whole).
        Returns False if ``ps`` cannot be executed.
        """
        process_exclude_args = agentConfig.get('exclude_process_args', False)
        if process_exclude_args:
            ps_arg = 'aux'
        else:
            # 'ww' avoids truncating long command lines
            ps_arg = 'auxww'

        # Get output from ps
        try:
            output, _, _ = get_subprocess_output(['ps', ps_arg], self.logger)
            processLines = output.splitlines()  # Also removes a trailing empty line
        except Exception:
            # was StandardError, which no longer exists on Python 3
            self.logger.exception('getProcesses')
            return False

        del processLines[0]  # Removes the headers

        processes = []
        for line in processLines:
            line = line.split(None, 10)
            # List comprehension instead of map(): identical on Python 2,
            # and avoids serializing a lazy map object on Python 3.
            processes.append([field.strip() for field in line])

        return {'processes': processes,
                'apiKey': agentConfig['api_key'],
                'host': get_hostname(agentConfig)}
class Cpu(Check):
    """Aggregate CPU usage percentages (user/system/wait/idle/stolen/guest)
    across all CPUs, parsed from ``mpstat``/``iostat`` per platform."""

    def check(self, agentConfig):
        """Return an aggregate of CPU stats across all CPUs
        When figures are not available, False is sent back.
        """
        def format_results(us, sy, wa, idle, st, guest=None):
            # Drop metrics the platform did not provide (None values).
            data = {'cpuUser': us, 'cpuSystem': sy, 'cpuWait': wa, 'cpuIdle': idle, 'cpuStolen': st, 'cpuGuest': guest}
            return dict((k, v) for k, v in data.iteritems() if v is not None)

        def get_value(legend, data, name, filter_value=None):
            "Using the legend and a metric name, get the value or None from the data line"
            if name in legend:
                value = to_float(data[legend.index(name)])
                if filter_value is not None:
                    if value > filter_value:
                        return None
                return value
            else:
                # FIXME return a float or False, would trigger type error if not python
                self.logger.debug("Cannot extract cpu value %s from %s (%s)" % (name, data, legend))
                return 0.0

        try:
            if Platform.is_linux():
                output, _, _ = get_subprocess_output(['mpstat', '1', '3'], self.logger)
                mpstat = output.splitlines()
                # topdog@ip:~$ mpstat 1 3
                # Linux 2.6.32-341-ec2 (ip)   01/19/2012  _x86_64_    (2 CPU)
                #
                # 04:22:41 PM  CPU    %usr   %nice    %sys %iowait    %irq   %soft  %steal  %guest   %idle
                # 04:22:42 PM  all    0.00    0.00    0.00    0.00    0.00    0.00    0.00    0.00  100.00
                # 04:22:43 PM  all    0.00    0.00    0.00    0.00    0.00    0.00    0.00    0.00  100.00
                # 04:22:44 PM  all    0.00    0.00    0.00    0.00    0.00    0.00    0.00    0.00  100.00
                # Average:     all    0.00    0.00    0.00    0.00    0.00    0.00    0.00    0.00  100.00
                #
                # OR
                #
                # Thanks to Mart Visser to spotting this one.
                # blah:/etc/dd-agent# mpstat
                # Linux 2.6.26-2-xen-amd64 (atira)  02/17/2012  _x86_64_
                #
                # 05:27:03 PM  CPU    %user   %nice   %sys %iowait    %irq   %soft  %steal  %idle   intr/s
                # 05:27:03 PM  all    3.59    0.00    0.68    0.69    0.00   0.00    0.01   95.03    43.65
                #
                legend = [l for l in mpstat if "%usr" in l or "%user" in l]
                avg = [l for l in mpstat if "Average" in l]
                if len(legend) == 1 and len(avg) == 1:
                    headers = [h for h in legend[0].split() if h not in ("AM", "PM")]
                    data = avg[0].split()

                    # Userland
                    # Debian lenny says %user so we look for both
                    # One of them will be 0
                    cpu_metrics = {
                        "%usr": None, "%user": None, "%nice": None,
                        "%iowait": None, "%idle": None, "%sys": None,
                        "%irq": None, "%soft": None, "%steal": None,
                        "%guest": None
                    }

                    for cpu_m in cpu_metrics:
                        # filter_value=110 discards implausible percentages
                        cpu_metrics[cpu_m] = get_value(headers, data, cpu_m, filter_value=110)

                    # NOTE(review): this only *warns* on None values but the
                    # additions below would then raise TypeError (caught by
                    # the outer except, returning False) -- confirm intended.
                    if any([v is None for v in cpu_metrics.values()]):
                        self.logger.warning("Invalid mpstat data: %s" % data)

                    cpu_user = cpu_metrics["%usr"] + cpu_metrics["%user"] + cpu_metrics["%nice"]
                    cpu_system = cpu_metrics["%sys"] + cpu_metrics["%irq"] + cpu_metrics["%soft"]
                    cpu_wait = cpu_metrics["%iowait"]
                    cpu_idle = cpu_metrics["%idle"]
                    cpu_stolen = cpu_metrics["%steal"]
                    cpu_guest = cpu_metrics["%guest"]

                    return format_results(cpu_user,
                                          cpu_system,
                                          cpu_wait,
                                          cpu_idle,
                                          cpu_stolen,
                                          cpu_guest)
                else:
                    return False

            elif sys.platform == 'darwin':
                # generate 3 seconds of data
                # [' disk0   disk1       cpu     load average', '    KB/t tps  MB/s     KB/t tps  MB/s  us sy id   1m   5m   15m', '   21.23  13  0.27    17.85   7  0.13  14  7 79  1.04 1.27 1.31', '    4.00   3  0.01     5.00   8  0.04  12 10 78  1.04 1.27 1.31', '']
                iostats, _, _ = get_subprocess_output(['iostat', '-C', '-w', '3', '-c', '2'], self.logger)
                lines = [l for l in iostats.splitlines() if len(l) > 0]
                legend = [l for l in lines if "us" in l]
                if len(legend) == 1:
                    headers = legend[0].split()
                    data = lines[-1].split()
                    cpu_user = get_value(headers, data, "us")
                    cpu_sys = get_value(headers, data, "sy")
                    cpu_wait = 0
                    cpu_idle = get_value(headers, data, "id")
                    cpu_st = 0
                    return format_results(cpu_user, cpu_sys, cpu_wait, cpu_idle, cpu_st)
                else:
                    self.logger.warn("Expected to get at least 4 lines of data from iostat instead of just " + str(iostats[:max(80, len(iostats))]))
                    return False

            elif sys.platform.startswith("freebsd"):
                # generate 3 seconds of data
                #        tty            ada0              cd0            pass0             cpu
                # tin  tout  KB/t tps  MB/s   KB/t tps  MB/s   KB/t tps  MB/s  us ni sy in id
                #   0    69 26.71   0  0.01   0.00   0  0.00   0.00   0  0.00   2  0  0  1 97
                #   0    78  0.00   0  0.00   0.00   0  0.00   0.00   0  0.00   0  0  0  0 100
                iostats, _, _ = get_subprocess_output(['iostat', '-w', '3', '-c', '2'], self.logger)
                lines = [l for l in iostats.splitlines() if len(l) > 0]
                legend = [l for l in lines if "us" in l]
                if len(legend) == 1:
                    headers = legend[0].split()
                    data = lines[-1].split()
                    cpu_user = get_value(headers, data, "us")
                    cpu_nice = get_value(headers, data, "ni")
                    cpu_sys = get_value(headers, data, "sy")
                    cpu_intr = get_value(headers, data, "in")
                    cpu_wait = 0
                    cpu_idle = get_value(headers, data, "id")
                    cpu_stol = 0
                    return format_results(cpu_user + cpu_nice, cpu_sys + cpu_intr, cpu_wait, cpu_idle, cpu_stol)
                else:
                    self.logger.warn("Expected to get at least 4 lines of data from iostat instead of just " + str(iostats[:max(80, len(iostats))]))
                    return False

            elif sys.platform == 'sunos5':
                # mpstat -aq 1 2
                # SET minf mjf xcal  intr ithr  csw icsw migr smtx  srw syscl  usr sys  wt idl sze
                # 0 5239   0 12857 22969 5523 14628   73  546 4055    1 146856    5   6   0  89  24 <-- since boot
                # 1 ...
                # SET minf mjf xcal  intr ithr  csw icsw migr smtx  srw syscl  usr sys  wt idl sze
                # 0 20374   0 45634 57792 5786 26767   80  876 20036    2 724475   13  13   0  75  24 <-- past 1s
                # 1 ...
                # http://docs.oracle.com/cd/E23824_01/html/821-1462/mpstat-1m.html
                #
                # Will aggregate over all processor sets
                output, _, _ = get_subprocess_output(['mpstat', '-aq', '1', '2'], self.logger)
                mpstat = output.splitlines()
                lines = [l for l in mpstat if len(l) > 0]
                # discard the first len(lines)/2 lines
                lines = lines[len(lines)/2:]
                legend = [l for l in lines if "SET" in l]
                assert len(legend) == 1
                if len(legend) == 1:
                    headers = legend[0].split()
                    # collect stats for each processor set
                    # and aggregate them based on the relative set size
                    d_lines = [l for l in lines if "SET" not in l]
                    user = [get_value(headers, l.split(), "usr") for l in d_lines]
                    kern = [get_value(headers, l.split(), "sys") for l in d_lines]
                    wait = [get_value(headers, l.split(), "wt") for l in d_lines]
                    idle = [get_value(headers, l.split(), "idl") for l in d_lines]
                    size = [get_value(headers, l.split(), "sze") for l in d_lines]
                    count = sum(size)
                    rel_size = [s/count for s in size]
                    dot = lambda v1, v2: reduce(operator.add, map(operator.mul, v1, v2))
                    return format_results(dot(user, rel_size),
                                          dot(kern, rel_size),
                                          dot(wait, rel_size),
                                          dot(idle, rel_size),
                                          0.0)
            else:
                self.logger.warn("CPUStats: unsupported platform")
                return False
        except Exception:
            self.logger.exception("Cannot compute CPU stats")
            return False
class System(Check):
    def check(self, agentConfig):
        """Report seconds since boot as the ``system.uptime`` gauge."""
        seconds_since_boot = uptime.uptime()
        return {"system.uptime": seconds_since_boot}
def main():
    """Manual smoke test: print all system checks once per second.

    Runs forever (stop with Ctrl-C); intended for interactive debugging,
    not production use.
    """
    # 1s loop with results
    import logging

    logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
    log = logging.getLogger()
    cpu = Cpu(log)
    io = IO(log)
    load = Load(log)
    mem = Memory(log)
    # proc = Processes(log)
    system = System(log)

    # Minimal fake agent config; blacklists OS X's disk0 as an example.
    config = {"api_key": "666", "device_blacklist_re": re.compile('.*disk0.*')}
    while True:
        print("=" * 10)
        print("--- IO ---")
        print(io.check(config))
        print("--- CPU ---")
        print(cpu.check(config))
        print("--- Load ---")
        print(load.check(config))
        print("--- Memory ---")
        print(mem.check(config))
        print("--- System ---")
        print(system.check(config))
        print("\n\n\n")
        # print("--- Processes ---")
        # print(proc.check(config))
        time.sleep(1)
# Allow running this module directly for the live metrics demo loop above.
if __name__ == '__main__':
    main()
| bsd-3-clause | -3,477,648,975,757,831,700 | 44.363871 | 285 | 0.461643 | false |
r-kan/reminder | base/setting/utility.py | 1 | 6935 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from util.select import RankHolder, get_weighted_random_dict_key
PERCENTAGE = 1
WEIGHT = 2
class RankArbitrator(object):
def __init__(self):
self.__general_ranks = {} # key: pattern, value: list of Rank => un-timed rank
self.__timed_ranks = {} # ranks with period constraint (separate this field for faster 'time-filtering' phase)
self.__general_percentage_count = 0
self.__general_percentage_holder = {} # key: pattern, value: RankHolder
self.__general_weight_holder = {}
def is_active(self):
return self.__general_ranks or self.__timed_ranks
def add_rank(self, pattern, rank):
target_rank = self.__general_ranks if rank.period.empty else self.__timed_ranks
if pattern not in target_rank:
target_rank[pattern] = []
rank_list = target_rank[pattern]
rank_list.append(rank)
def finalize_rank(self):
"""prepare general holders for faster arbitrate"""
for pattern in self.__general_ranks:
ranks = self.__general_ranks[pattern]
rank = ranks[0] # TODO: support multi-rank for one pattern
is_percentage = PERCENTAGE is rank.kind
self.__general_percentage_count += rank.value if is_percentage else 0
target_holder = self.__general_percentage_holder if is_percentage else self.__general_weight_holder
assert pattern not in target_holder
target_holder[pattern] = RankHolder(rank.value)
assert self.__general_percentage_count <= 100
def __get_current_timed_rank(self):
"""return percentage_rank_dict, weight_rank_dict (key: pattern, value: Rank)"""
percentage_rank = {}
weight_rank = {}
for pattern in self.__timed_ranks:
ranks = self.__timed_ranks[pattern]
rank = ranks[0] # TODO: support multi-rank for one pattern
if rank.period.satisfy_current_time():
target_rank = percentage_rank if PERCENTAGE is rank.kind else weight_rank
target_rank[pattern] = rank
return percentage_rank, weight_rank
@staticmethod
def __get_dice(general_holders, timed_ranks):
"""return a valid dice: dict within entry has 'rank' field"""
import copy
total_holders = copy.deepcopy(general_holders)
for pattern in timed_ranks:
assert pattern not in total_holders
raw_rank = timed_ranks[pattern]
# TODO: support multi-rank for one pattern
rank = raw_rank[0] if isinstance(raw_rank, list) else raw_rank
total_holders[pattern] = RankHolder(rank.value)
return total_holders
__HAS_SHOWN_PERCENTAGE_WARNING__ = False
def arbitrate(self):
"""consider current date/time and value of the ranks, return the selected pattern"""
timed_percentage_rank, timed_weight_rank = self.__get_current_timed_rank()
timed_percentage_count = sum([timed_percentage_rank[pattern].value for pattern in timed_percentage_rank])
total_percentage_count = self.__general_percentage_count + timed_percentage_count
max_percentage = 100
if total_percentage_count > max_percentage:
max_percentage = total_percentage_count
if not RankArbitrator.__HAS_SHOWN_PERCENTAGE_WARNING__:
print("[warning] total percentage count value '%s' is greater than 100" % total_percentage_count)
RankArbitrator.__HAS_SHOWN_PERCENTAGE_WARNING__ = True
dice = {PERCENTAGE: RankHolder(total_percentage_count),
WEIGHT: RankHolder(max_percentage - total_percentage_count)}
choice = get_weighted_random_dict_key(dice)
general_holders = self.__general_percentage_holder if PERCENTAGE is choice else self.__general_weight_holder
timed_ranks = timed_percentage_rank if PERCENTAGE is choice else timed_weight_rank
if not general_holders and not timed_ranks:
return None
dice = self.__get_dice(general_holders, timed_ranks)
choice_pattern = get_weighted_random_dict_key(dice)
return choice_pattern
class Period(object):
def __init__(self, period_str):
self.month = None
self.month_day = None
self.week_day = None
self.begin_time = None
self.end_time = None
self.empty = True
self.parse(period_str)
def parse(self, period_str):
if not period_str:
return
self.empty = False
for spec in period_str.split(','):
# spec.: hhmm-hhmm
if '-' in spec:
[begin_time, end_time] = spec.split('-')
assert 4 == len(begin_time) and 4 == len(end_time)
begin_hour = int(begin_time[:2])
begin_minute = int(begin_time[2:])
end_hour = int(end_time[:2])
end_minute = int(end_time[2:])
assert begin_hour in range(24)
assert end_hour in range(25) # allow end_hour is 24
assert begin_minute in range(60)
assert end_minute in range(60)
assert 24 != end_hour or 0 == end_minute
self.begin_time = 60 * begin_hour + begin_minute
self.end_time = 60 * end_hour + end_minute
def __str__(self):
return str(self.begin_time) + "-" + str(self.end_time) if not self.empty else ""
def satisfy_current_time(self):
assert self.begin_time is not None # note: cannot use 'if self.begin_time', as begin_time can be 0
from datetime import datetime
today = datetime.today()
cur_hour = today.hour
cur_minute = today.minute
cur_time = 60 * cur_hour + cur_minute
cross_day = self.begin_time > self.end_time
if cross_day:
return cur_time > self.begin_time or cur_time < self.end_time
else:
return cur_time in range(self.begin_time, self.end_time)
class Rank(object):
def __init__(self, kind, value, period):
assert kind in ["PERCENTAGE", "WEIGHT"]
self.kind = PERCENTAGE if "PERCENTAGE" == kind else WEIGHT
self.value = value
self.period = Period(period)
assert self.value > 0
def print(self, prefix=None):
print(prefix if prefix else '\t', "WEIGHT" if WEIGHT == self.kind else "PERCENTAGE", self.value, self.period)
@staticmethod
def create(data=None):
if not isinstance(data, dict):
data = {}
kind = data["kind"] if "kind" in data else "WEIGHT"
value = data["value"] if "value" in data else 1
period = data["period"] if "period" in data else None
return Rank(kind, value, period)
@staticmethod
def create_default():
return Rank.create()
| mit | -4,023,592,659,876,096,500 | 41.286585 | 119 | 0.60995 | false |
bhaugen/nova | django_extensions/management/modelviz.py | 8 | 9296 | #!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-a] [-d] [-g] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-a, --all_applications
show models from all applications.
-d, --disable_fields
don't show the class member fields.
-g, --group_models
draw an enclosing box around models from the same app.
-i, --include_models=User,Person,Car
only include selected models in graph.
"""
__version__ = "0.9"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Bas van Oostveen <v.oostveen@gmail.com>",
]
import getopt, sys
from django.core.management import setup_environ
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.utils.safestring import mark_safe
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% if use_subgraph %}
subgraph {{ cluster_app_name }} {
label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER"
><FONT FACE="Helvetica Bold" COLOR="Black" POINT-SIZE="12"
>{{ app_name }}</FONT></TD></TR>
</TABLE>
>
color=olivedrab4
style="rounded"
{% endif %}
{% for model in models %}
{{ model.app_name }}_{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}{% if model.abstracts %}<BR/><<FONT FACE="Helvetica Italic">{{ model.abstracts|join:"," }}</FONT>>{% endif %}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
{% if use_subgraph %}
}
{% endif %}
"""
rel_template = """
{% for model in models %}
{% for relation in model.relations %}
{% if relation.needs_node %}
{{ relation.target_app }}_{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{% endif %}
{{ model.app_name }}_{{ model.name }} -> {{ relation.target_app }}_{{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
disable_fields = kwargs.get('disable_fields', False)
include_models = kwargs.get('include_models', [])
all_applications = kwargs.get('all_applications', False)
use_subgraph = kwargs.get('group_models', False)
dot = head_template
apps = []
if all_applications:
apps = models.get_apps()
for app_label in app_labels:
app = models.get_app(app_label)
if not app in apps:
apps.append(app)
graphs = []
for app in apps:
graph = Context({
'name': '"%s"' % app.__name__,
'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
'disable_fields': disable_fields,
'use_subgraph': use_subgraph,
'models': []
})
for appmodel in get_models(app):
abstracts = [e.__name__ for e in appmodel.__bases__ if hasattr(e, '_meta') and e._meta.abstract]
abstract_fields = []
for e in appmodel.__bases__:
if hasattr(e, '_meta') and e._meta.abstract:
abstract_fields.extend(e._meta.fields)
model = {
'app_name': app.__name__.replace(".", "_"),
'name': appmodel.__name__,
'abstracts': abstracts,
'fields': [],
'relations': []
}
# consider given model name ?
def consider(model_name):
return not include_models or model_name in include_models
if not consider(appmodel._meta.object_name):
continue
# model attributes
def add_attributes(field):
model['fields'].append({
'name': field.name,
'type': type(field).__name__,
'blank': field.blank,
'abstract': field in abstract_fields,
})
for field in appmodel._meta.fields:
add_attributes(field)
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
add_attributes(field)
# relations
def add_relation(field, extras=""):
_rel = {
'target_app': field.rel.to.__module__.replace('.','_'),
'target': field.rel.to.__name__,
'type': type(field).__name__,
'name': field.name,
'arrows': extras,
'needs_node': True
}
if _rel not in model['relations'] and consider(_rel['target']):
model['relations'].append(_rel)
for field in appmodel._meta.fields:
if isinstance(field, ForeignKey):
add_relation(field)
elif isinstance(field, OneToOneField):
add_relation(field, '[arrowhead=none arrowtail=none]')
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
if isinstance(field, ManyToManyField) and getattr(field, 'creates_table', False):
add_relation(field, '[arrowhead=normal arrowtail=normal]')
elif isinstance(field, GenericRelation):
add_relation(field, mark_safe('[style="dotted"] [arrowhead=normal arrowtail=normal]'))
graph['models'].append(model)
graphs.append(graph)
nodes = []
for graph in graphs:
nodes.extend([e['name'] for e in graph['models']])
for graph in graphs:
# don't draw duplication nodes because of relations
for model in graph['models']:
for relation in model['relations']:
if relation['target'] in nodes:
relation['needs_node'] = False
# render templates
t = Template(body_template)
dot += '\n' + t.render(graph)
for graph in graphs:
t = Template(rel_template)
dot += '\n' + t.render(graph)
dot += '\n' + tail_template
return dot
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hadgi:",
["help", "all_applications", "disable_fields", "group_models", "include_models="])
except getopt.GetoptError, error:
print __doc__
sys.exit(error)
kwargs = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
print __doc__
sys.exit()
if opt in ("-a", "--all_applications"):
kwargs['all_applications'] = True
if opt in ("-d", "--disable_fields"):
kwargs['disable_fields'] = True
if opt in ("-g", "--group_models"):
kwargs['group_models'] = True
if opt in ("-i", "--include_models"):
kwargs['include_models'] = arg.split(',')
if not args and not kwargs.get('all_applications', False):
print __doc__
sys.exit()
print generate_dot(args, **kwargs)
if __name__ == "__main__":
main()
| mit | 3,301,418,556,880,389,000 | 31.390244 | 156 | 0.55099 | false |
fritsvanveen/QGIS | python/ext-libs/pygments/lexers/modula2.py | 23 | 52564 | # -*- coding: utf-8 -*-
"""
pygments.lexers.modula2
~~~~~~~~~~~~~~~~~~~~~~~
Multi-Dialect Lexer for Modula-2.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Error
__all__ = ['Modula2Lexer']
# Multi-Dialect Modula-2 Lexer
class Modula2Lexer(RegexLexer):
"""
For `Modula-2 <http://www.modula2.org/>`_ source code.
The Modula-2 lexer supports several dialects. By default, it operates in
fallback mode, recognising the *combined* literals, punctuation symbols
and operators of all supported dialects, and the *combined* reserved words
and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not
differentiating between library defined identifiers.
To select a specific dialect, a dialect option may be passed
or a dialect tag may be embedded into a source file.
Dialect Options:
`m2pim`
Select PIM Modula-2 dialect.
`m2iso`
Select ISO Modula-2 dialect.
`m2r10`
Select Modula-2 R10 dialect.
`objm2`
Select Objective Modula-2 dialect.
The PIM and ISO dialect options may be qualified with a language extension.
Language Extensions:
`+aglet`
Select Aglet Modula-2 extensions, available with m2iso.
`+gm2`
Select GNU Modula-2 extensions, available with m2pim.
`+p1`
Select p1 Modula-2 extensions, available with m2iso.
`+xds`
Select XDS Modula-2 extensions, available with m2iso.
Passing a Dialect Option via Unix Commandline Interface
Dialect options may be passed to the lexer using the `dialect` key.
Only one such option should be passed. If multiple dialect options are
passed, the first valid option is used, any subsequent options are ignored.
Examples:
`$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`
Use ISO dialect to render input to HTML output
`$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`
Use ISO dialect with p1 extensions to render input to RTF output
Embedding a Dialect Option within a source file
A dialect option may be embedded in a source file in form of a dialect
tag, a specially formatted comment that specifies a dialect option.
Dialect Tag EBNF::
dialectTag :
OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;
dialectOption :
'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;
Prefix : '!' ;
OpeningCommentDelim : '(*' ;
ClosingCommentDelim : '*)' ;
No whitespace is permitted between the tokens of a dialect tag.
In the event that a source file contains multiple dialect tags, the first
tag that contains a valid dialect option will be used and any subsequent
dialect tags will be ignored. Ideally, a dialect tag should be placed
at the beginning of a source file.
An embedded dialect tag overrides a dialect option set via command line.
Examples:
``(*!m2r10*) DEFINITION MODULE Foobar; ...``
Use Modula2 R10 dialect to render this source file.
``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``
Use PIM dialect with GNU extensions to render this source file.
Algol Publication Mode:
In Algol publication mode, source text is rendered for publication of
algorithms in scientific papers and academic texts, following the format
of the Revised Algol-60 Language Report. It is activated by passing
one of two corresponding styles as an option:
`algol`
render reserved words lowercase underline boldface
and builtins lowercase boldface italic
`algol_nu`
render reserved words lowercase boldface (no underlining)
and builtins lowercase boldface italic
The lexer automatically performs the required lowercase conversion when
this mode is activated.
Example:
``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``
Render input file in Algol publication mode to LaTeX output.
Rendering Mode of First Class ADT Identifiers:
The rendering of standard library first class ADT identifiers is controlled
by option flag "treat_stdlib_adts_as_builtins".
When this option is turned on, standard library ADT identifiers are rendered
as builtins. When it is turned off, they are rendered as ordinary library
identifiers.
`treat_stdlib_adts_as_builtins` (default: On)
The option is useful for dialects that support ADTs as first class objects
and provide ADTs in the standard library that would otherwise be built-in.
At present, only Modula-2 R10 supports library ADTs as first class objects
and therefore, no ADT identifiers are defined for any other dialects.
Example:
``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``
Render standard library ADTs as ordinary library types.
.. versionadded:: 1.3
.. versionchanged:: 2.1
Added multi-dialect support.
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
filenames = ['*.def', '*.mod']
mimetypes = ['text/x-modula2']
flags = re.MULTILINE | re.DOTALL
tokens = {
'whitespace': [
(r'\n+', Text), # blank lines
(r'\s+', Text), # whitespace
],
'dialecttags': [
# PIM Dialect Tag
(r'\(\*!m2pim\*\)', Comment.Special),
# ISO Dialect Tag
(r'\(\*!m2iso\*\)', Comment.Special),
# M2R10 Dialect Tag
(r'\(\*!m2r10\*\)', Comment.Special),
# ObjM2 Dialect Tag
(r'\(\*!objm2\*\)', Comment.Special),
# Aglet Extensions Dialect Tag
(r'\(\*!m2iso\+aglet\*\)', Comment.Special),
# GNU Extensions Dialect Tag
(r'\(\*!m2pim\+gm2\*\)', Comment.Special),
# p1 Extensions Dialect Tag
(r'\(\*!m2iso\+p1\*\)', Comment.Special),
# XDS Extensions Dialect Tag
(r'\(\*!m2iso\+xds\*\)', Comment.Special),
],
'identifiers': [
(r'([a-zA-Z_$][\w$]*)', Name),
],
'prefixed_number_literals': [
#
# Base-2, whole number
(r'0b[01]+(\'[01]+)*', Number.Bin),
#
# Base-16, whole number
(r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex),
],
'plain_number_literals': [
#
# Base-10, real number with exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*' # fractional part
r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent
Number.Float),
#
# Base-10, real number without exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*', # fractional part
Number.Float),
#
# Base-10, whole number
(r'[0-9]+(\'[0-9]+)*', Number.Integer),
],
'suffixed_number_literals': [
#
# Base-8, whole number
(r'[0-7]+B', Number.Oct),
#
# Base-8, character code
(r'[0-7]+C', Number.Oct),
#
# Base-16, number
(r'[0-9A-F]+H', Number.Hex),
],
'string_literals': [
(r"'(\\\\|\\'|[^'])*'", String), # single quoted string
(r'"(\\\\|\\"|[^"])*"', String), # double quoted string
],
'digraph_operators': [
# Dot Product Operator
(r'\*\.', Operator),
# Array Concatenation Operator
(r'\+>', Operator), # M2R10 + ObjM2
# Inequality Operator
(r'<>', Operator), # ISO + PIM
# Less-Or-Equal, Subset
(r'<=', Operator),
# Greater-Or-Equal, Superset
(r'>=', Operator),
# Identity Operator
(r'==', Operator), # M2R10 + ObjM2
# Type Conversion Operator
(r'::', Operator), # M2R10 + ObjM2
# Assignment Symbol
(r':=', Operator),
# Postfix Increment Mutator
(r'\+\+', Operator), # M2R10 + ObjM2
# Postfix Decrement Mutator
(r'--', Operator), # M2R10 + ObjM2
],
'unigraph_operators': [
# Arithmetic Operators
(r'[+-]', Operator),
(r'[*/]', Operator),
# ISO 80000-2 compliant Set Difference Operator
(r'\\', Operator), # M2R10 + ObjM2
# Relational Operators
(r'[=#<>]', Operator),
# Dereferencing Operator
(r'\^', Operator),
# Dereferencing Operator Synonym
(r'@', Operator), # ISO
# Logical AND Operator Synonym
(r'&', Operator), # PIM + ISO
# Logical NOT Operator Synonym
(r'~', Operator), # PIM + ISO
# Smalltalk Message Prefix
(r'`', Operator), # ObjM2
],
'digraph_punctuation': [
# Range Constructor
(r'\.\.', Punctuation),
# Opening Chevron Bracket
(r'<<', Punctuation), # M2R10 + ISO
# Closing Chevron Bracket
(r'>>', Punctuation), # M2R10 + ISO
# Blueprint Punctuation
(r'->', Punctuation), # M2R10 + ISO
# Distinguish |# and # in M2 R10
(r'\|#', Punctuation),
# Distinguish ## and # in M2 R10
(r'##', Punctuation),
# Distinguish |* and * in M2 R10
(r'\|\*', Punctuation),
],
'unigraph_punctuation': [
# Common Punctuation
(r'[\(\)\[\]{},.:;\|]', Punctuation),
# Case Label Separator Synonym
(r'!', Punctuation), # ISO
# Blueprint Punctuation
(r'\?', Punctuation), # M2R10 + ObjM2
],
'comments': [
# Single Line Comment
(r'^//.*?\n', Comment.Single), # M2R10 + ObjM2
# Block Comment
(r'\(\*([^$].*?)\*\)', Comment.Multiline),
# Template Block Comment
(r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2
],
'pragmas': [
# ISO Style Pragmas
(r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2
# Pascal Style Pragmas
(r'\(\*\$.*?\*\)', Comment.Preproc), # PIM
],
'root': [
include('whitespace'),
include('dialecttags'),
include('pragmas'),
include('comments'),
include('identifiers'),
include('suffixed_number_literals'), # PIM + ISO
include('prefixed_number_literals'), # M2R10 + ObjM2
include('plain_number_literals'),
include('string_literals'),
include('digraph_punctuation'),
include('digraph_operators'),
include('unigraph_punctuation'),
include('unigraph_operators'),
]
}
# C o m m o n D a t a s e t s
# Common Reserved Words Dataset
common_reserved_words = (
# 37 common reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',
'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',
'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
)
# Common Builtins Dataset
common_builtins = (
# 16 common builtins
'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',
'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',
'TRUE',
)
# Common Pseudo-Module Builtins Dataset
common_pseudo_builtins = (
# 4 common pseudo builtins
'ADDRESS', 'BYTE', 'WORD', 'ADR'
)
# P I M M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for PIM Modula-2
pim_lexemes_to_reject = (
'!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.',
'+>', '->', '<<', '>>', '|#', '##',
)
# PIM Modula-2 Additional Reserved Words Dataset
pim_additional_reserved_words = (
# 3 additional reserved words
'EXPORT', 'QUALIFIED', 'WITH',
)
# PIM Modula-2 Additional Builtins Dataset
pim_additional_builtins = (
# 16 additional builtins
'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',
'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',
)
# PIM Modula-2 Additional Pseudo-Module Builtins Dataset
pim_additional_pseudo_builtins = (
# 5 additional pseudo builtins
'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',
)
# I S O M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for ISO Modula-2
iso_lexemes_to_reject = (
'`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->',
'<<', '>>', '|#', '##',
)
# ISO Modula-2 Additional Reserved Words Dataset
iso_additional_reserved_words = (
# 9 additional reserved words (ISO 10514-1)
'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',
'REM', 'RETRY', 'WITH',
# 10 additional reserved words (ISO 10514-2 & ISO 10514-3)
'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',
'REVEAL', 'TRACED', 'UNSAFEGUARDED',
)
# ISO Modula-2 Additional Builtins Dataset
iso_additional_builtins = (
# 26 additional builtins (ISO 10514-1)
'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',
'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH',
'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',
'TRUNC', 'UNINTERRUBTIBLE', 'VAL',
# 5 additional builtins (ISO 10514-2 & ISO 10514-3)
'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',
)
# ISO Modula-2 Additional Pseudo-Module Builtins Dataset
iso_additional_pseudo_builtins = (
# 14 additional builtins (SYSTEM)
'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',
'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',
'ROTATE', 'SHIFT', 'CAST', 'TSIZE',
# 13 additional builtins (COROUTINES)
'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',
'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',
'NEWCOROUTINE', 'PROT', 'TRANSFER',
# 9 additional builtins (EXCEPTIONS)
'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',
'ExceptionSource', 'GetMessage', 'IsCurrentSource',
'IsExceptionalExecution', 'RAISE',
# 3 additional builtins (TERMINATION)
'TERMINATION', 'IsTerminating', 'HasHalted',
# 4 additional builtins (M2EXCEPTION)
'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',
'indexException', 'rangeException', 'caseSelectException',
'invalidLocation', 'functionException', 'wholeValueException',
'wholeDivException', 'realValueException', 'realDivException',
'complexValueException', 'complexDivException', 'protException',
'sysException', 'coException', 'exException',
)
# M o d u l a - 2 R 1 0 D a t a s e t s
# Lexemes to Mark as Error Tokens for Modula-2 R10
m2r10_lexemes_to_reject = (
'!', '`', '@', '$', '%', '&', '<>',
)
# Modula-2 R10 reserved words in addition to the common set
m2r10_additional_reserved_words = (
# 12 additional reserved words
'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',
'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',
# 2 additional reserved words with symbolic assembly option
'ASM', 'REG',
)
# Modula-2 R10 builtins in addition to the common set
m2r10_additional_builtins = (
# 26 additional builtins
'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',
'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',
'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',
'UNICHAR', 'WRITE', 'WRITEF',
)
# Modula-2 R10 Additional Pseudo-Module Builtins Dataset
m2r10_additional_pseudo_builtins = (
# 13 additional builtins (TPROPERTIES)
'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',
'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',
'TMAXEXP', 'TMINEXP',
# 4 additional builtins (CONVERSION)
'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',
# 35 additional builtins (UNSAFE)
'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',
'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',
'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',
'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',
'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',
# 11 additional builtins (ATOMIC)
'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',
'BWNAND', 'BWOR', 'BWXOR',
# 7 additional builtins (COMPILER)
'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',
'HASH',
# 5 additional builtins (ASSEMBLER)
'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',
)
# O b j e c t i v e M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for Objective Modula-2
objm2_lexemes_to_reject = (
'!', '$', '%', '&', '<>',
)
# Objective Modula-2 Extensions
# reserved words in addition to Modula-2 R10
objm2_additional_reserved_words = (
# 16 additional reserved words
'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
'SUPER', 'TRY',
)
# Objective Modula-2 Extensions
# builtins in addition to Modula-2 R10
objm2_additional_builtins = (
# 3 additional builtins
'OBJECT', 'NO', 'YES',
)
# Objective Modula-2 Extensions
# pseudo-module builtins in addition to Modula-2 R10
objm2_additional_pseudo_builtins = (
# None
)
# A g l e t M o d u l a - 2 D a t a s e t s
# Aglet Extensions
# reserved words in addition to ISO Modula-2
aglet_additional_reserved_words = (
# None
)
# Aglet Extensions
# builtins in addition to ISO Modula-2
aglet_additional_builtins = (
# 9 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',
)
# Aglet Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
aglet_additional_pseudo_builtins = (
# None
)
# G N U M o d u l a - 2 D a t a s e t s
# GNU Extensions
# reserved words in addition to PIM Modula-2
gm2_additional_reserved_words = (
# 10 additional reserved words
'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
'__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
)
# GNU Extensions
# builtins in addition to PIM Modula-2
gm2_additional_builtins = (
# 21 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
)
# GNU Extensions
# pseudo-module builtins in addition to PIM Modula-2
gm2_additional_pseudo_builtins = (
# None
)
# p 1 M o d u l a - 2 D a t a s e t s
# p1 Extensions
# reserved words in addition to ISO Modula-2
p1_additional_reserved_words = (
# None
)
# p1 Extensions
# builtins in addition to ISO Modula-2
p1_additional_builtins = (
# None
)
# p1 Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
p1_additional_pseudo_builtins = (
# 1 additional builtin
'BCD',
)
# X D S M o d u l a - 2 D a t a s e t s
# XDS Extensions
# reserved words in addition to ISO Modula-2
xds_additional_reserved_words = (
# 1 additional reserved word
'SEQ',
)
# XDS Extensions
# builtins in addition to ISO Modula-2
xds_additional_builtins = (
# 9 additional builtins
'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',
'LONGCARD', 'SHORTCARD', 'SHORTINT',
)
# XDS Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
xds_additional_pseudo_builtins = (
# 22 additional builtins (SYSTEM)
'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',
'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',
'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void'
# 3 additional builtins (COMPILER)
'COMPILER', 'OPTION', 'EQUATION'
)
# P I M S t a n d a r d L i b r a r y D a t a s e t s
# PIM Modula-2 Standard Library Modules Dataset
pim_stdlib_module_identifiers = (
'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',
)
# PIM Modula-2 Standard Library Types Dataset
pim_stdlib_type_identifiers = (
'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',
'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',
'DirectoryCommand',
)
# PIM Modula-2 Standard Library Procedures Dataset
pim_stdlib_proc_identifiers = (
'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',
'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',
'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',
'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',
'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',
'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',
'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',
'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',
'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',
'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',
)
# PIM Modula-2 Standard Library Variables Dataset
pim_stdlib_var_identifiers = (
'Done', 'termCH', 'in', 'out'
)
# PIM Modula-2 Standard Library Constants Dataset
pim_stdlib_const_identifiers = (
'EOL',
)
# I S O S t a n d a r d L i b r a r y D a t a s e t s
# ISO Modula-2 Standard Library Modules Dataset
iso_stdlib_module_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Types Dataset
iso_stdlib_type_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Procedures Dataset
iso_stdlib_proc_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Variables Dataset
iso_stdlib_var_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Constants Dataset
iso_stdlib_const_identifiers = (
# TO DO
)
# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s
# Modula-2 R10 Standard Library ADTs Dataset
m2r10_stdlib_adt_identifiers = (
'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',
'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',
'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',
'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',
'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',
'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',
'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',
'INT64', 'INT128', 'STRING', 'UNISTRING',
)
# Modula-2 R10 Standard Library Blueprints Dataset
m2r10_stdlib_blueprint_identifiers = (
'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',
'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',
)
# Modula-2 R10 Standard Library Modules Dataset
m2r10_stdlib_module_identifiers = (
'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',
'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',
'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',
'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',
'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',
)
# Modula-2 R10 Standard Library Types Dataset
m2r10_stdlib_type_identifiers = (
'File', 'Status',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Procedures Dataset
m2r10_stdlib_proc_identifiers = (
'ALLOCATE', 'DEALLOCATE', 'SIZE',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Variables Dataset
m2r10_stdlib_var_identifiers = (
'stdIn', 'stdOut', 'stdErr',
)
# Modula-2 R10 Standard Library Constants Dataset
m2r10_stdlib_const_identifiers = (
'pi', 'tau',
)
# D i a l e c t s
# Dialect modes
dialects = (
'unknown',
'm2pim', 'm2iso', 'm2r10', 'objm2',
'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
)
# D a t a b a s e s
# Lexemes to Mark as Errors Database
lexemes_to_reject_db = {
# Lexemes to reject for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Lexemes to reject for PIM Modula-2
'm2pim': (
pim_lexemes_to_reject,
),
# Lexemes to reject for ISO Modula-2
'm2iso': (
iso_lexemes_to_reject,
),
# Lexemes to reject for Modula-2 R10
'm2r10': (
m2r10_lexemes_to_reject,
),
# Lexemes to reject for Objective Modula-2
'objm2': (
objm2_lexemes_to_reject,
),
# Lexemes to reject for Aglet Modula-2
'm2iso+aglet': (
iso_lexemes_to_reject,
),
# Lexemes to reject for GNU Modula-2
'm2pim+gm2': (
pim_lexemes_to_reject,
),
# Lexemes to reject for p1 Modula-2
'm2iso+p1': (
iso_lexemes_to_reject,
),
# Lexemes to reject for XDS Modula-2
'm2iso+xds': (
iso_lexemes_to_reject,
),
}
# Reserved Words Database
reserved_words_db = {
# Reserved words for unknown dialect
'unknown': (
common_reserved_words,
pim_additional_reserved_words,
iso_additional_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for PIM Modula-2
'm2pim': (
common_reserved_words,
pim_additional_reserved_words,
),
# Reserved words for Modula-2 R10
'm2iso': (
common_reserved_words,
iso_additional_reserved_words,
),
# Reserved words for ISO Modula-2
'm2r10': (
common_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for Objective Modula-2
'objm2': (
common_reserved_words,
m2r10_additional_reserved_words,
objm2_additional_reserved_words,
),
# Reserved words for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_reserved_words,
iso_additional_reserved_words,
aglet_additional_reserved_words,
),
# Reserved words for GNU Modula-2 Extensions
'm2pim+gm2': (
common_reserved_words,
pim_additional_reserved_words,
gm2_additional_reserved_words,
),
# Reserved words for p1 Modula-2 Extensions
'm2iso+p1': (
common_reserved_words,
iso_additional_reserved_words,
p1_additional_reserved_words,
),
# Reserved words for XDS Modula-2 Extensions
'm2iso+xds': (
common_reserved_words,
iso_additional_reserved_words,
xds_additional_reserved_words,
),
}
# Builtins Database
builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_builtins,
pim_additional_builtins,
iso_additional_builtins,
m2r10_additional_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_builtins,
pim_additional_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_builtins,
iso_additional_builtins,
),
# Builtins for ISO Modula-2
'm2r10': (
common_builtins,
m2r10_additional_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_builtins,
m2r10_additional_builtins,
objm2_additional_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_builtins,
iso_additional_builtins,
aglet_additional_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_builtins,
pim_additional_builtins,
gm2_additional_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_builtins,
iso_additional_builtins,
p1_additional_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_builtins,
iso_additional_builtins,
xds_additional_builtins,
),
}
# Pseudo-Module Builtins Database
pseudo_builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
iso_additional_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2r10': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
objm2_additional_pseudo_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
aglet_additional_pseudo_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
gm2_additional_pseudo_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
p1_additional_pseudo_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
xds_additional_pseudo_builtins,
),
}
# Standard Library ADTs Database
stdlib_adts_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library ADTs for PIM Modula-2
'm2pim': (
# No first class library types
),
# Standard Library ADTs for ISO Modula-2
'm2iso': (
# No first class library types
),
# Standard Library ADTs for Modula-2 R10
'm2r10': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Objective Modula-2
'objm2': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Aglet Modula-2
'm2iso+aglet': (
# No first class library types
),
# Standard Library ADTs for GNU Modula-2
'm2pim+gm2': (
# No first class library types
),
# Standard Library ADTs for p1 Modula-2
'm2iso+p1': (
# No first class library types
),
# Standard Library ADTs for XDS Modula-2
'm2iso+xds': (
# No first class library types
),
}
# Standard Library Modules Database
stdlib_modules_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Modules for PIM Modula-2
'm2pim': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for ISO Modula-2
'm2iso': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for Modula-2 R10
'm2r10': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
m2r10_stdlib_adt_identifiers,
),
# Standard Library Modules for Objective Modula-2
'objm2': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
),
# Standard Library Modules for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for p1 Modula-2
'm2iso+p1': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for XDS Modula-2
'm2iso+xds': (
iso_stdlib_module_identifiers,
),
}
# Standard Library Types Database
stdlib_types_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Types for PIM Modula-2
'm2pim': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for ISO Modula-2
'm2iso': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for Modula-2 R10
'm2r10': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Objective Modula-2
'objm2': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for p1 Modula-2
'm2iso+p1': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for XDS Modula-2
'm2iso+xds': (
iso_stdlib_type_identifiers,
),
}
# Standard Library Procedures Database
stdlib_procedures_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Procedures for PIM Modula-2
'm2pim': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for ISO Modula-2
'm2iso': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for Modula-2 R10
'm2r10': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Objective Modula-2
'objm2': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for p1 Modula-2
'm2iso+p1': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for XDS Modula-2
'm2iso+xds': (
iso_stdlib_proc_identifiers,
),
}
# Standard Library Variables Database
stdlib_variables_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Variables for PIM Modula-2
'm2pim': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for ISO Modula-2
'm2iso': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for Modula-2 R10
'm2r10': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Objective Modula-2
'objm2': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for p1 Modula-2
'm2iso+p1': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for XDS Modula-2
'm2iso+xds': (
iso_stdlib_var_identifiers,
),
}
# Standard Library Constants Database
stdlib_constants_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Constants for PIM Modula-2
'm2pim': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for ISO Modula-2
'm2iso': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for Modula-2 R10
'm2r10': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Objective Modula-2
'objm2': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for p1 Modula-2
'm2iso+p1': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for XDS Modula-2
'm2iso+xds': (
iso_stdlib_const_identifiers,
),
}
# M e t h o d s
# initialise a lexer instance
def __init__(self, **options):
#
# check dialect options
#
dialects = get_list_opt(options, 'dialect', [])
#
for dialect_option in dialects:
if dialect_option in self.dialects[1:-1]:
# valid dialect option found
self.set_dialect(dialect_option)
break
#
# Fallback Mode (DEFAULT)
else:
# no valid dialect option
self.set_dialect('unknown')
#
self.dialect_set_by_tag = False
#
# check style options
#
styles = get_list_opt(options, 'style', [])
#
# use lowercase mode for Algol style
if 'algol' in styles or 'algol_nu' in styles:
self.algol_publication_mode = True
else:
self.algol_publication_mode = False
#
# Check option flags
#
self.treat_stdlib_adts_as_builtins = get_bool_opt(
options, 'treat_stdlib_adts_as_builtins', True)
#
# call superclass initialiser
RegexLexer.__init__(self, **options)
# Set lexer to a specified dialect
def set_dialect(self, dialect_id):
#
# if __debug__:
# print 'entered set_dialect with arg: ', dialect_id
#
# check dialect name against known dialects
if dialect_id not in self.dialects:
dialect = 'unknown' # default
else:
dialect = dialect_id
#
# compose lexemes to reject set
lexemes_to_reject_set = set()
# add each list of reject lexemes for this dialect
for list in self.lexemes_to_reject_db[dialect]:
lexemes_to_reject_set.update(set(list))
#
# compose reserved words set
reswords_set = set()
# add each list of reserved words for this dialect
for list in self.reserved_words_db[dialect]:
reswords_set.update(set(list))
#
# compose builtins set
builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.builtins_db[dialect]:
builtins_set.update(set(list).difference(reswords_set))
#
# compose pseudo-builtins set
pseudo_builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.pseudo_builtins_db[dialect]:
pseudo_builtins_set.update(set(list).difference(reswords_set))
#
# compose ADTs set
adts_set = set()
# add each list of ADTs for this dialect excluding reserved words
for list in self.stdlib_adts_db[dialect]:
adts_set.update(set(list).difference(reswords_set))
#
# compose modules set
modules_set = set()
# add each list of builtins for this dialect excluding builtins
for list in self.stdlib_modules_db[dialect]:
modules_set.update(set(list).difference(builtins_set))
#
# compose types set
types_set = set()
# add each list of types for this dialect excluding builtins
for list in self.stdlib_types_db[dialect]:
types_set.update(set(list).difference(builtins_set))
#
# compose procedures set
procedures_set = set()
# add each list of procedures for this dialect excluding builtins
for list in self.stdlib_procedures_db[dialect]:
procedures_set.update(set(list).difference(builtins_set))
#
# compose variables set
variables_set = set()
# add each list of variables for this dialect excluding builtins
for list in self.stdlib_variables_db[dialect]:
variables_set.update(set(list).difference(builtins_set))
#
# compose constants set
constants_set = set()
# add each list of constants for this dialect excluding builtins
for list in self.stdlib_constants_db[dialect]:
constants_set.update(set(list).difference(builtins_set))
#
# update lexer state
self.dialect = dialect
self.lexemes_to_reject = lexemes_to_reject_set
self.reserved_words = reswords_set
self.builtins = builtins_set
self.pseudo_builtins = pseudo_builtins_set
self.adts = adts_set
self.modules = modules_set
self.types = types_set
self.procedures = procedures_set
self.variables = variables_set
self.constants = constants_set
#
# if __debug__:
# print 'exiting set_dialect'
# print ' self.dialect: ', self.dialect
# print ' self.lexemes_to_reject: ', self.lexemes_to_reject
# print ' self.reserved_words: ', self.reserved_words
# print ' self.builtins: ', self.builtins
# print ' self.pseudo_builtins: ', self.pseudo_builtins
# print ' self.adts: ', self.adts
# print ' self.modules: ', self.modules
# print ' self.types: ', self.types
# print ' self.procedures: ', self.procedures
# print ' self.variables: ', self.variables
# print ' self.types: ', self.types
# print ' self.constants: ', self.constants
# Extracts a dialect name from a dialect tag comment string and checks
# the extracted name against known dialects. If a match is found, the
# matching name is returned, otherwise dialect id 'unknown' is returned
def get_dialect_from_dialect_tag(self, dialect_tag):
#
# if __debug__:
# print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag
#
# constants
left_tag_delim = '(*!'
right_tag_delim = '*)'
left_tag_delim_len = len(left_tag_delim)
right_tag_delim_len = len(right_tag_delim)
indicator_start = left_tag_delim_len
indicator_end = -(right_tag_delim_len)
#
# check comment string for dialect indicator
if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \
and dialect_tag.startswith(left_tag_delim) \
and dialect_tag.endswith(right_tag_delim):
#
# if __debug__:
# print 'dialect tag found'
#
# extract dialect indicator
indicator = dialect_tag[indicator_start:indicator_end]
#
# if __debug__:
# print 'extracted: ', indicator
#
# check against known dialects
for index in range(1, len(self.dialects)):
#
# if __debug__:
# print 'dialects[', index, ']: ', self.dialects[index]
#
if indicator == self.dialects[index]:
#
# if __debug__:
# print 'matching dialect found'
#
# indicator matches known dialect
return indicator
else:
# indicator does not match any dialect
return 'unknown' # default
else:
# invalid indicator string
return 'unknown' # default
# intercept the token stream, modify token attributes and return them
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
#
# check for dialect tag if dialect has not been set by tag
if not self.dialect_set_by_tag and token == Comment.Special:
indicated_dialect = self.get_dialect_from_dialect_tag(value)
if indicated_dialect != 'unknown':
# token is a dialect indicator
# reset reserved words and builtins
self.set_dialect(indicated_dialect)
self.dialect_set_by_tag = True
#
# check for reserved words, predefined and stdlib identifiers
if token is Name:
if value in self.reserved_words:
token = Keyword.Reserved
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.builtins:
token = Name.Builtin
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.pseudo_builtins:
token = Name.Builtin.Pseudo
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.adts:
if not self.treat_stdlib_adts_as_builtins:
token = Name.Namespace
else:
token = Name.Builtin.Pseudo
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.modules:
token = Name.Namespace
#
elif value in self.types:
token = Name.Class
#
elif value in self.procedures:
token = Name.Function
#
elif value in self.variables:
token = Name.Variable
#
elif value in self.constants:
token = Name.Constant
#
elif token in Number:
#
# mark prefix number literals as error for PIM and ISO dialects
if self.dialect not in ('unknown', 'm2r10', 'objm2'):
if "'" in value or value[0:2] in ('0b', '0x', '0u'):
token = Error
#
elif self.dialect in ('m2r10', 'objm2'):
# mark base-8 number literals as errors for M2 R10 and ObjM2
if token is Number.Oct:
token = Error
# mark suffix base-16 literals as errors for M2 R10 and ObjM2
elif token is Number.Hex and 'H' in value:
token = Error
# mark real numbers with E as errors for M2 R10 and ObjM2
elif token is Number.Float and 'E' in value:
token = Error
#
elif token in Comment:
#
# mark single line comment as error for PIM and ISO dialects
if token is Comment.Single:
if self.dialect not in ('unknown', 'm2r10', 'objm2'):
token = Error
#
if token is Comment.Preproc:
# mark ISO pragma as error for PIM dialects
if value.startswith('<*') and \
self.dialect.startswith('m2pim'):
token = Error
# mark PIM pragma as comment for other dialects
elif value.startswith('(*$') and \
self.dialect != 'unknown' and \
not self.dialect.startswith('m2pim'):
token = Comment.Multiline
#
else: # token is neither Name nor Comment
#
# mark lexemes matching the dialect's error token set as errors
if value in self.lexemes_to_reject:
token = Error
#
# substitute lexemes when in Algol mode
if self.algol_publication_mode:
if value == '#':
value = u'≠'
elif value == '<=':
value = u'≤'
elif value == '>=':
value = u'≥'
elif value == '==':
value = u'≡'
elif value == '*.':
value = u'•'
# return result
yield index, token, value
| gpl-2.0 | -6,001,406,843,386,985,000 | 32.66688 | 84 | 0.541786 | false |
stonebig/bokeh | bokeh/document/document.py | 1 | 39796 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the ``Document`` class, which is a container for Bokeh Models to
be reflected to the client side BokehJS library.
As a concrete example, consider a column layout with ``Slider`` and ``Select``
widgets, and a plot with some tools, an axis and grid, and a glyph renderer
for circles. A simplified representation of this document might look like the
figure below:
.. figure:: /_images/document.svg
:align: center
:width: 65%
A Bokeh Document is a collection of Bokeh Models (e.g. plots, tools,
glyphs, etc.) that can be serialized as a single collection.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import defaultdict
from json import loads
import sys
# External imports
import jinja2
from six import string_types
# Bokeh imports
from ..core.enums import HoldPolicy
from ..core.json_encoder import serialize_json
from ..core.query import find
from ..core.templates import FILE
from ..core.validation import check_integrity
from ..events import Event
from ..themes import default as default_theme, built_in_themes
from ..themes import Theme
from ..util.callback_manager import _check_callback
from ..util.datatypes import MultiValuedDict
from ..util.future import wraps
from ..util.version import __version__
from .events import ModelChangedEvent, RootAddedEvent, RootRemovedEvent, SessionCallbackAdded, SessionCallbackRemoved, TitleChangedEvent
from .locking import UnlockedDocumentProxy
from .util import initialize_references_json, instantiate_references_json, references_json
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_TITLE = "Bokeh Application"
__all__ = (
'Document',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Document(object):
''' The basic unit of serialization for Bokeh.
Document instances collect Bokeh models (e.g. plots, layouts, widgets,
etc.) so that they may be reflected into the BokehJS client runtime.
Because models may refer to other models (e.g., a plot *has* a list of
renderers), it is not generally useful or meaningful to convert individual
models to JSON. Accordingly, the ``Document`` is thus the smallest unit
of serialization for Bokeh.
'''
def __init__(self, **kwargs):
self._roots = list()
self._theme = kwargs.pop('theme', default_theme)
# use _title directly because we don't need to trigger an event
self._title = kwargs.pop('title', DEFAULT_TITLE)
self._template = FILE
self._all_models_freeze_count = 0
self._all_models = dict()
self._all_models_by_name = MultiValuedDict()
self._all_former_model_ids = set()
self._callbacks = {}
self._session_destroyed_callbacks = set()
self._session_callbacks = set()
self._session_context = None
self._modules = []
self._template_variables = {}
self._hold = None
self._held_events = []
# set of models subscribed to user events
self._subscribed_models = defaultdict(set)
self._callback_objs_by_callable = {self.add_next_tick_callback: defaultdict(set),
self.add_periodic_callback: defaultdict(set),
self.add_timeout_callback: defaultdict(set)}
# Properties --------------------------------------------------------------
@property
def roots(self):
''' A list of all the root models in this Document.
'''
return list(self._roots)
@property
def session_callbacks(self):
''' A list of all the session callbacks on this document.
'''
return list(self._session_callbacks)
@property
def session_destroyed_callbacks(self):
''' A list of all the on_session_destroyed callbacks on this document.
'''
return self._session_destroyed_callbacks
@session_destroyed_callbacks.setter
def session_destroyed_callbacks(self, callbacks):
self._session_destroyed_callbacks = callbacks
@property
def session_context(self):
''' The ``SessionContext`` for this document.
'''
return self._session_context
@property
def template(self):
''' A Jinja2 template to use for rendering this document.
'''
return self._template
@template.setter
def template(self, template):
if not isinstance(template, (jinja2.Template, string_types)):
raise ValueError("document template must be Jinja2 template or a string")
self._template = template
@property
def template_variables(self):
''' A dictionary of template variables to pass when rendering
``self.template``.
'''
return self._template_variables
@property
def theme(self):
''' The current ``Theme`` instance affecting models in this Document.
Setting this to ``None`` sets the default theme. (i.e this property
never returns ``None``.)
Changing theme may trigger model change events on the models in the
document if the theme modifies any model properties.
'''
return self._theme
@theme.setter
def theme(self, theme):
if theme is None:
theme = default_theme
if self._theme is theme:
return
if isinstance(theme, string_types):
try:
self._theme = built_in_themes[theme]
except KeyError:
raise ValueError(
"{0} is not a built-in theme; available themes are "
"{1}".format(theme, ', '.join(built_in_themes.keys()))
)
elif isinstance(theme, Theme):
self._theme = theme
else:
raise ValueError("Theme must be a string or an instance of the Theme class")
for model in self._all_models.values():
self._theme.apply_to_model(model)
@property
def title(self):
''' A title for this document.
This title will be set on standalone HTML documents, but not e.g. when
``autoload_server`` is used.
'''
return self._title
@title.setter
def title(self, title):
self._set_title(title)
# Public methods ----------------------------------------------------------
def add_next_tick_callback(self, callback):
''' Add callback to be invoked once on the next tick of the event loop.
Args:
callback (callable) :
A callback function to execute on the next tick.
Returns:
NextTickCallback : can be used with ``remove_next_tick_callback``
.. note::
Next tick callbacks only work within the context of a Bokeh server
session. This function will no effect when Bokeh outputs to
standalone HTML or Jupyter notebook cells.
'''
from ..server.callbacks import NextTickCallback
cb = NextTickCallback(self, None)
return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_next_tick_callback)
def add_periodic_callback(self, callback, period_milliseconds):
''' Add a callback to be invoked on a session periodically.
Args:
callback (callable) :
A callback function to execute periodically
period_milliseconds (int) :
Number of milliseconds between each callback execution.
Returns:
PeriodicCallback : can be used with ``remove_periodic_callback``
.. note::
Periodic callbacks only work within the context of a Bokeh server
session. This function will no effect when Bokeh outputs to
standalone HTML or Jupyter notebook cells.
'''
from ..server.callbacks import PeriodicCallback
cb = PeriodicCallback(self,
None,
period_milliseconds)
return self._add_session_callback(cb, callback, one_shot=False, originator=self.add_periodic_callback)
def add_root(self, model, setter=None):
''' Add a model as a root of this Document.
Any changes to this model (including to other models referred to
by it) will trigger ``on_change`` callbacks registered on this
document.
Args:
model (Model) :
The model to add as a root of this document.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
'''
if model in self._roots:
return
self._push_all_models_freeze()
# TODO (bird) Should we do some kind of reporting of how many
# LayoutDOM's are in the document roots? In vanilla bokeh cases e.g.
# output_file more than one LayoutDOM is probably not going to go
# well. But in embedded cases, you may well want more than one.
try:
self._roots.append(model)
finally:
self._pop_all_models_freeze()
self._trigger_on_change(RootAddedEvent(self, model, setter))
def add_timeout_callback(self, callback, timeout_milliseconds):
''' Add callback to be invoked once, after a specified timeout passes.
Args:
callback (callable) :
A callback function to execute after timeout
timeout_milliseconds (int) :
Number of milliseconds before callback execution.
Returns:
TimeoutCallback : can be used with ``remove_timeout_callback``
.. note::
Timeout callbacks only work within the context of a Bokeh server
session. This function will no effect when Bokeh outputs to
standalone HTML or Jupyter notebook cells.
'''
from ..server.callbacks import TimeoutCallback
cb = TimeoutCallback(self,
None,
timeout_milliseconds)
return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_timeout_callback)
def apply_json_event(self, json):
event = loads(json, object_hook=Event.decode_json)
if not isinstance(event, Event):
log.warning('Could not decode event json: %s' % json)
else:
for obj in self._subscribed_models[event.event_name]:
obj._trigger_event(event)
def apply_json_patch(self, patch, setter=None):
    ''' Apply a JSON patch object and process any resulting events.

    Args:
        patch (JSON-data) :
            The JSON-object containing the patch to apply.

        setter (ClientSession or ServerSession or None, optional) :
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    Returns:
        None

    '''
    references_json = patch['references']
    events_json = patch['events']
    references = instantiate_references_json(references_json)

    # Use our existing model instances whenever we have them
    for obj in references.values():
        if obj.id in self._all_models:
            references[obj.id] = self._all_models[obj.id]

    # The model being changed isn't always in references so add it in
    for event_json in events_json:
        if 'model' in event_json:
            model_id = event_json['model']['id']
            if model_id in self._all_models:
                references[model_id] = self._all_models[model_id]

    initialize_references_json(references_json, references, setter)

    for event_json in events_json:
        if event_json['kind'] == 'ModelChanged':
            patched_id = event_json['model']['id']
            if patched_id not in self._all_models:
                if patched_id not in self._all_former_model_ids:
                    raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(patched_id)))
                else:
                    log.warning("Cannot apply patch to %s which is not in the document anymore" % (str(patched_id)))
                    # BUG FIX: this was ``break``, which silently dropped all
                    # remaining (possibly unrelated) events in this patch when
                    # one event targeted a removed model. Skip only the stale
                    # event and keep processing the rest.
                    continue
            patched_obj = self._all_models[patched_id]
            attr = event_json['attr']
            value = event_json['new']
            patched_obj.set_from_json(attr, value, models=references, setter=setter)
        elif event_json['kind'] == 'ColumnDataChanged':
            source_id = event_json['column_source']['id']
            if source_id not in self._all_models:
                raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(source_id)))
            source = self._all_models[source_id]
            value = event_json['new']
            source.set_from_json('data', value, models=references, setter=setter)
        elif event_json['kind'] == 'ColumnsStreamed':
            source_id = event_json['column_source']['id']
            if source_id not in self._all_models:
                raise RuntimeError("Cannot stream to %s which is not in the document" % (str(source_id)))
            source = self._all_models[source_id]
            data = event_json['data']
            rollover = event_json.get('rollover', None)
            source._stream(data, rollover, setter)
        elif event_json['kind'] == 'ColumnsPatched':
            source_id = event_json['column_source']['id']
            if source_id not in self._all_models:
                raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(source_id)))
            source = self._all_models[source_id]
            patches = event_json['patches']
            source.patch(patches, setter)
        elif event_json['kind'] == 'RootAdded':
            root_id = event_json['model']['id']
            root_obj = references[root_id]
            self.add_root(root_obj, setter)
        elif event_json['kind'] == 'RootRemoved':
            root_id = event_json['model']['id']
            root_obj = references[root_id]
            self.remove_root(root_obj, setter)
        elif event_json['kind'] == 'TitleChanged':
            self._set_title(event_json['title'], setter)
        else:
            raise RuntimeError("Unknown patch event " + repr(event_json))
def apply_json_patch_string(self, patch):
    ''' Apply a JSON patch provided as a string.

    Args:
        patch (str) : a JSON-encoded patch to parse and apply

    Returns:
        None

    '''
    self.apply_json_patch(loads(patch))
def clear(self):
    ''' Remove all content from the document but do not reset title.

    Returns:
        None

    '''
    self._push_all_models_freeze()
    try:
        # remove roots one at a time; the freeze defers recomputation of
        # the model index until all of them are gone
        while len(self._roots) > 0:
            self.remove_root(next(iter(self._roots)))
    finally:
        self._pop_all_models_freeze()
def destroy(self, session):
    ''' Tear down this document when its serving session is destroyed.

    Unsubscribes ``session`` from change notifications, detaches every
    model, drops all internal indexes, deletes any modules created for
    this document, and forces a garbage collection pass.

    Args:
        session : the session to remove from the on-change callbacks

    Returns:
        None

    '''
    self.remove_on_change(session)
    # probably better to implement a destroy protocol on models to
    # untangle everything, then the collect below might not be needed
    for m in self._all_models.values():
        m._document = None
        # drop the loop reference promptly so the model can be collected
        del m
    self._roots = []
    self._all_models = None
    self._all_models_by_name = None
    self._theme = None
    self._template = None
    self._session_context = None
    self.delete_modules()
    # local import: gc is only needed during teardown
    import gc
    gc.collect()
def delete_modules(self):
    ''' Clean up after any modules created by this Document when its session is
    destroyed.

    Removes the expected references to each module (``sys.modules`` and
    ``self._modules``) and logs an error if any unexpected referrers
    remain, since that would indicate a memory leak.

    Returns:
        None

    '''
    from gc import get_referrers
    from types import FrameType
    log.debug("Deleting %s modules for %s" % (len(self._modules), self))
    for module in self._modules:

        # Modules created for a Document should have three referrers at this point:
        #
        # - sys.modules
        # - self._modules
        # - a frame object
        #
        # This function will take care of removing these expected references.
        #
        # If there are any additional referrers, this probably means the module will be
        # leaked. Here we perform a detailed check that the only referrers are expected
        # ones. Otherwise issue an error log message with details.
        referrers = get_referrers(module)
        referrers = [x for x in referrers if x is not sys.modules]
        referrers = [x for x in referrers if x is not self._modules]
        referrers = [x for x in referrers if not isinstance(x, FrameType)]
        if len(referrers) != 0:
            log.error("Module %r has extra unexpected referrers! This could indicate a serious memory leak. Extra referrers: %r" % (module, referrers))

        # remove the reference from sys.modules
        if module.__name__ in sys.modules:
            del sys.modules[module.__name__]

    # remove the reference from self._modules
    self._modules = None

    # the frame reference will take care of itself
@classmethod
def from_json(cls, json):
    ''' Load a document from JSON.

    Args:
        json (JSON-data) :
            A JSON-encoded document to create a new Document from.

    Returns:
        Document :

    '''
    roots_json = json['roots']
    references_json = roots_json['references']

    # materialize model instances from their JSON references, then wire
    # up their properties
    references = instantiate_references_json(references_json)
    initialize_references_json(references_json, references)

    doc = Document()
    for root_id in roots_json['root_ids']:
        doc.add_root(references[root_id])
    doc.title = json['title']
    return doc
@classmethod
def from_json_string(cls, json):
    ''' Load a document from a JSON string.

    Args:
        json (str) :
            A string with a JSON-encoded document to create a new Document
            from.

    Returns:
        Document :

    '''
    return cls.from_json(loads(json))
def get_model_by_id(self, model_id):
    ''' Find the model for the given ID in this document, or ``None`` if it
    is not found.

    Args:
        model_id (str) : The ID of the model to search for

    Returns:
        Model or None

    '''
    try:
        return self._all_models[model_id]
    except KeyError:
        return None
def get_model_by_name(self, name):
    ''' Find the model for the given name in this document, or ``None`` if
    it is not found.

    Args:
        name (str) : The name of the model to search for

    Raises:
        ValueError : if more than one model has this name

    Returns:
        Model or None

    '''
    duplicate_msg = "Found more than one model named '%s'" % name
    return self._all_models_by_name.get_one(name, duplicate_msg)
def hold(self, policy="combine"):
    ''' Activate a document hold.

    While a hold is active, no model changes will be applied, or trigger
    callbacks. Once ``unhold`` is called, the events collected during the
    hold will be applied according to the hold policy.

    Args:
        policy ('combine' or 'collect', optional)
            Whether events collected during a hold should attempt to be
            combined (default: 'combine')

            When set to ``'collect'`` all events will be collected and
            replayed in order as-is when ``unhold`` is called.

            When set to ``'combine'`` Bokeh will attempt to combine
            compatible events together. Typically, different events that
            change the same property on the same model can be combined.
            For example, if the following sequence occurs:

            .. code-block:: python

                doc.hold('combine')
                slider.value = 10
                slider.value = 11
                slider.value = 12

            Then only *one* callback, for the last ``slider.value = 12``
            will be triggered.

    Returns:
        None

    .. note::
        ``hold`` only applies to document change events, i.e. setting
        properties on models. It does not apply to events such as
        ``ButtonClick``, etc.

    '''
    previous = self._hold
    # an already-active hold with a different policy wins; warn and bail
    if previous is not None and previous != policy:
        log.warning("hold already active with '%s', ignoring '%s'" % (previous, policy))
        return
    if policy not in HoldPolicy:
        raise ValueError("Unknown hold policy %r" % policy)
    self._hold = policy
def unhold(self):
    ''' Turn off any active document hold and apply any collected events.

    Returns:
        None

    '''
    # no-op if no hold is currently active
    if self._hold is None:
        return
    self._hold = None

    # swap out the held-event list before replaying, so callbacks that
    # trigger new events do not interleave with the replay
    pending, self._held_events = self._held_events, []
    for event in pending:
        self._trigger_on_change(event)
def on_change(self, *callbacks):
    ''' Provide callbacks to invoke if the document or any Model reachable
    from its roots changes.

    Each callback must accept a single ``event`` argument; duplicates are
    silently ignored.

    '''
    for callback in callbacks:
        if callback not in self._callbacks:
            _check_callback(callback, ('event',))
            self._callbacks[callback] = callback
def on_change_dispatch_to(self, receiver):
    ''' Register *receiver* so document change events are delivered to it
    via ``event.dispatch(receiver)``.

    '''
    if receiver not in self._callbacks:
        self._callbacks[receiver] = lambda event: event.dispatch(receiver)
def on_session_destroyed(self, *callbacks):
    ''' Provide callbacks to invoke when the session serving the Document
    is destroyed.

    Each callback must accept a single ``session_context`` argument.

    '''
    for cb in callbacks:
        _check_callback(cb, ('session_context',))
        self._session_destroyed_callbacks.add(cb)
def remove_next_tick_callback(self, callback_obj):
    ''' Remove a callback added earlier with ``add_next_tick_callback``.

    Args:
        callback_obj : a value returned from ``add_next_tick_callback``

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or has already been
        run or removed

    '''
    self._remove_session_callback(callback_obj,
                                  self.add_next_tick_callback)
def remove_on_change(self, *callbacks):
    ''' Remove callbacks added earlier with ``on_change``.

    Raises:
        KeyError, if any callback was never added

    '''
    for cb in callbacks:
        del self._callbacks[cb]
def remove_periodic_callback(self, callback_obj):
    ''' Remove a callback added earlier with ``add_periodic_callback``.

    Args:
        callback_obj : a value returned from ``add_periodic_callback``

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or has already been
        removed

    '''
    self._remove_session_callback(callback_obj,
                                  self.add_periodic_callback)
def remove_root(self, model, setter=None):
    ''' Remove a model as a root model of this Document.

    Changes to this model may still trigger ``on_change`` callbacks
    on this document, if the model is still referred to by other
    root models.

    Args:
        model (Model) :
            The model to remove from the roots of this document.

        setter (ClientSession or ServerSession or None, optional) :
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    '''
    if model not in self._roots:
        return  # TODO (bev) ValueError?
    self._push_all_models_freeze()
    try:
        self._roots.remove(model)
    finally:
        self._pop_all_models_freeze()
    self._trigger_on_change(RootRemovedEvent(self, model, setter))
def remove_timeout_callback(self, callback_obj):
    ''' Remove a callback added earlier with ``add_timeout_callback``.

    Args:
        callback_obj : a value returned from ``add_timeout_callback``

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or has already been
        run or removed

    '''
    self._remove_session_callback(callback_obj,
                                  self.add_timeout_callback)
def replace_with_json(self, json):
    ''' Overwrite everything in this document with the JSON-encoded
    document.

    Args:
        json (JSON-data) :
            A JSON-encoded document to overwrite this one.

    Returns:
        None

    '''
    # build a throwaway document, then move its contents into this one
    replacement = self.from_json(json)
    replacement._destructively_move(self)
def select(self, selector):
    ''' Query this document for objects that match the given selector.

    Args:
        selector (JSON-like query dictionary) : you can query by type or by
            name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``

    Returns:
        seq[Model]

    '''
    if self._is_single_string_selector(selector, 'name'):
        # special-case optimization: pure by-name queries can hit the
        # dedicated name index directly
        return self._all_models_by_name.get_all(selector['name'])
    return find(self._all_models.values(), selector)
def select_one(self, selector):
    ''' Query this document for objects that match the given selector.
    Raises an error if more than one object is found. Returns
    single matching object, or None if nothing is found.

    Args:
        selector (JSON-like query dictionary) : you can query by type or by
            name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``

    Returns:
        Model or None

    '''
    matches = list(self.select(selector))
    if len(matches) > 1:
        raise ValueError("Found more than one model matching %s: %r" % (selector, matches))
    return matches[0] if matches else None
def set_select(self, selector, updates):
    ''' Update objects that match a given selector with the specified
    attribute/value updates.

    Args:
        selector (JSON-like query dictionary) : you can query by type or by
            name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``

        updates (dict) : mapping of attribute names to new values

    Returns:
        None

    '''
    for obj in self.select(selector):
        for attr, value in updates.items():
            setattr(obj, attr, value)
def to_json(self):
    ''' Convert this document to a JSON object.

    Return:
        JSON-data

    '''
    # round-trip through a string, needed because our BokehJSONEncoder
    # serializes straight to a string
    return loads(self.to_json_string())
def to_json_string(self, indent=None):
    ''' Convert the document to a JSON string.

    Args:
        indent (int or None, optional) : number of spaces to indent, or
            None to suppress all newlines and indentation (default: None)

    Returns:
        str

    '''
    json = {
        'title'   : self.title,
        'roots'   : {
            'root_ids'   : [r.id for r in self._roots],
            'references' : references_json(self._all_models.values()),
        },
        'version' : __version__,
    }
    return serialize_json(json, indent=indent)
def validate(self):
    ''' Perform integrity checks on the models in this document.

    Returns:
        None

    '''
    for root in self.roots:
        check_integrity(root.references())
# Private methods ---------------------------------------------------------
def _add_session_callback(self, callback_obj, callback, one_shot, originator):
    ''' Internal implementation for adding session callbacks.

    Args:
        callback_obj (SessionCallback) :
            A session callback object that wraps a callable and is
            passed to ``trigger_on_change``.

        callback (callable) :
            A callable to execute when session events happen.

        one_shot (bool) :
            Whether the callback should immediately auto-remove itself
            after one execution.

        originator (callable) :
            The public ``add_*_callback`` method this registration came
            from; used as the bookkeeping key for later removal.

    Returns:
        SessionCallback : passed in as ``callback_obj``.

    Raises:
        ValueError, if the callback has been previously added

    '''
    if one_shot:
        @wraps(callback)
        def remove_then_invoke(*args, **kwargs):
            # remove BEFORE invoking, so a callback that raises is still
            # deregistered and can never fire a second time
            if callback_obj in self._session_callbacks:
                self._remove_session_callback(callback_obj, originator)
            return callback(*args, **kwargs)
        actual_callback = remove_then_invoke
    else:
        actual_callback = callback

    # ensure the callback always runs with this document as curdoc
    callback_obj._callback = self._wrap_with_self_as_curdoc(actual_callback)
    self._session_callbacks.add(callback_obj)
    # index by (originator, user callback) so remove_*_callback can find it
    self._callback_objs_by_callable[originator][callback].add(callback_obj)

    # emit event so the session is notified of the new callback
    self._trigger_on_change(SessionCallbackAdded(self, callback_obj))

    return callback_obj
def _destructively_move(self, dest_doc):
    ''' Move all data in this doc to the dest_doc, leaving this doc empty.

    Args:
        dest_doc (Document) :
            The Bokeh document to populate with data from this one

    Returns:
        None

    Raises:
        RuntimeError : if ``dest_doc`` is this document, or if a root
            fails to detach cleanly during the move

    '''
    if dest_doc is self:
        raise RuntimeError("Attempted to overwrite a document with itself")

    dest_doc.clear()
    # we have to remove ALL roots before adding any
    # to the new doc or else models referenced from multiple
    # roots could be in both docs at once, which isn't allowed.
    roots = []
    # freeze so the model index is recomputed only once, after all roots
    # are removed
    self._push_all_models_freeze()
    try:
        while self.roots:
            r = next(iter(self.roots))
            self.remove_root(r)
            roots.append(r)
    finally:
        self._pop_all_models_freeze()

    # sanity checks: every root must be fully detached and the index empty
    for r in roots:
        if r.document is not None:
            raise RuntimeError("Somehow we didn't detach %r" % (r))
    if len(self._all_models) != 0:
        raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))

    for r in roots:
        dest_doc.add_root(r)
    dest_doc.title = self.title
def _invalidate_all_models(self):
'''
'''
# if freeze count is > 0, we'll recompute on unfreeze
if self._all_models_freeze_count == 0:
self._recompute_all_models()
def _is_single_string_selector(self, selector, field):
    ''' Whether *selector* consists solely of ``field`` mapped to a
    string value.

    '''
    return (len(selector) == 1
            and field in selector
            and isinstance(selector[field], string_types))
def _notify_change(self, model, attr, old, new, hint=None, setter=None, callback_invoker=None):
    ''' Called by Model when it changes; builds a ``ModelChangedEvent``
    and triggers on-change processing.

    '''
    # keep the by-name index in sync when a model is renamed
    if attr == 'name':
        if old is not None:
            self._all_models_by_name.remove_value(old, model)
        if new is not None:
            self._all_models_by_name.add_value(new, model)

    # hinted events carry their own payload, so no serializable value needed
    serializable_new = None
    if hint is None:
        serializable_new = model.lookup(attr).serializable_value(model)

    self._trigger_on_change(ModelChangedEvent(self, model, attr, old, new,
                                              serializable_new, hint, setter,
                                              callback_invoker))
def _push_all_models_freeze(self):
'''
'''
self._all_models_freeze_count += 1
def _pop_all_models_freeze(self):
'''
'''
self._all_models_freeze_count -= 1
if self._all_models_freeze_count == 0:
self._recompute_all_models()
def _recompute_all_models(self):
    ''' Rebuild the flat model index from the current document roots.

    Models no longer reachable from any root are detached; newly
    reachable models are attached to this document.

    '''
    reachable = set()
    for root in self.roots:
        reachable.update(root.references())
    previous = set(self._all_models.values())

    by_id = {}
    by_name = MultiValuedDict()
    for model in reachable:
        by_id[model.id] = model
        if model.name is not None:
            by_name.add_value(model.name, model)

    # detach models that dropped out, remembering their ids so later
    # patch events targeting them can be reported gracefully
    for model in previous - reachable:
        self._all_former_model_ids.add(model.id)
        model._detach_document()
    # then attach the newly reachable models
    for model in reachable - previous:
        model._attach_document(self)

    self._all_models = by_id
    self._all_models_by_name = by_name
def _remove_session_callback(self, callback_obj, originator):
    ''' Remove a callback added earlier with ``add_periodic_callback``,
    ``add_timeout_callback``, or ``add_next_tick_callback``.

    Args:
        callback_obj (SessionCallback) :
            The session callback object to deregister.

        originator (callable) :
            The public ``add_*_callback`` method used to register it;
            keys the per-originator bookkeeping index.

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or was already removed

    '''
    try:
        callback_objs = [callback_obj]
        # set.remove raises KeyError if the callback is unknown; the
        # outer except below converts that into a ValueError
        self._session_callbacks.remove(callback_obj)
        # scrub the callback object out of the per-callable index,
        # dropping empty entries as we go
        for cb, cb_objs in list(self._callback_objs_by_callable[originator].items()):
            try:
                cb_objs.remove(callback_obj)
                if not cb_objs:
                    del self._callback_objs_by_callable[originator][cb]
            except KeyError:
                # this callable's set didn't contain the object; keep looking
                pass
    except KeyError:
        raise ValueError("callback already ran or was already removed, cannot be removed again")
    # emit event so the session is notified and can remove the callback
    for callback_obj in callback_objs:
        self._trigger_on_change(SessionCallbackRemoved(self, callback_obj))
def _set_title(self, title, setter=None):
'''
'''
if title is None:
raise ValueError("Document title may not be None")
if self._title != title:
self._title = title
self._trigger_on_change(TitleChangedEvent(self, title, setter))
def _trigger_on_change(self, event):
'''
'''
if self._hold == "collect":
self._held_events.append(event)
return
elif self._hold == "combine":
_combine_document_events(event, self._held_events)
return
if event.callback_invoker is not None:
self._with_self_as_curdoc(event.callback_invoker)
def invoke_callbacks():
for cb in self._callbacks.values():
cb(event)
self._with_self_as_curdoc(invoke_callbacks)
def _with_self_as_curdoc(self, f):
    ''' Invoke *f* with this document installed as the current document,
    restoring the previous curdoc afterwards (even on error).

    '''
    from bokeh.io.doc import set_curdoc, curdoc
    old_doc = curdoc()
    try:
        # callables marked "nolock" get a proxy that skips locking
        new_doc = UnlockedDocumentProxy(self) if getattr(f, "nolock", False) else self
        set_curdoc(new_doc)
        return f()
    finally:
        set_curdoc(old_doc)
def _wrap_with_self_as_curdoc(self, f):
'''
'''
doc = self
@wraps(f)
def wrapper(*args, **kwargs):
@wraps(f)
def invoke():
return f(*args, **kwargs)
return doc._with_self_as_curdoc(invoke)
return wrapper
def _combine_document_events(new_event, old_events):
''' Attempt to combine a new event with a list of previous events.
The ``old_event`` will be scanned in reverse, and ``.combine(new_event)``
will be called on each. If a combination can be made, the function
will return immediately. Otherwise, ``new_event`` will be appended to
``old_events``.
Args:
new_event (DocumentChangedEvent) :
The new event to attempt to combine
old_events (list[DocumentChangedEvent])
A list of previous events to attempt to combine new_event with
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
None
'''
for event in reversed(old_events):
if event.combine(new_event):
return
# no combination was possible
old_events.append(new_event)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | -2,561,572,242,748,076,000 | 33.159657 | 155 | 0.561438 | false |