from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import mock
import os
import subprocess
from helpers import unittest
from luigi import six
import luigi
import luigi.cmdline
from luigi.mock import MockTarget
class SomeTask(luigi.Task):
n = luigi.IntParameter()
def output(self):
return MockTarget('/tmp/test_%d' % self.n)
def run(self):
f = self.output().open('w')
f.write('done')
f.close()
class AmbiguousClass(luigi.Task):
pass
class AmbiguousClass(luigi.Task): # NOQA
pass
class TaskWithSameName(luigi.Task):
def run(self):
self.x = 42
class TaskWithSameName(luigi.Task): # NOQA
# there should be no ambiguity
def run(self):
self.x = 43
class WriteToFile(luigi.Task):
filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.filename)
def run(self):
f = self.output().open('w')
print('foo', file=f)
f.close()
class FooBaseClass(luigi.Task):
x = luigi.Parameter(default='foo_base_default')
class FooSubClass(FooBaseClass):
pass
class ATaskThatFails(luigi.Task):
def run(self):
raise ValueError()
class CmdlineTest(unittest.TestCase):
def setUp(self):
MockTarget.fs.clear()
@mock.patch("logging.getLogger")
def test_cmdline_main_task_cls(self, logger):
luigi.run(['--local-scheduler', '--no-lock', '--n', '100'], main_task_cls=SomeTask)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_100': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_local_scheduler(self, logger):
luigi.run(['SomeTask', '--no-lock', '--n', '101'], local_scheduler=True)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_101': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_other_task(self, logger):
luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000'])
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_ambiguous_class(self, logger):
self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass'])
@mock.patch("logging.getLogger")
@mock.patch("logging.StreamHandler")
def test_setup_interface_logging(self, handler, logger):
handler.return_value = mock.Mock(name="stream_handler")
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
luigi.interface.setup_interface_logging()
self.assertEqual([mock.call(handler.return_value)], logger.return_value.addHandler.call_args_list)
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
if six.PY2:
error = ConfigParser.NoSectionError
else:
error = KeyError
self.assertRaises(error, luigi.interface.setup_interface_logging, '/blah')
@mock.patch("warnings.warn")
@mock.patch("luigi.interface.setup_interface_logging")
def test_cmdline_logger(self, setup_mock, warn):
with mock.patch("luigi.interface.core") as env_params:
env_params.return_value.logging_conf_file = ''
env_params.return_value.log_level = 'DEBUG'
env_params.return_value.parallel_scheduling_processes = 1
luigi.run(['SomeTask', '--n', '7', '--local-scheduler', '--no-lock'])
self.assertEqual([mock.call('', 'DEBUG')], setup_mock.call_args_list)
with mock.patch("luigi.configuration.get_config") as getconf:
getconf.return_value.get.side_effect = ConfigParser.NoOptionError(section='foo', option='bar')
getconf.return_value.getint.return_value = 0
luigi.interface.setup_interface_logging.call_args_list = []
luigi.run(['SomeTask', '--n', '42', '--local-scheduler', '--no-lock'])
self.assertEqual([], setup_mock.call_args_list)
@mock.patch('argparse.ArgumentParser.print_usage')
def test_non_existent_class(self, print_usage):
self.assertRaises(luigi.task_register.TaskClassNotFoundException,
luigi.run, ['--local-scheduler', '--no-lock', 'XYZ'])
@mock.patch('argparse.ArgumentParser.print_usage')
def test_no_task(self, print_usage):
self.assertRaises(SystemExit, luigi.run, ['--local-scheduler', '--no-lock'])
def test_luigid_logging_conf(self):
with mock.patch('luigi.server.run') as server_run, \
mock.patch('logging.config.fileConfig') as fileConfig:
luigi.cmdline.luigid([])
self.assertTrue(server_run.called)
# the default test configuration specifies a logging conf file
fileConfig.assert_called_with("test/testconfig/logging.cfg")
def test_luigid_no_configure_logging(self):
with mock.patch('luigi.server.run') as server_run, \
mock.patch('logging.basicConfig') as basicConfig, \
mock.patch('luigi.configuration.get_config') as get_config:
get_config.return_value.getboolean.return_value = True # no_configure_logging=True
luigi.cmdline.luigid([])
self.assertTrue(server_run.called)
self.assertTrue(basicConfig.called)
def test_luigid_no_logging_conf(self):
with mock.patch('luigi.server.run') as server_run, \
mock.patch('logging.basicConfig') as basicConfig, \
mock.patch('luigi.configuration.get_config') as get_config:
get_config.return_value.getboolean.return_value = False # no_configure_logging=False
get_config.return_value.get.return_value = None # logging_conf_file=None
luigi.cmdline.luigid([])
self.assertTrue(server_run.called)
self.assertTrue(basicConfig.called)
def test_luigid_missing_logging_conf(self):
with mock.patch('luigi.server.run') as server_run, \
mock.patch('logging.basicConfig') as basicConfig, \
mock.patch('luigi.configuration.get_config') as get_config:
get_config.return_value.getboolean.return_value = False # no_configure_logging=False
get_config.return_value.get.return_value = "nonexistent.cfg" # logging_conf_file=None
self.assertRaises(Exception, luigi.cmdline.luigid, [])
self.assertFalse(server_run.called)
self.assertFalse(basicConfig.called)
class InvokeOverCmdlineTest(unittest.TestCase):
def _run_cmdline(self, args):
env = os.environ.copy()
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + ':.:test'
print('Running: ' + ' '.join(args)) # To simplify rerunning failing tests
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = p.communicate() # Unfortunately subprocess.check_output is 2.7+
return p.returncode, stdout, stderr
def test_bin_luigi(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['./bin/luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', 'test/cmdline_test.py', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_python_module(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', '-m', 'luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_direct_python_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_python_module_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_bin_luigi_help_no_module(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertTrue(stdout.find(b'usage:') != -1)
def test_bin_luigi_help_not_spammy(self):
"""
Test that `luigi --help` fits on one screen
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertLessEqual(len(stdout.splitlines()), 15)
def test_bin_luigi_all_help_spammy(self):
"""
Test that `luigi --help-all` doesn't fit on a screen
Naturally, I don't mind this test breaking, but it convinces me that
the "not spammy" test is actually testing what it claims to.
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help-all'])
self.assertGreater(len(stdout.splitlines()), 15)
def test_error_message_on_misspelled_task(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', 'RangeDaili'])
self.assertTrue(stderr.find(b'RangeDaily') != -1)
def test_bin_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_python_module_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_bin_luigi_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_python_module_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_options_before_task(self):
args = ['./bin/luigi', '--module', 'cmdline_test', '--no-lock', '--local-scheduler', '--FooBaseClass-x', 'hello', 'FooBaseClass']
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
def test_bin_fail_on_unrecognized_args(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--no-lock', '--local-scheduler', 'Task', '--unknown-param', 'hiiii'])
self.assertNotEqual(0, returncode)
def test_deps_py_script(self):
"""
Test the deps.py script.
"""
args = 'python luigi/tools/deps.py --module examples.top_artists ArtistToplistToDatabase --date-interval 2015-W10'.split()
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
self.assertTrue(stdout.find(b'[FileSystem] data/streams_2015_03_04_faked.tsv') != -1)
self.assertTrue(stdout.find(b'[DB] localhost') != -1)
def test_deps_tree_py_script(self):
"""
Test the deps_tree.py script.
"""
args = 'python luigi/tools/deps_tree.py --module examples.top_artists AggregateArtists --date-interval 2012-06'.split()
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
for i in range(1, 30):
self.assertTrue(stdout.find(("-[Streams-{{'date': '2012-06-{0}'}}".format(str(i).zfill(2))).encode('utf-8')) != -1)
def test_bin_mentions_misspelled_task(self):
"""
Test that the error message is informative when a task is misspelled.
In particular it should say that the task is misspelled and not that
the local parameters do not exist.
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'HooBaseClass', '--x 5'])
self.assertTrue(stderr.find(b'FooBaseClass') != -1)
self.assertTrue(stderr.find(b'--x') != 0)
def test_stack_trace_has_no_inner(self):
"""
Test that the stack trace for failing tasks are short
The stack trace shouldn't contain unreasonably much implementation
details of luigi In particular it should say that the task is
misspelled and not that the local parameters do not exist.
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'ATaskThatFails', '--local-scheduler', '--no-lock'])
print(stdout)
self.assertFalse(stdout.find(b"run() got an unexpected keyword argument 'tracking_url_callback'") != -1)
self.assertFalse(stdout.find(b'During handling of the above exception, another exception occurred') != -1)
if __name__ == '__main__':
# Needed for one of the tests
luigi.run()
"""
* Copyright 2007,2008,2009 John C. Gunther
* Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
*
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*
"""
from pyjamas import DOM
from pyjamas import Window
from pyjamas.ui import Event
from pyjamas.ui.AbsolutePanel import AbsolutePanel
from pyjamas.ui.Composite import Composite
from pyjamas.ui.Grid import Grid
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui import HasVerticalAlignment
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Image import Image
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.UIObject import UIObject
from pyjamas.ui.Widget import Widget
from pyjamas.chart.GChartConsts import NAI, DEFAULT_X_CHARTSIZE, DEFAULT_Y_CHARTSIZE
from pyjamas.chart.GChartConsts import USE_CSS
from pyjamas.chart.GChartConsts import Y_AXIS
from pyjamas.chart.GChartConsts import DEFAULT_BLANK_IMAGE_URL_FULLPATH
from pyjamas.chart.GChartConsts import DEFAULT_SYMBOL_BORDER_COLORS
from pyjamas.chart.GChartConsts import DEFAULT_LEGEND_BACKGROUND_COLOR
from pyjamas.chart.GChartConsts import N_SYSTEM_CURVES
from pyjamas.chart.GChartConsts import DEFAULT_LEGEND_BORDER_COLOR
from pyjamas.chart.GChartConsts import DEFAULT_LEGEND_BORDER_WIDTH
from pyjamas.chart.GChartConsts import DEFAULT_LEGEND_BORDER_STYLE
from pyjamas.chart.GChartConsts import DEFAULT_FONT_COLOR
from pyjamas.chart.GChartConsts import DEFAULT_LEGEND_FONTSIZE
from pyjamas.chart.GChartConsts import DEFAULT_FONT_STYLE
from pyjamas.chart.GChartConsts import DEFAULT_FONT_WEIGHT
from pyjamas.chart.GChartConsts import DEFAULT_PLOTAREA_BACKGROUND_COLOR
from pyjamas.chart.GChartConsts import DEFAULT_PLOTAREA_BORDER_COLOR
from pyjamas.chart.GChartConsts import DEFAULT_PLOTAREA_BORDER_STYLE
from pyjamas.chart.GChartConsts import DEFAULT_PLOTAREA_BORDER_WIDTH
from pyjamas.chart.GChartConsts import PLOTAREA_ID
from pyjamas.chart.GChartConsts import TITLE_ID
from pyjamas.chart.GChartConsts import YAXIS_ID
from pyjamas.chart.GChartConsts import YTICKS_ID
from pyjamas.chart.GChartConsts import YGRIDLINES_ID
from pyjamas.chart.GChartConsts import YLABEL_ID
from pyjamas.chart.GChartConsts import Y2AXIS_ID
from pyjamas.chart.GChartConsts import Y2TICKS_ID
from pyjamas.chart.GChartConsts import Y2GRIDLINES_ID
from pyjamas.chart.GChartConsts import Y2LABEL_ID
from pyjamas.chart.GChartConsts import LEGEND_ID
from pyjamas.chart.GChartConsts import XAXIS_ID
from pyjamas.chart.GChartConsts import XTICKS_ID
from pyjamas.chart.GChartConsts import XGRIDLINES_ID
from pyjamas.chart.GChartConsts import XLABEL_ID
from pyjamas.chart.GChartConsts import FOOTNOTES_ID
from pyjamas.chart.GChartConsts import HOVER_CURSOR_ID
from pyjamas.chart.GChartConsts import HOVER_ANNOTATION_ID
from pyjamas.chart.GChartConsts import N_PRE_SYSTEM_CURVES
from pyjamas.chart.GChartConsts import N_POST_SYSTEM_CURVES
from pyjamas.chart.GChartConsts import DEFAULT_GRID_COLOR
from pyjamas.chart.GChartConsts import GRID_BORDER_STYLE
from pyjamas.chart.GChartConsts import GRID_BORDER_WIDTH
from pyjamas.chart.GChartConsts import DEFAULT_GRID_WIDTH
from pyjamas.chart.GChartConsts import DEFAULT_GRID_HEIGHT
from pyjamas.chart.GChartConsts import TICK_BORDER_STYLE
from pyjamas.chart.GChartConsts import TICK_BORDER_WIDTH
from pyjamas.chart.GChartConsts import Y2_AXIS
from pyjamas.chart.GChartConsts import DEFAULT_TITLE_THICKNESS
from pyjamas.chart.GChartConsts import DEFAULT_FOOTNOTES_THICKNESS
from pyjamas.chart.GChartConsts import TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND
from pyjamas.chart.GChartConsts import TRANSPARENT_BORDER_COLOR
from pyjamas.chart.GChartConsts import YAxisId
from pyjamas.chart import Double
from pyjamas.chart import AnnotationLocation
from pyjamas.chart import SymbolType
from pyjamas.chart import TickLocation
from pyjamas.chart import TouchedPointUpdateOption
from pyjamas.chart.Curve import Curve
from pyjamas.chart import GChartUtil
from pyjamas.chart.GChartWidgets import PlotPanel
from pyjamas.chart import GChartWidgets
import pygwt
from pyjamas.chart.Axis import XAxis, YAxis, Y2Axis
global canvasFactory
canvasFactory = None
"""*
*
* Tells GChart how to create the canvas widgets it needs
* (specifically, widgets that implement GChart's
* <tt>GChartCanvasLite</tt> interface) to render your
* charts using an external vector graphics library. <p>
*
* You must define a class that implements
* <tt>GChartCanvasFactory</tt> and then pass an instance of that
* class to this method, if you want to have the fast, crisply drawn
* connecting lines, polygonal areas, and 2-D pie slices that only a
* vector graphics library can provide.
* <p>
*
* @see GChartCanvasFactory GChartCanvasFactory
* @see GChartCanvasLite GChartCanvasLite
* @see #getCanvasFactory getCanvasFactory
* @see GChart.Symbol#setFillSpacing setFillSpacing
* @see GChart.Symbol#setFillThickness setFillThickness
*
"""
def setCanvasFactory(factory):
global canvasFactory
canvasFactory = factory
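# The doc block above explains how an external vector-graphics library is
# wired in. As a hedged illustration only: a factory registration might look
# roughly like the commented sketch below. The `create` method name and the
# concrete canvas class are assumptions here -- the actual GChartCanvasFactory
# and GChartCanvasLite interfaces are defined by the canvas add-on you use,
# not by this module.
#
#   class MyCanvasFactory:              # hypothetical GChartCanvasFactory
#       def create(self):
#           # return a widget implementing the GChartCanvasLite interface
#           return MyCanvasLiteWidget()  # hypothetical canvas widget
#
#   setCanvasFactory(MyCanvasFactory())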
"""*
* Returns the GChart class' canvas factory, or <tt>None</tt>
* if no canvas factory has been specified.
*
* @return the previously specified canvas factory
*
* @see #setCanvasFactory setCanvasFactory
*
"""
def getCanvasFactory():
global canvasFactory
return canvasFactory
# Is the symbol type one of the special ANCHOR_MOUSE types,
# whose position varies with the mouse cursor location?
def isMouseAnchored(symbolType):
return (SymbolType.ANCHOR_MOUSE == symbolType or
SymbolType.ANCHOR_MOUSE_SNAP_TO_X == symbolType or
SymbolType.ANCHOR_MOUSE_SNAP_TO_Y == symbolType)
from pyjamas.ui.ClickListener import ClickHandler
from pyjamas.ui.KeyboardListener import KeyboardHandler
from pyjamas.ui.FocusListener import FocusHandler
from pyjamas.ui.MouseListener import MouseHandler
class GChart (Composite, FocusHandler, KeyboardHandler,
MouseHandler, ClickHandler):
"""*
* Instantiates a GChart with a curve display region of
* the specified size.
*
*
* @param xChartSize the width of the curve display region, in pixels.
* @param yChartSize the height of the curve display region, in pixels.
*
* @see #setXChartSize setXChartSize
* @see #setYChartSize setYChartSize
* @see #setChartSize setChartSize
"""
def __init__(self,
**kwargs):
self.chartTitle = None
self.hoverParameterInterpreter = None
self.hoverTouchingEnabled = True
self.defaultSymbolBorderColors = DEFAULT_SYMBOL_BORDER_COLORS
# creates canvas Widgets GChart needs for *_CANVAS symbol types.
#self.canvasFactory = None
# outer container needed so CSS-defined paddings don't interfere with positioning
self.chartPanel = SimplePanel()
self.borderWidth = USE_CSS
self.borderStyle = USE_CSS
self.borderColor = USE_CSS
self.backgroundColor = USE_CSS
self.blankImageURL = None
self.chartDecorationsChanged = True
# collection of curves associated with this chart.
self.curves = []
self.fontFamily = USE_CSS
self.footnotesThickness = NAI
self.legendBackgroundColor = DEFAULT_LEGEND_BACKGROUND_COLOR
self.legendBorderColor = DEFAULT_LEGEND_BORDER_COLOR
self.legendBorderWidth = DEFAULT_LEGEND_BORDER_WIDTH
self.legendBorderStyle = DEFAULT_LEGEND_BORDER_STYLE
self.legendThickness = NAI
self.chartFootnotes = None
self.chartFootnotesLeftJustified = True
self.legendVisible = True
self.legendFontColor = DEFAULT_FONT_COLOR
self.legendFontSize = DEFAULT_LEGEND_FONTSIZE
self.legendFontStyle = DEFAULT_FONT_STYLE
self.legendFontWeight = DEFAULT_FONT_WEIGHT
self.initialPieSliceOrientation = 0.0
"""
* Contains the plotting region, as well as axes, ticks, and
* tick-labels associated with that region. Note that tickText
* must be centered on the ticks--placing them on the same
* AbsolutePanel as the ticks/plots facilitates self.
*
"""
self.plotPanel = PlotPanel(self)
self.padding = USE_CSS
self.optimizeForMemory = False
self.clipToPlotArea = False
self.clipToDecoratedChart = False
self.titleThickness = NAI
self.wasUnloaded = False
self.addSystemCurves(); # must come first: later lines use system curves
self.xAxis = XAxis(self)
self.yAxis = YAxis(self)
self.y2Axis = Y2Axis(self)
"""
* See the block comment at top of "class GChart" for a detailed
* discussion/rationale for GChart's (very minimal) support of
* stylenames. Would like deeper support if I can ever figure out
* how to do it without hamstringing future versions by locking
* them into a particular implementation I might need to change
* later on. In particular, I don't know how to provide such "deep"
* stylenames that also work consistently with canvas-rendered
* curves.
"""
if not kwargs.has_key('StyleName'):
kwargs['StyleName'] = "gchart-GChart"
if not kwargs.has_key('XChartSize'):
kwargs['XChartSize'] = DEFAULT_X_CHARTSIZE
if not kwargs.has_key('YChartSize'):
kwargs['YChartSize'] = DEFAULT_Y_CHARTSIZE
# Note: plotPanel (where real chart resides) won't get
# added to chartPanel (top-level do-nothing container for
# padding and such) until AFTER first update; FF2 has some
# serious performance problems otherwise for common usage
# scenarios with large widget-count pages.
Composite.__init__(self, self.chartPanel, **kwargs)
FocusHandler.__init__(self)
KeyboardHandler.__init__(self)
ClickHandler.__init__(self)
MouseHandler.__init__(self)
def getCanvasFactory(self):
return getCanvasFactory()
def getLastPieSliceOrientation(self):
return self.lastPieSliceOrientation
def setLastPieSliceOrientation(self, lastOrientation):
self.lastPieSliceOrientation = lastOrientation%1.0
"""* Sets the default initial orientation for pie slices.
**
** The default initial orientation is used as the first pie
** slice's first edge's orientation setting only if the symbol associated
** with that pie slice has the default, undefined, orientation
** setting of <tt>Double.NaN</tt>.
** <p>
** The default value of this setting is 0, which corresponds
** to due south (6 o'clock). The value specifies the
** fraction of a complete clockwise rotation, beginning
** at due south required to reach the first edge of the
** pie slice.
**
** @see Symbol#setPieSliceOrientation setPieSliceOrientation
**
** @param orientation the orientation to use for the first
** edge of the first pie slice in this GChart, in cases
** in which that first pie slice's orientation is undefined
** (<tt>Double.NaN</tt>).
*"""
def setInitialPieSliceOrientation(self, orientation):
if orientation < 0 or orientation >=1:
raise IllegalArgumentException(
"orientation="+orientation+"; "+
"orientation must be >=0 and < 1.")
self.initialPieSliceOrientation = orientation
self.invalidateAllSlices()
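# Worked example of the orientation fraction described above (a sketch, not
# part of the API): 0.0 keeps the first pie slice edge at 6 o'clock (due
# south); 0.25 is a quarter turn clockwise from there, i.e. 9 o'clock (due
# west); 0.5 is 12 o'clock. For instance:
#
#   chart.setInitialPieSliceOrientation(0.25)   # first edge at 9 o'clock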
"""*
** Returns a previously specified initial pie slice orientation.
**
** @return the fraction of a clockwise rotation, beginning
** from the 6 o'clock position, needed to reach the default
** initial pie slice orientation.
**
** @see #setInitialPieSliceOrientation
** setInitialPieSliceOrientation
*"""
def getInitialPieSliceOrientation(self):
return self.initialPieSliceOrientation
# adds system curves GChart uses to render title, ticks, etc.
def addSystemCurves(self):
# Must be first: other methods assume sys curves exist
for i in range(N_SYSTEM_CURVES):
c = Curve(self, i)
self.curves.append(c)
# Required rendering panels are added lazily, later on
# define (or default) properties, points on, system curves
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setSymbolType(SymbolType.BOX_SOUTHEAST)
c.getSymbol().setBackgroundColor(DEFAULT_PLOTAREA_BACKGROUND_COLOR)
c.getSymbol().setBorderColor(DEFAULT_PLOTAREA_BORDER_COLOR)
c.getSymbol().setBorderStyle(DEFAULT_PLOTAREA_BORDER_STYLE)
c.getSymbol().setBorderWidth(DEFAULT_PLOTAREA_BORDER_WIDTH)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(-Double.MAX_VALUE,Double.MAX_VALUE)
c = self.getSystemCurve(TITLE_ID)
c.getSymbol().setSymbolType(SymbolType.ANCHOR_NORTHWEST)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(0,0)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(YAXIS_ID)
c.getSymbol().setSymbolType(SymbolType.XGRIDLINE)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(GRID_BORDER_STYLE)
c.getSymbol().setBorderWidth(GRID_BORDER_WIDTH)
c.getSymbol().setWidth(DEFAULT_GRID_WIDTH)
c.getSymbol().setHeight(DEFAULT_GRID_HEIGHT)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(-Double.MAX_VALUE,-Double.MAX_VALUE)
c = self.getSystemCurve(YTICKS_ID)
c.getSymbol().setSymbolType(SymbolType.BOX_WEST)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(TICK_BORDER_STYLE)
c.getSymbol().setBorderWidth(TICK_BORDER_WIDTH)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
# points, annotation locations added when ticks are
c = self.getSystemCurve(YGRIDLINES_ID)
c.getSymbol().setSymbolType(SymbolType.YGRIDLINE)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(GRID_BORDER_STYLE)
c.getSymbol().setBorderWidth(GRID_BORDER_WIDTH)
c.getSymbol().setWidth(DEFAULT_GRID_WIDTH)
c.getSymbol().setHeight(DEFAULT_GRID_HEIGHT)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c = self.getSystemCurve(YLABEL_ID)
c.getSymbol().setSymbolType(SymbolType.ANCHOR_WEST)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(0,0)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(Y2AXIS_ID)
c.setYAxis(Y2_AXIS)
c.getSymbol().setSymbolType(SymbolType.XGRIDLINE)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(GRID_BORDER_STYLE)
c.getSymbol().setBorderWidth(GRID_BORDER_WIDTH)
c.getSymbol().setWidth(DEFAULT_GRID_WIDTH)
c.getSymbol().setHeight(DEFAULT_GRID_HEIGHT)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(Double.MAX_VALUE,-Double.MAX_VALUE)
c = self.getSystemCurve(Y2TICKS_ID)
c.setYAxis(Y2_AXIS)
c.getSymbol().setSymbolType(SymbolType.BOX_EAST)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(TICK_BORDER_STYLE)
c.getSymbol().setBorderWidth(TICK_BORDER_WIDTH)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c = self.getSystemCurve(Y2GRIDLINES_ID)
c.setYAxis(Y2_AXIS)
c.getSymbol().setSymbolType(SymbolType.YGRIDLINE)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(GRID_BORDER_STYLE)
c.getSymbol().setBorderWidth(GRID_BORDER_WIDTH)
c.getSymbol().setWidth(DEFAULT_GRID_WIDTH)
c.getSymbol().setHeight(DEFAULT_GRID_HEIGHT)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c = self.getSystemCurve(Y2LABEL_ID)
c.getSymbol().setSymbolType(SymbolType.ANCHOR_EAST)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(0,0)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(LEGEND_ID)
c.getSymbol().setSymbolType(SymbolType.ANCHOR_EAST)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(0,0)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(XAXIS_ID)
c.getSymbol().setSymbolType(SymbolType.YGRIDLINE)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(GRID_BORDER_STYLE)
c.getSymbol().setBorderWidth(GRID_BORDER_WIDTH)
c.getSymbol().setWidth(DEFAULT_GRID_WIDTH)
c.getSymbol().setHeight(DEFAULT_GRID_HEIGHT)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(-Double.MAX_VALUE,-Double.MAX_VALUE)
# tick thickness and length get set in the axis constructors
c = self.getSystemCurve(XTICKS_ID)
c.getSymbol().setSymbolType(SymbolType.BOX_SOUTH)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(TICK_BORDER_STYLE)
c.getSymbol().setBorderWidth(TICK_BORDER_WIDTH)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c = self.getSystemCurve(XGRIDLINES_ID)
c.getSymbol().setSymbolType(SymbolType.XGRIDLINE)
c.getSymbol().setBackgroundColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderColor(DEFAULT_GRID_COLOR)
c.getSymbol().setBorderStyle(GRID_BORDER_STYLE)
c.getSymbol().setBorderWidth(GRID_BORDER_WIDTH)
c.getSymbol().setWidth(DEFAULT_GRID_WIDTH)
c.getSymbol().setHeight(DEFAULT_GRID_HEIGHT)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c = self.getSystemCurve(XLABEL_ID)
c.getSymbol().setSymbolType(SymbolType.ANCHOR_SOUTH)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(0,0)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(FOOTNOTES_ID)
c.getSymbol().setSymbolType(SymbolType.ANCHOR_SOUTHWEST)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(0,0)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(HOVER_ANNOTATION_ID)
c.setVisible(False); # initially no hover annotation
c.getSymbol().setSymbolType(SymbolType.NONE)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(Double.NaN,Double.NaN)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
c = self.getSystemCurve(HOVER_CURSOR_ID)
c.setVisible(False); # initially no hover selection
c.getSymbol().setSymbolType(SymbolType.NONE)
c.getSymbol().setHoverAnnotationEnabled(False)
c.getSymbol().setHoverSelectionEnabled(False)
c.addPoint(Double.NaN,Double.NaN)
c.getPoint().setAnnotationLocation(AnnotationLocation.CENTER)
# external "curve count" should now be 0 (system curves don't count)
if self.getNCurves() != 0:
raise IllegalStateException("self.getNCurves() != 0. Probably a GChart bug.")
"""
* Updates the system curves that represent chart
* decorations (axis labels, title, ticks, etc.).<p>
*
* Note that all x, y shifts are relative to the "anchoring"
* symbol type locations defined once and for all in the
* addSystemCurves method above.
*
"""
def updateDecorations(self, xChartSizeDecorated):
# x-axis label
self.getSystemCurve(XLABEL_ID).getPoint(0).setAnnotationWidget(
self.getXAxis().getAxisLabel(), self.getXChartSize(),
self.getXAxis().getAxisLabelThickness())
self.getSystemCurve(XLABEL_ID).getPoint(0).setAnnotationYShift(
- self.getXAxis().getTickLabelThickness(False)
- self.getXAxis().getTickSpace()
- self.getXAxis().getTickLabelPadding()
- self.getXAxis().getAxisLabelThickness()/2)
# y-axis label
self.getSystemCurve(YLABEL_ID).getPoint(0).setAnnotationWidget(
self.getYAxis().getAxisLabel(),
self.getYAxis().getAxisLabelThickness(),
self.getYChartSize())
self.getSystemCurve(YLABEL_ID).getPoint(0).setAnnotationXShift(
- self.getYAxis().getTickLabelThickness(False)
- self.getYAxis().getTickSpace()
- self.getYAxis().getTickLabelPadding()
- self.getYAxis().getAxisLabelThickness()/2)
# y2-axis label
self.getSystemCurve(Y2LABEL_ID).getPoint(0).setAnnotationWidget(
self.getY2Axis().getAxisLabel(),
self.getY2Axis().getAxisLabelThickness(),
self.getYChartSize())
self.getSystemCurve(Y2LABEL_ID).getPoint(0).setAnnotationXShift(
+ self.getY2Axis().getTickLabelThickness(False)
+ self.getY2Axis().getTickSpace()
+ self.getY2Axis().getTickLabelPadding()
+ self.getY2Axis().getAxisLabelThickness()/2)
# legend
legend = None
if self.isLegendVisible() and 0 < self.getNVisibleCurvesOnLegend():
legend = self.createLegend(self.plotPanel)
self.getSystemCurve(LEGEND_ID).getPoint(0).setAnnotationWidget(
legend, self.getLegendThickness(), self.getYChartSize())
self.getSystemCurve(LEGEND_ID).getPoint(0).setAnnotationXShift(
+ self.getY2Axis().getTickLabelThickness(False)
+ self.getY2Axis().getTickSpace()
+ self.getY2Axis().getTickLabelPadding()
+ self.getY2Axis().getAxisLabelThickness()
+ self.getLegendThickness()/2 )
# title
shiftToLeftEdge = (- self.getYAxis().getAxisLabelThickness()
- self.getYAxis().getTickLabelThickness(False)
- self.getYAxis().getTickSpace()
- self.getYAxis().getTickLabelPadding())
shiftToHorizontalMidpoint = shiftToLeftEdge + xChartSizeDecorated/2
self.getSystemCurve(TITLE_ID).getPoint(0).setAnnotationWidget(
self.getChartTitle(), xChartSizeDecorated,
self.getChartTitleThickness())
self.getSystemCurve(TITLE_ID).getPoint(0).setAnnotationYShift(
self.getChartTitleThickness()/2)
self.getSystemCurve(TITLE_ID).getPoint(0).setAnnotationXShift(
shiftToHorizontalMidpoint)
# footnotes
self.getSystemCurve(FOOTNOTES_ID).getPoint(0).setAnnotationWidget(
self.getChartFootnotes(), xChartSizeDecorated,
self.getChartFootnotesThickness())
self.getSystemCurve(FOOTNOTES_ID).getPoint(0).setAnnotationYShift(
- self.getXAxis().getTickLabelThickness(False)
- self.getXAxis().getTickSpace()
- self.getXAxis().getTickLabelPadding()
- self.getXAxis().getAxisLabelThickness()
- self.getChartFootnotesThickness()/2 )
if self.getChartFootnotesLeftJustified():
self.getSystemCurve(FOOTNOTES_ID).getPoint(0).setAnnotationXShift(
shiftToLeftEdge)
self.getSystemCurve(FOOTNOTES_ID).getPoint(0).setAnnotationLocation(
AnnotationLocation.EAST)
else:
# footnotes centered
self.getSystemCurve(FOOTNOTES_ID).getPoint(0).setAnnotationXShift(
shiftToHorizontalMidpoint)
self.getSystemCurve(FOOTNOTES_ID).getPoint(0).setAnnotationLocation(
AnnotationLocation.CENTER)
# add points to ticks and gridlines curves in accord with chart specs
# x & y axis can be present even if no curves mapped to them
self.getSystemCurve(XAXIS_ID).setVisible(self.getXAxis().getAxisVisible())
self.getXAxis().populateGridlines()
self.getSystemCurve(YAXIS_ID).setVisible(self.getYAxis().getAxisVisible())
self.getYAxis().populateGridlines()
# y2 axis is present only if at least 1 curve is on it.
if self.hasY2Axis():
self.getY2Axis().populateGridlines()
self.getSystemCurve(Y2AXIS_ID).setVisible(self.getY2Axis().getAxisVisible())
self.getSystemCurve(Y2TICKS_ID).setVisible(True)
self.getSystemCurve(Y2GRIDLINES_ID).setVisible(True)
else:
self.getSystemCurve(Y2AXIS_ID).setVisible(False)
self.getSystemCurve(Y2TICKS_ID).setVisible(False)
self.getSystemCurve(Y2GRIDLINES_ID).setVisible(False)
"""*
*
* Adds an object to handle click events on this chart, that
* is, an object whose <tt>ClickHandler.onClick</tt> method will be
* called whenever the user clicks on this chart.
*
* <p>
*
* When implementing a class that handles GChart click
* events, you'll need to know the following facts:
* <p>
*
* <ol>
*
* <li>You can use the <tt>getSource</tt> method of the
* <tt>ClickEvent</tt> passed into your <tt>onClick</tt> handler
* to retrieve the <tt>GChart</tt> that was
* clicked on. For example:
* <p>
*
* <pre>
* # Deletes the clicked-on curve
def onClick(self, event):
    theGChart = event.getSource()
    c = theGChart.getTouchedCurve()
    if c is not None:
        theGChart.removeCurve(c)
        # what you see in browser won't change without an update
        theGChart.update()
* </pre>
* <p>
*
* <li>The <tt>GChart</tt> methods <tt>getTouchedPoint</tt> and
* <tt>getTouchedCurve</tt> return either the point and
* curve that were clicked on, or <tt>None</tt> if the
* click didn't "touch" any points.
*
* <p>
*
*</ol>
* <p>
*
* The editable pie chart example on the GChart <a
* href="http:#gchart.googlecode.com/svn/trunk/live-demo/v2_6/com.googlecode.gchart.gchartdemoapp.GChartDemoApp/GChartDemoApp.html">
* live demo page</a>
* illustrates how to use this method to launch a popup modal
* <tt>DialogBox</tt> whenever the user clicks on a point, and how to
* change the selected point from within that dialog by using
* GChart's <tt>touch</tt> method.
* <p>
*
* For a much simpler example that lets the user
* delete points by clicking on them, see the Chart Gallery's
* <a href="package-summary.html#GChartExample18a">
* GChartExample18a</a>.
* <p>
*
* @param handler the click handler that will handle
* click events on this chart.
*
* @return the handler's registration object. You need to retain a
* reference to this registration object only if you may later need
* to remove the handler (via the registration's
* <tt>removeHandler</tt> method). Most applications don't remove
* handlers (handlers tend to be statically defined) and so can
* ignore the value returned from this method.
*
* @see #getTouchedPoint getTouchedPoint
* @see #getTouchedCurve getTouchedCurve
* @see #touch touch
* @see #isUpdateNeeded isUpdateNeeded
"""
def addClickHandler(self, handler):
result = addDomHandler(handler, ClickEvent.getType())
return result
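# Hedged usage sketch for the handler-registration methods above (it assumes
# the GWT-style event classes referenced here, such as ClickEvent, are
# available in your pyjamas build; they are not imported by this module):
#
#   class DeleteOnClick:
#       def onClick(self, event):
#           chart = event.getSource()
#           if chart.getTouchedCurve() is not None:
#               chart.removeCurve(chart.getTouchedCurve())
#               chart.update()
#
#   registration = chart.addClickHandler(DeleteOnClick())
#   # keep `registration` only if you may later need registration.removeHandler()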
def addDoubleClickHandler(self, handler):
result = addDomHandler(handler, DoubleClickEvent.getType())
return result
def addMouseDownHandler(self, handler):
result = addDomHandler(handler, MouseDownEvent.getType())
return result
def addMouseMoveHandler(self, handler):
result = addDomHandler(handler, MouseMoveEvent.getType())
return result
def addMouseOutHandler(self, handler):
result = addDomHandler(handler, MouseOutEvent.getType())
return result
def addMouseOverHandler(self, handler):
result = addDomHandler(handler, MouseOverEvent.getType())
return result
def addMouseUpHandler(self, handler):
result = addDomHandler(handler, MouseUpEvent.getType())
return result
def addMouseWheelHandler(self, handler):
result = addDomHandler(handler, MouseWheelEvent.getType())
return result
"""
* Given external, coded, index returns a curve's ArrayList index
*
* Basic order within the curves array is as follows:
*
* o 6 decorative curves that hold title, etc
* o "self.getNCurves()" user-created curves
* o 1 "Pop-up" hover annotation holding curve
* o 1 Selection cursor holding curve
*
* It's very important that the last two system curves come last, both
* for performance (at the end means GChart's algorithms are able to
* update only these curves when hover feedback changes) and to
* ensure these elements are always on top of all other chart
* elements, as required.
* <p>
*
* The "external" system curve indexes are in a continuous range of
* negative integers, which are mapped into the ArrayList
* positions above via this code.
*
"""
def internalCurveIndex(self, externalIndex):
if NAI == externalIndex:
# -1 is the "no such curve" index used by an ArrayList
result = -1
elif externalIndex < -N_POST_SYSTEM_CURVES:
# decoration related sys curves (before user's)
result = externalIndex + N_SYSTEM_CURVES
elif externalIndex < 0:
# hover feedback related, after user curves (at the end)
result = len(self.curves)+externalIndex
else:
# + indexes mapped to ordinary user-created curves
result = externalIndex + N_PRE_SYSTEM_CURVES
return result
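# Hedged worked example of the index mapping implemented above (illustrative
# only): with the N_PRE_SYSTEM_CURVES decoration curves stored first and the
# N_POST_SYSTEM_CURVES hover-feedback curves stored last,
#
#   external index  0  (first user curve)        -> internal N_PRE_SYSTEM_CURVES
#   external index -1  (last hover-feedback curve) -> internal len(self.curves) - 1
#   external index <= -(N_POST_SYSTEM_CURVES+1) (decorations)
#                                                -> internal externalIndex + N_SYSTEM_CURVES
#   external NAI                                 -> internal -1 ("no such curve")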
""" Given a curves ArrayList index returns external, coded, index
*
* Companion/inverse of preceding method.
*
"""
def externalCurveIndex(self, internalIndex):
if internalIndex < 0:
result = NAI
elif internalIndex < N_PRE_SYSTEM_CURVES:
# one of the sys curves that comes before user's curves
result = internalIndex - N_SYSTEM_CURVES
elif internalIndex >= len(self.curves)-N_POST_SYSTEM_CURVES:
# sys curves, like hover feedback, that come after user's
result = internalIndex - len(self.curves)
else:
# ordinary user created curve
result = internalIndex - N_PRE_SYSTEM_CURVES
return result
# does the external curve index represent a GChart-sys-defined curve?
def isSystemCurveIndex(self, externalIndex):
result = externalIndex < 0
return result
"""*
* Adds a curve to the chart, at the specified position
* in the curve sequence. Existing curves at positions at
* or greater than the specified position have their
* positional indexes increased by 1.
* <p>
*
* @see #getCurve getCurve
* @see #addCurve() addCurve()
* @see #removeCurve removeCurve
* @see #clearCurves clearCurves
* @see #getNCurves getNCurves
"""
def addCurve(self, iCurve=None):
if iCurve is None:
iCurve = self.getNCurves()
if iCurve > self.getNCurves():
raise IllegalArgumentException(
"iCurve = " + iCurve +"; iCurve may not exceed self.getNCurves() (" + self.getNCurves() + ")")
elif iCurve < 0:
raise IllegalArgumentException(
"iCurve = " + iCurve +"; iCurve may not be negative.")
internalIndex = self.internalCurveIndex(iCurve)
c = Curve(self, internalIndex)
self.curves.insert(internalIndex, c)
# curves are initially added to the x, y axes.
self.getXAxis().incrementCurves()
self.getYAxis().incrementCurves()
# adjust ArrayList indexes to account for newly added element
for i in range(internalIndex+1, len(self.curves)):
self.curves[i].incrementIndex()
if 0 != self.plotPanel.getRenderingPanelCount():
# other panels are already there
rpIndex = self.getRenderingPanelIndex(internalIndex)
self.plotPanel.addGraphicsRenderingPanel(rpIndex)
self.plotPanel.addAnnotationRenderingPanel(rpIndex)
# otherwise, before 1st update: lazy-add panels when they're 1st used
c.invalidate()
if self.getNCurves() > 0:
self.setDefaultBorderColor(c, self.getNCurves()-1)
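# Hedged usage sketch for addCurve/getCurve (method names are from this file;
# the point values are made up):
#
#   chart.addCurve()               # appended at position getNCurves()-1
#   chart.getCurve().addPoint(1, 2)
#   chart.addCurve(0)              # inserted first; existing curves shift up by one
#   chart.update()                 # changes only appear after an update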
"""*
* Removes every curve this chart contains.
*
* @see #getCurve getCurve
* @see #addCurve() addCurve()
* @see #addCurve(int) addCurve(int)
* @see #removeCurve removeCurve
* @see #getNCurves getNCurves
"""
def clearCurves(self):
for iCurve in range( self.getNCurves()-1, -1, -1):
self.removeCurve(iCurve)
"""*
** Returns the background color of the chart as a whole.
**
** @return the chart's background color, in a standard
** CSS color string format.
**
** @see #setBackgroundColor(String) setBackgroundColor
**
*"""
def getBackgroundColor(self):
return(self.backgroundColor)
"""*
** Returns the color of the border around the chart as
** a whole.
**
** @return the color of the chart's border, in a standard
** CSS color string format.
**
** @see #setBorderColor(String) setBorderColor
**
*"""
def getBorderColor(self):
return self.borderColor
"""*
** Returns the width of the border around the chart as a whole
**
** @return width of the border around the chart as a whole, as
** a CSS border width specification string (e.g. "1px").
**
** @see #setBorderWidth(String) setBorderWidth
**
*"""
def getBorderWidth(self):
return self.borderWidth
"""*
** Returns the style of the border around the chart as a whole
**
** @return cssStyle for the border around the chart as a whole
**
** @see #setBorderStyle(String) setBorderStyle
**
*"""
def getBorderStyle(self):
return self.borderStyle
"""* Returns the previously specified chart footnotes widget.
*
* @return widget representing chart's footnotes or <tt>None</tt> if none.
*
* @see #setChartFootnotes(Widget) setChartFootnotes(Widget)
* @see #setChartFootnotes(String) setChartFootnotes(String)
* @see #getChartTitle getChartTitle
"""
def getChartFootnotes(self):
return self.chartFootnotes
"""* Returns flag indicating if this chart's footnotes are
* left-justified or centered.
*
* @return True if footnotes are flush against the left edge
* of the chart, False if they are horizontally centered
* across the bottom edge of the chart.
*
* @see #setChartFootnotesLeftJustified setChartFootnotesLeftJustified
* @see #setChartFootnotes(String) setChartFootnotes(String)
* @see #setChartTitle setChartTitle
"""
def getChartFootnotesLeftJustified(self):
return self.chartFootnotesLeftJustified
"""* Returns the thickness (height) of the rectangular region
** at the bottom of the chart allocated for footnotes.
** <p>
**
** The width of this region always equals the width of
** the entire GChart (including legend and axis labels).
** <p>
**
** Your footnotes widget is always vertically centered
** in this region.
** <p>
**
**
** Your footnotes widget will either be horizontally
** centered in this region, or left justified in it,
** depending on the property defined by the
** <tt>setChartFootnotesLeftJustified</tt> method.
**
** <p>
**
**
** This method always returns 0 if the footnotes widget
** is <tt>None</tt> (the default); the rectangular
** footnotes region is entirely eliminated in that case.
** <p>
**
** @return the thickness (height) of the rectangular region
** at the bottom of the chart allocated for footnotes, in
** pixels.
**
** @see #setChartFootnotesThickness(int) setChartFootnotesThickness
** @see #setChartFootnotesLeftJustified setChartFootnotesLeftJustified
*"""
def getChartFootnotesThickness(self):
result = 0
EXTRA_HEIGHT = 3; # 1.5 lines padding above/below
DEF_HEIGHT = 1
if None == self.getChartFootnotes():
result = 0
elif NAI != self.footnotesThickness:
result = self.footnotesThickness
elif hasattr(self.getChartFootnotes(), 'getHTML'):
result = DEFAULT_FOOTNOTES_THICKNESS * (EXTRA_HEIGHT +
GChartUtil.htmlHeight( self.getChartFootnotes().getHTML()) )
else:
result = DEFAULT_FOOTNOTES_THICKNESS* (DEF_HEIGHT + EXTRA_HEIGHT)
return result
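# Worked example of the thickness computation above (illustrative numbers
# only): with no explicit footnotesThickness set and an HTML footnotes widget
# whose markup spans 2 lines (as counted by GChartUtil.htmlHeight), the result
# is
#   DEFAULT_FOOTNOTES_THICKNESS * (EXTRA_HEIGHT + 2)
#     = DEFAULT_FOOTNOTES_THICKNESS * 5
# i.e. the 1.5 lines of padding above and below plus the two lines of text.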
"""* Returns the previously specified widget representing the
* chart's title.
*
* @return widget representing chart's title or <tt>None</tt>
* if none
*
* @see #setChartTitle(Widget) setChartTitle(Widget)
* @see #setChartTitle(String) setChartTitle(String)
*
"""
def getChartTitle(self):
return self.chartTitle
"""*
** Returns the thickness (height) of the rectangular region at
** the top of the chart allocated for the title.
** <p>
**
** This method always returns 0 if the title widget
** is <tt>None</tt> (the default); the rectangular
** title region is entirely eliminated in that case.
** <p>
**
** Your title widget is always centered vertically and
** horizontally within this rectangular region.
**
**
** @return the thickness (height) of the rectangle
** that contains the chart's title, in pixels.
**
** @see #setChartTitleThickness setChartTitleThickness
**
*"""
def getChartTitleThickness(self):
result = 0
EXTRA_HEIGHT = 3; # 1.5 lines above & below title
DEF_HEIGHT = 1
if None == self.getChartTitle():
result = 0
elif NAI != self.titleThickness:
result = self.titleThickness
elif hasattr(self.getChartTitle(), 'getHTML'):
result = DEFAULT_TITLE_THICKNESS * (EXTRA_HEIGHT +
GChartUtil.htmlHeight( self.getChartTitle().getHTML() ))
else:
result = DEFAULT_TITLE_THICKNESS* (EXTRA_HEIGHT + DEF_HEIGHT)
return result
"""*
* Determines if this chart will clip any chart elements
* that extend beyond the bounds of the decorated chart.
* The decorated chart includes title, footnotes, etc.
* as well as the plot area proper.
*
* @return True if off-the-decorated-chart elements are
* clipped, False otherwise.
*
* @see #setClipToDecoratedChart setClipToDecoratedChart
* @see #setClipToPlotArea setClipToPlotArea
* @see #getXChartSizeDecorated getXChartSizeDecorated
* @see #getYChartSizeDecorated getYChartSizeDecorated
*
"""
def getClipToDecoratedChart(self):
return self.clipToDecoratedChart
"""*
* Returns True if graphical aspects of the
* chart that fall outside of the plot area are being clipped
* off, False otherwise.
*
* @return <tt>True</tt> if clipping to plot area, else
* <tt>False</tt>.
*
* @see #setClipToPlotArea setClipToPlotArea
"""
def getClipToPlotArea(self):
return self.clipToPlotArea
# returns point closest to given plot-panel pixel coordinates
def getClosestBrushTouchingPointNoCheck(self, x, y):
result = None
# NAI means mouse is at some unknown, off-the-chart, position
if x == NAI or y == NAI:
return result
dBest = Double.MAX_VALUE; # dist. to closest symbol
# fact that charts tend to have a small number of curves
# allows us to use simple sequential search across curves
nCurves = self.getNCurves()
for iCurve in range(nCurves):
c = self.getSystemCurve(iCurve)
if not c.isVisible():
continue
sym = c.getSymbol()
if not sym.getHoverAnnotationEnabled() and not sym.getHoverSelectionEnabled():
continue
symType = sym.getSymbolType()
onY2 = c.onY2()
iClosest = c.getClosestTouchingPoint(x, y)
if NAI == iClosest:
continue; # no hits on this curve
xPoint = symType.getCenterX(self.plotPanel, sym, iClosest)
yPoint = symType.getCenterY(self.plotPanel, sym, iClosest, onY2)
dx = sym.xScaleFactor*(x-xPoint)
dy = sym.yScaleFactor*(y-yPoint)
# distance, squared, of mouse from symbol's "center"
d = dx*dx+dy*dy
if d <= dBest:
# for ties, use later, "on top", point
dBest = d
result = c.getPoint(iClosest)
return result
"""*
*
* Returns the point that would be touched if the mouse were
* moved to the given x,y plot-area pixel coordinates, or
* <tt>None</tt> if the moving the mouse to these coordinates
* would not have touched any points.<p>
*
* This method only works if the chart rendering is
* up-to-date (if <tt>isUpdateNeeded</tt> returns
* <tt>False</tt>). Otherwise, <tt>None</tt> is returned.
* <p>
*
* <small> GChart's hit testing method works best if a
* chart's points are approximately evenly distributed across
* the plot area's x or y axis, across a small number of
* curves. In particular, charts that have many points
* bunched up into a small part of the plot area, or that
* have many points completely outside of the plot area, or
* that place each point into a separate curve, could
* experience significantly worse than usual hit testing
* performance. Though such cases are expected to be rare, in
* the worst case, GChart could be reduced to a simple linear
* search across all the chart's points during hit testing.
* </small>
*
* @param xPlotArea x-coordinate of trial mouse position, in
* GChart's plot area pixel coordinates.
* @param yPlotArea y-coordinate of trial mouse position, in
* GChart's plot area pixel coordinates.
*
* @return reference to the point that would have been "touched"
* by the mouse, or <tt>None</tt> if positioning the mouse
* to these coordinates would not have touched any point.
*
* @see Axis#getMouseCoordinate getMouseCoordinate
* @see Axis#modelToPlotAreaPixel modelToPlotAreaPixel
* @see #isUpdateNeeded isUpdateNeeded
* @see #touch touch
*
"""
def getClosestBrushTouchingPoint(self, xPlotArea, yPlotArea):
result = None
if not self.isUpdateNeeded():
result = self.getClosestBrushTouchingPointNoCheck(xPlotArea, yPlotArea)
return result
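# Hedged hit-testing sketch using the methods referenced in the doc block
# above (getMouseCoordinate/modelToPlotAreaPixel are documented Axis methods;
# the model coordinates are made up):
#
#   x = chart.getXAxis().modelToPlotAreaPixel(someModelX)
#   y = chart.getYAxis().modelToPlotAreaPixel(someModelY)
#   p = chart.getClosestBrushTouchingPoint(x, y)
#   if p is not None:
#       pass  # p is the point the brush would touch at that pixel position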
"""*
* Returns a reference to the curve at the specified
* positional index. Use the reference returned by this method to
* modify properties of a curve (the symbol, data points, etc.)
*
* When the positional index is None, it is equivalent to
* <tt>getCurve(self.getNCurves()-1)</tt>.
*
* <p>
* @param iCurve index of the curve to be retrieved.
* @return reference to the Curve at the specified position.
*
* @see #getCurve() getCurve()
* @see #addCurve() addCurve()
* @see #addCurve(int) addCurve(int)
* @see #removeCurve removeCurve
* @see #clearCurves clearCurves
* @see #getNCurves getNCurves
"""
def getCurve(self, iCurve=None):
if iCurve is None:
N = self.getNCurves()
if N < 1:
raise IllegalStateException(
"You must add at least 1 curve before invoking getCurve()")
return self.getSystemCurve(N-1)
if iCurve >= self.getNCurves():
raise IllegalArgumentException(
"iCurve = " + iCurve +"; iCurve may not exceed self.getNCurves()-1 (" + str(self.getNCurves()-1) + ")")
elif iCurve < 0:
raise IllegalArgumentException(
"iCurve = " + iCurve +"; iCurve may not be negative.")
result = self.getSystemCurve(iCurve)
return result
# Version of getCurve that allows sys curve (negative id) access
def getSystemCurve(self, iCurve):
internalIndex = self.internalCurveIndex(iCurve)
result = self.curves[internalIndex]
return result
"""*
* Returns the positional index (within this chart's list of
* curves) of the specified curve.
* <p>
*
* Returns <i>NAI</i> if the specified curve is not found on
* this GChart's curve list.
*
* <p>
* @param curve whose list position is to be retrieved
* @return position of curve in GChart's curve list, or
* <i>NAI</i> if not on this chart's curve list.
*
* @see #getCurve() getCurve()
* @see #getCurve(int) getCurve(int)
* @see #addCurve() addCurve()
* @see #addCurve(int) addCurve(int)
* @see #removeCurve removeCurve
* @see #clearCurves clearCurves
* @see #getNCurves getNCurves
"""
def getCurveIndex(self, curve):
internalIndex = curve.getIndexOf()
result = self.externalCurveIndex(internalIndex)
return result
def getInternalCurveIndex(self, curve):
result = curve.getIndexOf()
return result
# maps all background curve indexes into first rendering panel
def getRenderingPanelIndex(self, internalCurveIndex):
result = 0
if N_PRE_SYSTEM_CURVES <= internalCurveIndex:
result = internalCurveIndex - N_PRE_SYSTEM_CURVES + 1
return result
"""* Returns the font-family used in tick labels, point annotations,
** legends, and as the default in titles, footnotes, and
** axis labels.
**
** @see #setFontFamily(String) setFontFamily
**
**
*"""
def getFontFamily(self):
return self.fontFamily
"""*
** Returns CSS color specification for all gridlines, axes,
** and tickmarks.
**
** @see #setGridColor setGridColor
**
** @return the color, in CSS standard color format,
** used for all gridlines, axes, and tick marks.
**
*"""
def getGridColor(self):
cGridlines = self.getSystemCurve(XGRIDLINES_ID)
result = cGridlines.getSymbol().getBorderColor()
return result
"""*
** Returns the background color of the chart's legend.
**
** @return the legend's background color, in a standard
** CSS color string format.
**
** @see #setLegendBackgroundColor setLegendBackgroundColor
**
*"""
def getLegendBackgroundColor(self):
return self.legendBackgroundColor
"""*
** Returns the border color of the chart's legend.
**
** @return the color of the legend's border, in a standard
** CSS color string format, or else the special GChart keyword
** <tt>TRANSPARENT_BORDER_COLOR</tt>.
**
** @see #setLegendBorderColor setLegendBorderColor
** @see #TRANSPARENT_BORDER_COLOR TRANSPARENT_BORDER_COLOR
**
*"""
def getLegendBorderColor(self):
return self.legendBorderColor
"""*
** Returns the width of the chart's legend's border
**
** @return width of the legend's border, in pixels
**
** @see #setLegendBorderWidth setLegendBorderWidth
**
*"""
def getLegendBorderWidth(self):
return self.legendBorderWidth
"""*
** Returns the style of the chart's legend's border
**
** @return cssStyle of the legend's border
**
** @see #setLegendBorderStyle setLegendBorderStyle
**
*"""
def getLegendBorderStyle(self):
return self.legendBorderStyle
"""*
** Returns the color of the font used to display the labels
** within the legend (chart key)
**
** @return CSS color string defining the legend text's color
**
** @see #setLegendFontColor setLegendFontColor
*"""
def getLegendFontColor(self):
return self.legendFontColor
"""*
* Returns the CSS font size, in pixels, of text displayed
* in the chart's legend (also known as a chart's key).
*
* @return the (previously specified) font size of legend text
*
* @see #setLegendFontSize setLegendFontSize
"""
def getLegendFontSize(self):
return self.legendFontSize
"""*
** Returns the font-style in which this GChart's legend text
** will be rendered.
**
** @return font-style of legend text (italic, normal, etc.)
**
** @see #setLegendFontStyle setLegendFontStyle
*"""
def getLegendFontStyle(self):
return self.legendFontStyle
"""*
** Returns True if legend text will be rendered in a bold,
** or False if in normal, weight font.
**
** @return if the legend's text is in bold or not.
**
** @see #setLegendFontWeight setLegendFontWeight
*"""
def getLegendFontWeight(self):
return self.legendFontWeight
"""*
** Returns the thickness (width) of the rectangular region
** to the right of the y2-axis label allocated for the
** chart legend.<p>
**
** The region extends vertically in parallel with the
** right edge of the plot area. The legend is always
** centered vertically and horizontally within this
** rectangular region.
** <p>
**
** This method always returns 0 if the legend is not
** visible; the rectangular legend region is entirely
** eliminated in that case.
**
** @return thickness (width) of legend key holding region,
** in pixels.
**
** @see #setLegendThickness setLegendThickness
*"""
def getLegendThickness(self):
result = 0
if self.isLegendVisible() and 0 < self.getNVisibleCurvesOnLegend():
if NAI == self.legendThickness:
result = self.getDefaultLegendThickness()
else:
result = self.legendThickness
return result
"""*
* Returns the number of curves on this chart.
*
* @return the number of curves on this chart
*
* @see #getCurve getCurve
* @see #addCurve() addCurve()
* @see #addCurve(int) addCurve(int)
* @see #removeCurve removeCurve
* @see #clearCurves clearCurves
"""
def getNCurves(self):
return len(self.curves) - N_SYSTEM_CURVES
"""* Returns the CSS string that specifies the width of the
** padding between the chart and its external border
** <p>
**
** @return the CSS string that defines the CSS padding property
** for the GChart as a whole.
**
** @see #setPadding(String) setPadding
**
*"""
def getPadding(self):
return self.padding
"""*
** Returns the background color of the area of the chart
** in which symbols representing curve data are displayed
**
** @return CSS color string defining the plot area's background
** color
**
** @see #setPlotAreaBackgroundColor setPlotAreaBackgroundColor
*"""
def getPlotAreaBackgroundColor(self):
c = self.getSystemCurve(PLOTAREA_ID)
result = c.getSymbol().getBackgroundColor()
return result
"""*
** Returns the border color of the area of the chart
** in which symbols representing curve data are displayed
**
** @return CSS color string defining the color of the plot
** area's border
**
** @see #setPlotAreaBorderColor setPlotAreaBorderColor
*"""
def getPlotAreaBorderColor(self):
c = self.getSystemCurve(PLOTAREA_ID)
result = c.getSymbol().getBorderColor()
return result
"""*
** Returns the width of the border around the area of the
** chart in which symbols representing curve data are
** displayed.
**
** @return width, in pixels, of the border around the plot area
**
** @see #setPlotAreaBorderWidth setPlotAreaBorderWidth
*"""
def getPlotAreaBorderWidth(self):
c = self.getSystemCurve(PLOTAREA_ID)
result = c.getSymbol().getBorderWidth()
return result
"""*
** Returns the style of the border around the area of the
** chart in which symbols representing curve data are
** displayed (the so-called plot area).
**
** @return CSS style of the border around the plot area
**
** @see #setPlotAreaBorderStyle setPlotAreaBorderStyle
*"""
def getPlotAreaBorderStyle(self):
c = self.getSystemCurve(PLOTAREA_ID)
result = c.getSymbol().getBorderStyle()
return result
"""*
*
* Returns the image URL that will be used to define the
* plot area's background the next time <tt>update</tt> is called.
* <p>
*
* @return url of image to be used as the background of the plot
* area the next time that <tt>update</tt> is called.
*
* @see #setPlotAreaImageURL setPlotAreaImageURL
* @see #update update
*
"""
def getPlotAreaImageURL(self):
c = self.getSystemCurve(PLOTAREA_ID)
result = c.getSymbol().getImageURL()
return result
"""*
*
* Returns a flag that tells if GChart is configured to
* perform updates so that the chart uses less memory.
*
* @return <tt>True</tt> if GChart optimizes updates to
* save memory, <tt>False</tt> (the default) if it optimizes
* them to save time.
*
* @see #setOptimizeForMemory setOptimizeForMemory
*
*"""
def getOptimizeForMemory(self):
return self.optimizeForMemory
"""*
* @deprecated
*
* Equivalent to <tt>!getClipToPlotArea()</tt>. Use that
* method instead.
*
* @see #getClipToPlotArea getClipToPlotArea
"""
def getShowOffChartPoints(self):
return not self.getClipToPlotArea()
"""* @deprecated
**
** Equivalent to <tt>!getClipToDecoratedChart()</tt>. Use
** that method instead.
**
** @see #getClipToDecoratedChart getClipToDecoratedChart
**
*"""
def getShowOffDecoratedChartGlyphs(self):
return not self.getClipToDecoratedChart()
"""*
** Returns a URL that points to a 1 x 1 pixel blank image
** file GChart requires to render its charts without
** producing missing image icons.
**
** <p>
**
** @return the URL of the file GChart needs to prevent
** missing image icons from appearing on your chart.
**
** @see #setBlankImageURL setBlankImageURL
**
*"""
def getBlankImageURL(self):
if self.blankImageURL:
return self.blankImageURL
return DEFAULT_BLANK_IMAGE_URL_FULLPATH
"""*
* Returns this GChart's hover parameter interpreter.
*
* @see #setHoverParameterInterpreter setHoverParameterInterpreter
*
* @return the hover parameter interpreter used by this
* GChart, or <tt>None</tt> if none.
*
"""
def getHoverParameterInterpreter(self):
return self.hoverParameterInterpreter
"""*
* Is it possible to select points and have their hover
* annotations pop up, merely by "touching" them with
* the mouse-attached "brush"?
*
* @return True (the default) if just hovering over a point can
* select it, False if you must click on a point to select it.
*
* @see #setHoverTouchingEnabled setHoverTouchingEnabled
*
"""
def getHoverTouchingEnabled(self):
return self.hoverTouchingEnabled
"""*
* Returns the x-axis associated with this chart. Use the
* returned reference to manipulate axis min and max,
* number of ticks, tick positions, tick label formats, etc.
* <p>
* @return object representing the x-axis of this chart.
*
* @see #getYAxis getYAxis
* @see #getY2Axis getY2Axis
"""
def getXAxis(self):
return self.xAxis
"""*
* Returns the number of x-pixels in the region of the chart
* used for curve display purposes.
*
* @return the number of x-pixels available for curve display.
*
* @see #setXChartSize setXChartSize
*
"""
def getXChartSize(self):
return self.xChartSize
"""*
* Returns the number of x-pixels reserved for the chart as a
* whole, including space reserved for decorations (title,
* footnotes, axis labels, ticks, tick labels, legend key,
* etc.).
* <p>
*
* The returned size does not include the border or padding
* around the chart as a whole. <p>
*
* You cannot directly set the decorated x chart size.
* Instead, you must set the width of the plot area, and the
* thicknesses of certain of the decoration-holding regions
* (using methods linked to below) that, summed together,
* define the total width of the chart.
*
* @return the width of the entire chart, in pixels.
*
* @see #setXChartSize setXChartSize
* @see #getYChartSizeDecorated getYChartSizeDecorated
* @see Axis#setAxisLabelThickness setAxisLabelThickness
* @see Axis#setTickLabelThickness setTickLabelThickness
* @see Axis#setTickLength setTickLength
* @see Axis#setTickLocation setTickLocation
* @see Axis#setTickLabelPadding setTickLabelPadding
* @see #setLegendThickness setLegendThickness
*
"""
def getXChartSizeDecorated(self):
result = (self.getXChartSize() +
self.getYAxis().getAxisLabelThickness() +
self.getYAxis().getTickLabelThickness() +
self.getYAxis().getTickSpace() +
self.getYAxis().getTickLabelPadding() +
self.getY2Axis().getAxisLabelThickness() +
self.getY2Axis().getTickLabelThickness() +
self.getY2Axis().getTickSpace() +
self.getY2Axis().getTickLabelPadding() +
self.getLegendThickness())
return result
"""*
* Returns the y2-axis (right y axis) associated with this
* chart. Use the returned reference to manipulate axis
* min and max, number of ticks, tick positions, tick
* label formats, etc.
*
* <p>
* @return object representing the y2-axis of this chart.
*
* @see #getYAxis getYAxis
* @see #getXAxis getXAxis
"""
def getY2Axis(self):
return self.y2Axis
"""*
* Returns the (left) y-axis associated with this chart. Use the
* returned reference to manipulate axis min and max,
* number of ticks, tick positions, tick label formats, etc.
* <p>
* @return object representing the y-axis of this chart.
*
* @see #getXAxis getXAxis
* @see #getY2Axis getY2Axis
"""
def getYAxis(self):
return self.yAxis
"""*
* Returns the number of y-pixels in the region of the chart
* used for curve display purposes.
*
* @return the number of y-pixels available for curve display.
*
* @see #setYChartSize setYChartSize
*
"""
def getYChartSize(self):
return self.yChartSize
"""*
* Returns the number of y-pixels reserved for the chart as a
* whole, including space reserved for decorations (title,
* footnotes, axis labels, ticks, tick labels, etc.). <p>
*
* The returned size does not include the border or padding
* around the chart as a whole. <p>
*
* You cannot directly set the decorated y chart size.
* Instead, you must set sizes and thicknesses of the
* plot area and certain of the decoration-holding regions
* (using the methods linked-to below) that, when summed
* together, define the height of the decorated chart.
*
* @return the height of the entire chart, in pixels.
*
* @see #setYChartSize setYChartSize
* @see #getXChartSizeDecorated getXChartSizeDecorated
* @see Axis#setAxisLabelThickness setAxisLabelThickness
* @see Axis#setTickLabelThickness setTickLabelThickness
* @see Axis#setTickLength setTickLength
* @see Axis#setTickLocation setTickLocation
* @see Axis#setTickLabelPadding setTickLabelPadding
* @see #setChartTitleThickness setChartTitleThickness
* @see #setChartFootnotesThickness setChartFootnotesThickness
*
"""
def getYChartSizeDecorated(self):
result = (self.getYChartSize() +
self.getXAxis().getAxisLabelThickness() +
self.getXAxis().getTickLabelThickness() +
self.getXAxis().getTickSpace() +
self.getXAxis().getTickLabelPadding() +
self.getChartTitleThickness() +
self.getChartFootnotesThickness())
return result
"""*
* Determines if this chart has a "y2" (right) y-axis.
* <p>
* Only charts that have at least one curve on the right
* y axis will have a y2-axis.
*
* @return True if the chart has a second y axis, False otherwise.
*
* @see Curve#setYAxis Curve.setYAxis
"""
def hasY2Axis(self):
result = self.getY2Axis().getNCurvesVisibleOnAxis() > 0
return result
"""*
* Determines if this chart has an ordinary, or left, y-axis.
* <p>
* Only charts that have at least one curve on the left
* y axis will have a y-axis.
*
* @return True if the chart has a left y axis, False otherwise
*
* @see Curve#setYAxis Curve.setYAxis
*
"""
def hasYAxis(self):
result = self.getYAxis().getNCurvesVisibleOnAxis() > 0
return result
"""*
* Determines if the legend of this chart is visible.
*
*
* @return True if the legend is visible, False otherwise.
*
* @see #setLegendVisible setLegendVisible
"""
def isLegendVisible(self):
return self.legendVisible
"""*
*
* Is the in-browser rendition of the chart inconsistent with
* the current chart specs? In other words, is a call to
* GChart's <tt>update</tt> method needed to bring the
* browser's display into agreement with current chart specs?
* <p>
*
* <i>Note:</i> Whenever this method returns
* <tt>True</tt>, GChart "freezes" hover feedback, and
* can no longer actively track the currently "touched"
* point. This is because GChart, to simplify its
* bookkeeping, assumes in-browser (DOM) rendering and
* current chart specs are in synch when determining the
* point selection consequences of mouse events over the
* chart.
*
* @return True if a call to <tt>update</tt> is needed to
* bring current chart specifications and browser-rendered
* representation into synch, False otherwise.
*
* @see #update update
* @see #getTouchedPoint getTouchedPoint
*
"""
def isUpdateNeeded(self):
result = self.chartDecorationsChanged or not self.plotPanel.isValidated()
return result
"""*
* Removes the curve at the specified positional index.
* <p>
*
* @param iCurve index of the curve to be removed
*
* @see #removeCurve(Curve) removeCurve(Curve)
* @see #getCurve getCurve
* @see #addCurve() addCurve()
* @see #addCurve(int) addCurve(int)
* @see #clearCurves clearCurves
* @see #getNCurves getNCurves
"""
def removeCurve(self, iCurve):
if iCurve >= self.getNCurves():
raise IllegalArgumentException(
"iCurve = " + str(iCurve) + "; iCurve may not exceed self.getNCurves()-1 (" + str(self.getNCurves()-1) + ")")
elif iCurve < 0:
raise IllegalArgumentException(
"iCurve = " + str(iCurve) + "; iCurve may not be negative.")
self.invalidateDependentSlices(iCurve)
"""
* Simulate user moving away from point before it is deleted (this
* assures that any required hoverCleanup gets called, and clears
* the otherwise dangling reference to the touched point).
*
"""
if self.plotPanel.touchedPoint is not None and self.plotPanel.touchedPoint.getParent() == self.getSystemCurve(iCurve):
self.plotPanel.touch(None)
# remove the rendering panel that corresponds to this curve
# (must keep the two lists in synch or 1-to-1 mapping breaks)
internalIndex = self.internalCurveIndex(iCurve)
if 0 != self.plotPanel.getRenderingPanelCount():
rpIndex = self.getRenderingPanelIndex(internalIndex)
self.plotPanel.removeGraphicsRenderingPanel(rpIndex)
self.plotPanel.removeAnnotationRenderingPanel(rpIndex)
c = self.curves[internalIndex]
if c.isVisible():
self.getXAxis().decrementCurves()
if c.getYAxis() == Y_AXIS:
self.getYAxis().decrementCurves()
else:
self.getY2Axis().decrementCurves()
c.clearIndex()
# else before 1st update, no rendering panels created yet
self.curves.pop(internalIndex)
# adjust ArrayList indexes to account for newly removed element
for i in range(internalIndex, len(self.curves)):
self.curves[i].decrementIndex()
"""*
* Removes the given curve from this GChart.
* <p>
*
* If the given curve is <tt>None</tt> or is not a curve on this GChart,
* an exception is thrown.
*
* <p>
*
* @param curve the curve to be removed.
*
* @see #removeCurve(int) removeCurve(int)
*
"""
def removeCurve(self, curve):
if None == curve:
raise IllegalArgumentException("Curve cannot be None.")
index = self.getCurveIndex(curve)
if index == NAI:
raise IllegalArgumentException("Curve is not one of this GChart's curves.")
if index < 0:
raise IllegalArgumentException("System curves cannot be removed (this should be impossible; a GChart bug is likely.)")
else:
self.removeCurve(index)
"""*
** Specifies the background color of the chart as a whole.
**
** <p>
** The default background color is <tt>USE_CSS</tt>.
** <p>
**
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor the chart's background color, in a standard
** CSS color string format.
**
**
** @see #USE_CSS USE_CSS
**
*"""
def setBackgroundColor(self, cssColor):
self.chartDecorationsChanged = True
self.backgroundColor = cssColor
"""*
** Specifies a URL that points to the transparent, 1 x 1 pixel,
** "blank GIF" that GChart uses in order to render your
** chart without adding spurious "missing image" icons to it.
** <p>
**
** When GWT compiles an application that imports the GChart
** library, it automatically adds an appropriate blank
** image, <tt>gchart.gif</tt>, to the module base directory
** (this is the directory into which GWT also copies your
** compiled Javascript, all the files in your public
** directory, etc.). <p>
**
** By default, GChart uses the following blank image URL:
** <p>
**
** <pre>
** pygwt.getModuleBaseURL() + "gchart.gif"
** </pre>
** <p>
**
** <small> Earlier versions used "gchart.gif" as this default url.
** <a href="http://groups.google.com/group/Google-Web-Toolkit/msg/4be3f19dc14f958a">
** This GWT forum post by Dean S. Jones</a> identified the
** need to add the <tt>pygwt.getModuleBaseURL()</tt> prefix.
** </small>
** <p>
**
** Note that this default adds a potentially very
** long URL to every <tt>img</tt> element added by GChart to
** render your chart, which can (in theory) more than double
** the memory required to represent your chart in the
** browser, because the absolute URLs can be of undetermined
** length. In practice, browser memory usage increases of
** 10% have been observed with the on-line demo GChart and a
** typical, 60-odd character absolute URL. <p>
**
** You have several alternatives to the above default that can
** often reduce the length of the URL and thus save browser
** memory:
**
** <p>
**
** <ol> <li>Simply copy <tt>gchart.gif</tt> from the module
** base directory into your host page's base directory, and
** then use <tt>setBlankImageURL("gchart.gif")</tt> to access
** this URL relatively.
**
** <li>If the relative path from the host page base
** directory to the module base directory is
** reasonably short, pass that alternative
** relative URL to this method (note that all
** relative URLs are interpreted relative to the base
** directory of the host page containing your GChart).
**
** <li>Place a copy of <tt>gchart.gif</tt> into
** a directory whose absolute URL is very short,
** and then pass that short absolute URL to this method.
**
** </ol>
** <p>
**
** <small> <i>Special note to anyone reading
** this who designed HTML's <tt>image</tt> tag:</i> If you
** had provided a <tt>src=none</tt> option, this method
** would not have to exist.
** </small>
** <p>
**
** <i>Tip:</i> If you already have an appropriate blank
** gif on your site that is accessible from the host
** page via a reasonably short URL there is no need to
** copy <tt>gchart.gif</tt>. You can just pass that URL
** to this method.
**
** <p>
**
** <i>Note:</i> Though GChart uses this blank image by default,
** you can use the <tt>setImageURL</tt> method to specify a
** non-blank image for use in rendering a specific curve.
** <p>
**
**
** @param blankImageURL a URL that points to a 1 x 1 pixel
** transparent image that GChart needs to render your
** charts without adding a spurious "missing image" icon.
**
** @see #getBlankImageURL getBlankImageURL
** @see #DEFAULT_BLANK_IMAGE_URL DEFAULT_BLANK_IMAGE_URL
** @see Symbol#setImageURL setImageURL
**
*"""
def setBlankImageURL(self, _blankImageURL):
if _blankImageURL != self.blankImageURL:
self.blankImageURL = _blankImageURL
# Decided not to prefetch blank image URL because 1) pre-fetching
# doesn't improve performance noticably in tested browsers,
# 2) there are reports of possible memory leaks associated with
# its use in the GWT issue tracker, and 3) users can
# easily do the prefetch on their own if they want to, and that
# is really the right place to do a prefetch anyway.
# Image.prefetch(GChart.getBlankImageURL())
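# Illustrative sketch (not part of the original API docs): if gchart.gif has
# been copied into the host page's base directory, the shorter relative URL
# discussed above could be installed like this. `chart` is assumed to be a
# GChart instance created elsewhere.
#
#   chart.setBlankImageURL("gchart.gif")
#   chart.update()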
"""*
* Defines this GChart's hover parameter interpreter.
* <p>
*
* Hovertext template strings can include <tt>${</tt>...
* <tt>}</tt> bracketed
* references to built-in parameters such as <tt>${x}</tt>
* and <tt>${y}</tt> that get replaced with appropriate
* string representations of the x or y values of the
* hovered-over point in displayed hovertext. You can add
* new, custom, named parameters, and/or redefine the
* meaning of built-in parameters, by passing a hover parameter
* interpreter to this method.
* <p>
*
* For sample code that shows you how to define a hover
* parameter interpreter, see <tt>HoverParameterInterpreter</tt>.
*
* @see HoverParameterInterpreter HoverParameterInterpreter
* @see Symbol#setHovertextTemplate setHovertextTemplate
*
* @param hpi the hover parameter interpreter to use with all
* hovertext templates on this GChart (this interpreter is
* responsible for replacing <tt>${</tt>...<tt>}</tt>
* bracketed embedded parameter names in the hover text
* template with appropriate HTML snippets representing the
* value of that parameter at the hovered-over point).
*
"""
def setHoverParameterInterpreter(self, hpi):
self.hoverParameterInterpreter = hpi
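# Hedged sketch of a custom hover parameter interpreter. The names follow the
# Java GChart HoverParameterInterpreter interface (a single
# getHoverParameter(paramName, point) hook that returns a string, or None to
# fall back to the built-in parameters); treat that exact signature as an
# assumption about this port rather than a guarantee.
#
#   class PercentInterpreter(HoverParameterInterpreter):
#       def getHoverParameter(self, paramName, point):
#           if paramName == "pct":
#               return "%d%%" % int(round(100 * point.getY()))
#           return None   # None lets GChart handle ${x}, ${y}, etc.
#
#   chart.setHoverParameterInterpreter(PercentInterpreter())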
"""*
* Specifies if merely hovering over a point is sufficient to select
* it and display its hover annotation (<tt>True</tt>), or if an
* actual click is needed (<tt>False</tt>). <p>
*
* With the default of <tt>True</tt>, points are auto-selected as
* the user "touches" them with the mouse-attached "brush"--no
* clicking is required. <p>
*
* When hover touching is disabled, a GChart can be used in a manner
* analogous to a single-selection (sorry there's no multi-selection
* capability) listbox, with its click-selectable points playing the
* role of the selectable list items. Specifically, disabling hover
* touching lets you move the mouse freely without any danger of
* changing the selected point--the point even remains selected if
* the mouse moves entirely off the chart. This is helpful when your
* application follows the common pattern of "select the thing you
* want to operate on, then issue a command that operates on that
* thing". This option is also helpful if you use very
* compute-intensive hover widgets, or if you simply prefer
* explicitly-clicked-open/closed pop-up annotations.<p>
*
* <small> <i>How to Stop Leaky Clicks:</i> In IE7 and the hosted
* mode browser, clicking ahead on a <tt>Button</tt> widget "leaks"
* clicks upwards to the enclosing parent, even if you call
* <tt>event.cancelBubble(True)</tt>. Such "leaky clicks" can
* inappropriately change the selected point, when you really just
* wanted to operate on it. This does not happen in Firefox 2, 3, or
* Chrome, whose buttons properly "eat" the clicks--even when they
* come in fast. To workaround the problem, you can place the
* buttons into a hover widget (as shown in
* <tt>GChartExample21.java</tt> in the chart gallery). This works
* because GChart applies checks that ignore any mouse events that
* occur within the rectangular region associated with the opened
* hover widget. </small> <p>
*
* For an example that uses <tt>setHoverTouchingEnabled(False)</tt>
* to allow the user to change the y-value of the selected point,
* see the Chart Gallery's <a
* href="package-summary.html#GChartExample21"> GChartExample21</a>.
*
*
* @param hoverTouchingEnabled <tt>True</tt> (the default) if you
* want users to be able to select points simply by hovering over
* them with their mouse, <tt>False</tt> if you want to
* require that they actually click on points to select them.
*
* @see #getHoverTouchingEnabled getHoverTouchingEnabled
* @see Symbol#setBrushHeight setBrushHeight
* @see #touch touch
* @see #update update
* @see HoverUpdateable HoverUpdateable
*
"""
def setHoverTouchingEnabled(self, hoverTouchingEnabled):
self.hoverTouchingEnabled = hoverTouchingEnabled
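# Usage sketch (assumes `chart` is a GChart instance built elsewhere): require
# an explicit click to select points, listbox-style, then re-render.
#
#   chart.setHoverTouchingEnabled(False)
#   chart.update()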
"""*
** Specifies the color of the border around the chart as
** a whole.
**
** <p>
** The default border color is <tt>USE_CSS</tt>.
**
** <p>
** <blockquote><small>
** <i>Tip:</i> No border will appear if either <tt>borderStyle</tt>
** is <tt>none</tt>, <tt>borderWidth</tt> is <tt>0px</tt> or
** <tt>borderColor</tt> is <tt>transparent</tt>. Since
** these will often be the "CSS inherited" values,
** generally, it's best to set all three properties
** whenever you set any one of them.
** </small></blockquote>
** <p>
**
**
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor the color of the chart's border, in a standard
** CSS color string format.
**
** @see #setBorderWidth(String) setBorderWidth
** @see #setBorderStyle(String) setBorderStyle
** @see #getBorderColor getBorderColor
** @see #USE_CSS USE_CSS
**
*"""
def setBorderColor(self, cssColor):
self.chartDecorationsChanged = True
if cssColor is None or cssColor == TRANSPARENT_BORDER_COLOR:
raise IllegalArgumentException(
"None and TRANSPARENT_BORDER_COLOR are not allowed. This method requires a valid CSS color specification String.")
self.borderColor = cssColor
"""*
** Sets style of the border around the chart as a whole.
**
** <p>
** The default border style is <tt>USE_CSS</tt>.
** <p>
**
** <p>
** <blockquote><small>
** <i>Tip:</i> No border will appear if either <tt>borderStyle</tt>
** is <tt>none</tt>, <tt>borderWidth</tt> is <tt>0px</tt> or
** <tt>borderColor</tt> is <tt>transparent</tt>. Since
** these will often be the "CSS inherited" values,
** generally, it's best to set all three properties
** whenever you set any one of them.
** </small></blockquote>
** <p>
**
**
** @param borderStyle a CSS border style such as
** "solid", "dotted", "dashed", etc.
**
** @see #getBorderStyle getBorderStyle
** @see #setBackgroundColor(String) setBackgroundColor
** @see #setBorderColor(String) setBorderColor
** @see #setBorderWidth(String) setBorderWidth
** @see #USE_CSS USE_CSS
**
**
*"""
def setBorderStyle(self, borderStyle):
self.chartDecorationsChanged = True
self.borderStyle = borderStyle
"""*
** Specifies the width of the border around the chart as a whole.
**
** <p>
** The default border width is <tt>USE_CSS</tt>.
**
** <p>
** <blockquote><small>
** <i>Tip:</i> No border will appear if either <tt>borderStyle</tt>
** is <tt>none</tt>, <tt>borderWidth</tt> is <tt>0px</tt> or
** <tt>borderColor</tt> is <tt>transparent</tt>. Since
** these will often be the "CSS inherited" values,
** generally, it's best to set all three properties
** whenever you set any one of them.
** </small></blockquote>
**
** @param cssWidth width of the border around the chart as a whole,
** expressed as a CSS border-width specification string, such
** as "1px".
**
** @see #getBorderWidth getBorderWidth
** @see #setBorderStyle(String) setBorderStyle
** @see #setBorderColor(String) setBorderColor
** @see #USE_CSS USE_CSS
*"""
def setBorderWidth(self, cssWidth):
self.chartDecorationsChanged = True
self.borderWidth = cssWidth
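# Per the tip above, the three border properties are usually set together;
# a minimal sketch (assumes `chart` is a GChart instance):
#
#   chart.setBorderColor("black")
#   chart.setBorderStyle("solid")
#   chart.setBorderWidth("2px")
#   chart.update()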
"""*
* Convenience method equivalent to
* <tt>setChartFootnotes(HTML(html))</tt>.
*
* @param html HTML text used to define the chart's title.
*
* @see #setChartFootnotes(Widget) setChartFootnotes(Widget)
"""
def setChartFootnotes(self, html):
self.setChartFootnotes(HTML(html))
"""* Sets widget that appears just below the chart.
* <p>
*
* The widget will vertically centered within a band just
* below the x axis label that stretches along the entire
* bottom edge of the chart, and whose height is defined by
* <tt>setChartFootnotesThickness</tt>.
*
* <p>
*
* The widget will either be left justified, or horizontally
* centered, within this band depending on the property
* defined by <tt>setChartFootnotesLeftJustified</tt>
*
*
* @param chartFootnotes widget representing the chart's footnotes
*
* @see #setChartFootnotes(String) setChartFootnotes(String)
* @see #setChartFootnotesThickness setChartFootnotesThickness
* @see #getChartFootnotes getChartFootnotes
* @see #setChartFootnotesLeftJustified
* setChartFootnotesLeftJustified
"""
def setChartFootnotes(self, chartFootnotes):
# accept a plain HTML string as well, mirroring setChartTitle below
if isinstance(chartFootnotes, basestring):
chartFootnotes = HTML(chartFootnotes)
self.chartDecorationsChanged = True
self.chartFootnotes = chartFootnotes
"""* Defines if this chart's footnotes are left justified,
* or horizontally centered across the bottom edge of the
* chart.
* <p>
* Note that a chart's footnotes are always vertically
* centered within the band at the bottom of the chart
* reserved for chart footnotes. Use the
* <tt>setChartFootnotesThickness</tt> method to set the
* height of this band.
*
* @param footnotesLeftJustified True to position chart footnotes
* flush against the left edge of the chart, False (the default) to
* center them horizontally across the chart's bottom edge.
*
* @see #setChartFootnotes(String) setChartFootnotes(String)
* @see #getChartFootnotes getChartFootnotes
* @see #setChartFootnotesThickness
"""
def setChartFootnotesLeftJustified(self, footnotesLeftJustified):
self.chartDecorationsChanged = True
self.chartFootnotesLeftJustified = footnotesLeftJustified
"""*
** Sets the thickness (height) of the rectangular region at
** the bottom of the chart allocated for the footnotes.
** <p>
**
** The width of this region always equals the width of
** the entire GChart (including legend and axis labels).
** <p>
**
** Your footnotes widget is always vertically centered
** in this region.
** <p>
**
**
** Your footnotes widget will either be horizontally
** centered in this region, or left justified in it,
** depending on the property defined by the
** <tt>setChartFootnotesLeftJustified</tt> method.
** <p>
**
** This setting has no impact on chart layout if the
** footnotes widget is <tt>None</tt> (the default); the
** rectangular footnotes region is entirely eliminated, and
** in effect has a 0 thickness, in that case.
** <p>
**
** If you set the footnotes thickness to <tt>NAI</tt>
** (the default) GChart will use a thickness based on
** the estimated number of (<tt><br></tt> or
** <tt><li></tt>
** delimited) lines.
**
** @param thickness the thickness (height) of the rectangle
** that contains the footnotes, in pixels, or
** <tt>NAI</tt> to use the default thickness.
**
** @see #getChartFootnotesThickness getChartFootnotesThickness
** @see #setChartFootnotesLeftJustified setChartFootnotesLeftJustified
** @see NAI NAI
** @see #DEFAULT_FOOTNOTES_THICKNESS
** DEFAULT_FOOTNOTES_THICKNESS
**
*"""
def setChartFootnotesThickness(self, thickness):
self.chartDecorationsChanged = True
self.footnotesThickness = thickness
"""*
* Convenience method equivalent to
* <tt>setXChartSize(xChartSize); setYChartSize(yChartSize)</tt>.
*
* @param xChartSize number of x-pixels in the curve
* display area of the chart
* @param yChartSize number of y-pixels in the curve
* display area of the chart
*
* @see #setXChartSize setXChartSize
* @see #setYChartSize setYChartSize
*
"""
def setChartSize(self, xChartSize, yChartSize):
self.setXChartSize(xChartSize)
self.setYChartSize(yChartSize)
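# Sketch of the convenience form (assumes `chart` is a GChart instance):
# reserve a 400 x 300 pixel curve display area in one call.
#
#   chart.setChartSize(400, 300)   # same as setXChartSize(400); setYChartSize(300)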
# returns x,y min/max over every plotted curve
"""*
* Specifies the widget that appears centered just above the chart.
*
* @param chartTitle the widget to be used as this chart's title.
*
* @see #setChartTitle(String) setChartTitle(String)
* @see #setChartTitleThickness setChartTitleThickness
* @see #getChartTitle getChartTitle
*
"""
def setChartTitle(self, chartTitle):
if isinstance(chartTitle, basestring):
chartTitle = HTML(chartTitle)
self.chartDecorationsChanged = True
self.chartTitle = chartTitle
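# Because plain strings are wrapped in HTML() above, both call forms work;
# a small sketch (assumes `chart` is a GChart instance):
#
#   chart.setChartTitle("<h3>Monthly Revenue</h3>")        # string form
#   chart.setChartTitle(HTML("<h3>Monthly Revenue</h3>"))  # widget form
#   chart.update()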
"""*
** Sets the thickness (height) of the rectangular region at
** the top of the chart allocated for the title.
** <p>
**
** Your title widget is always centered vertically and
** horizontally within this rectangular region. <p>
**
** This setting has no impact on chart layout if the title
** widget is <tt>None</tt>, since the title-holding
** region is entirely eliminated in that case.
**
** If you set the title thickness to <tt>NAI</tt>
** (the default) GChart will use a thickness that is
** based on the number of <tt><br></tt> or
** <tt><li></tt> delimited HTML lines if the title Widget
** implements <tt>HasHTML</tt>.
**
** @param thickness the thickness (height) of the rectangle
** that contains the title, in pixels, or
** <tt>NAI</tt> to use the default thickness.
**
** @see #getChartTitleThickness getChartTitleThickness
** @see NAI NAI
** @see #DEFAULT_TITLE_THICKNESS
** DEFAULT_TITLE_THICKNESS
**
*"""
def setChartTitleThickness(self, thickness):
self.chartDecorationsChanged = True
self.titleThickness = thickness
"""*
* Specifies if this chart will clip any rendered chart elements
* (including hover selection feedback and popup annotations)
* that extends beyond the bounds of the decorated chart.
* <p>
*
* The decorated chart includes not just the plot area, but
* space allocated for titles, footnotes, legend key, axis
* labels, tick marks, etc. The size of this decorated chart
* can be obtained via the <tt>getXChartSizeDecorated</tt>
* and <tt>getYChartSizeDecorated</tt> methods.
* <p>
*
* <small> Note that, in non-IE browsers, drawing a curve via
* <tt>GWTCanvas</tt> that falls outside the bounds of the
* decorated chart could occlude mouse events over elements
* on the enclosing page <i>that fall within the smallest
* bounding rectangle that contains the canvas-rendered
* curve</i>. HTML rendering (IE's element-based VML used by
* <tt>GWTCanvas</tt> is essentially HTML-like in this respect) only
* creates such occlusions at the positions where the curve
* is actually rendered. </small>
*
* @param clipToDecoratedChart use <tt>True</tt> to clip
* off-the-decorated-chart symbols, annotations, etc. or
* <tt>False</tt> (the default) to allow such chart elements to be
* drawn outside of the rectangular region allocated for the
* chart.
*
* @see #getClipToDecoratedChart getClipToDecoratedChart
* @see #setClipToPlotArea setClipToPlotArea
* @see #getXChartSizeDecorated getXChartSizeDecorated
* @see #getYChartSizeDecorated getYChartSizeDecorated
* @see #setCanvasFactory setCanvasFactory
*
"""
def setClipToDecoratedChart(self, clipToDecoratedChart):
self.chartDecorationsChanged = True
self.invalidateAccessibleCurves()
self.clipToDecoratedChart = clipToDecoratedChart
"""* Specifies if rendered graphics falling
** outside the plot area will be clipped off.
* <p>
*
* <i>Note:</i> This clipping does not apply to the hover selection
* feedback. In particular, points that fall outside the plot area,
* though not visible, will still display their selection feedback
* and pop-up hover annotations when the user mouses over them.
*
* @param clipToPlotArea <tt>False</tt> (the default) to display
* off-the-plot-area graphics,
* <tt>True</tt>
* to clip them off.
*
* @see #getClipToPlotArea getClipToPlotArea
* @see #setClipToDecoratedChart setClipToDecoratedChart
*
"""
def setClipToPlotArea(self, clipToPlotArea):
self.chartDecorationsChanged = True
self.invalidateAccessibleCurves()
self.clipToPlotArea = clipToPlotArea
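# Usage sketch (assumes `chart` is a GChart instance): hide any symbols or
# connecting lines that fall outside the x/y axis limits.
#
#   chart.setClipToPlotArea(True)
#   chart.update()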
"""*
* Sets the symbol border colors that are used by default for
* newly created curves. The
* array must contain one or more elements, each a standard
* CSS or RGBA color specification string (see the
* <tt>setBackgroundColor</tt> link below for more
* on CSS color specification strings) or the
* special GChart keyword <tt>TRANSPARENT_BORDER_COLOR</tt>.
* <p>
*
* GChart uses the first color in this array as the default border
* color of the first curve added (via <tt>addCurve</tt>), the
* second color for the second curve added, and so on. If more
* curves are added than the number of elements in the default
* border colors array, the sequence is repeated.
*
* <p>
* <small>
* Note that each curve/symbol's default color is "locked in" at the
* point when that curve/symbol is first added, based on the
* total number of curves at that time.
* </small>
*
* <p>
*
* Because, by default, GChart uses a transparent symbol background
* color, the default border color will usually, in effect, define
* the default color of each curve. The default border color
* also defines the
* default color of point-to-point connecting lines in a line
* chart.<p>
*
* If not explicitly specified via this method, GChart uses
* <tt>GChart.DEFAULT_SYMBOL_BORDER_COLORS</tt> by default.
* However, most people find the
* color sequence <a href=
* "http:#ui.openoffice.org/VisualDesign/OOoChart_colors_drafts.html#02">
* used by OpenOffice's Charts</a> more aesthetically pleasing.
* The <a
* href="package-summary.html#GChartExample22a">World's Simplest
* Line Chart Editor</a> example chart contains a line of
* code that makes GChart use the OpenOffice defaults.
* <p>
*
* <small>This feature was added in response to an email from
* <a href="http:#www.profilercorp.com">Joe Cole</a>
* and <a href="http:#gwt-ext.com/forum/viewtopic.php?f=13&t=3465&start=3">
this post</a> by Sanjiv Jivan.
* They both pointed out the importance of changing GChart's
* default colors.</small>
*
*
* @param defaultBorderColors array of CSS color strings
* whose successive elements define the initial symbol border colors
* for curves in the order that they are added.
*
* @see #DEFAULT_SYMBOL_BORDER_COLORS DEFAULT_SYMBOL_BORDER_COLORS
* @see #TRANSPARENT_BORDER_COLOR TRANSPARENT_BORDER_COLOR
* @see Symbol#setBackgroundColor setBackgroundColor
* @see Symbol#setBorderColor setBorderColor
* @see #addCurve addCurve
*
"""
def setDefaultSymbolBorderColors(self, defaultBorderColors):
if None == defaultBorderColors:
raise IllegalArgumentException(
"defaultBorderColors array cannot be None.")
elif len(defaultBorderColors) < 1:
raise IllegalArgumentException(
"defaultBorderColors array must have at least 1 element.")
else:
self.defaultSymbolBorderColors = defaultBorderColors
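# Hedged sketch (assumes `chart` is a GChart instance; the color list is an
# arbitrary illustration, not the OpenOffice palette mentioned above): curves
# added after this call cycle through these border colors in order.
#
#   chart.setDefaultSymbolBorderColors(["#004586", "#FF420E", "#FFD320"])
#   chart.addCurve()   # gets "#004586"
#   chart.addCurve()   # gets "#FF420E"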
"""* Sets the font-family used in tick labels, point annotations,
** legends, titles, footnotes, and
** axis labels.
** <p>
** If not specified, the default value is <tt>USE_CSS</tt>.
** <p>
**
** Note that titles, footnotes and axis labels are
** defined via externally created Widgets, which are free
** to override the font-family specified by this
** method.
**
** @param fontFamily a CSS font-family specification, such
** as "Arial, sans-serif"
**
** @see #getFontFamily getFontFamily
** @see #USE_CSS USE_CSS
**
*"""
def setFontFamily(self, fontFamily):
self.chartDecorationsChanged = True
self.fontFamily = fontFamily
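# Usage sketch (assumes `chart` is a GChart instance):
#
#   chart.setFontFamily("Arial, sans-serif")
#   chart.update()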
"""*
** Specifies the single color used for all gridlines, axes
** lines, and tick marks.
**
**
** <p>
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor the color, in CSS standard color format,
** to be used for all gridlines, axes, and tick marks.
**
** @see #getGridColor getGridColor
** @see #DEFAULT_GRID_COLOR DEFAULT_GRID_COLOR
**
*"""
def setGridColor(self, cssColor):
#TODO: support line style for dotted/dashed gridlines lines,
# allow tick and grid colors to be specified separately, etc.
self.getSystemCurve(XGRIDLINES_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(YGRIDLINES_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(Y2GRIDLINES_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(XAXIS_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(YAXIS_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(Y2AXIS_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(XTICKS_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(YTICKS_ID).getSymbol().setBorderColor(cssColor)
self.getSystemCurve(Y2TICKS_ID).getSymbol().setBorderColor(cssColor)
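# Usage sketch (assumes `chart` is a GChart instance): a light gray works
# well for unobtrusive gridlines, axes, and tick marks.
#
#   chart.setGridColor("#DDDDDD")
#   chart.update()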
"""*
** Sets the background color of the chart's legend.
**
**
** <p>
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor the legend's background color, in a standard
** CSS color string format.
**
** @see #getLegendBackgroundColor getLegendBackgroundColor
** @see #DEFAULT_LEGEND_BACKGROUND_COLOR
** DEFAULT_LEGEND_BACKGROUND_COLOR
*"""
def setLegendBackgroundColor(self, cssColor):
self.chartDecorationsChanged = True
self.legendBackgroundColor = cssColor
"""*
** Sets the border color of the chart's legend.
**
**
** <p>
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor the color of the legend's border, in a standard
** CSS color string format, or the special GChart keyword
** <tt>TRANSPARENT_BORDER_COLOR</tt> for a transparent border.
**
**
** @see #getLegendBorderColor getLegendBorderColor
** @see #DEFAULT_LEGEND_BORDER_COLOR DEFAULT_LEGEND_BORDER_COLOR
** @see #TRANSPARENT_BORDER_COLOR TRANSPARENT_BORDER_COLOR
**
*"""
def setLegendBorderColor(self, cssColor):
self.chartDecorationsChanged = True
self.legendBorderColor = cssColor
"""*
** Sets the width of the chart legend's border.
**
** @param width the width of the legend's border, in pixels
**
** @see #getLegendBorderWidth getLegendBorderWidth
** @see #DEFAULT_LEGEND_BORDER_WIDTH DEFAULT_LEGEND_BORDER_WIDTH
*"""
def setLegendBorderWidth(self, width):
self.chartDecorationsChanged = True
self.legendBorderWidth = width
"""*
** Sets style of the border around the chart's legend (key).
**
** <p>
**
** <p>
** @param borderStyle a CSS border style such as
** "solid", "dotted", "dashed", etc.
**
** @see #getLegendBorderStyle getLegendBorderStyle
** @see #setLegendBackgroundColor setLegendBackgroundColor
** @see #setLegendBorderColor setLegendBorderColor
** @see #DEFAULT_LEGEND_BORDER_STYLE DEFAULT_LEGEND_BORDER_STYLE
*"""
def setLegendBorderStyle(self, borderStyle):
self.chartDecorationsChanged = True
self.legendBorderStyle = borderStyle
"""*
** Specifies the color of the legend's font. Default is
** <tt>DEFAULT_FONT_COLOR</tt>.
**
**
** <p>
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor color of the font used to display the
** labels in the legend.
**
** @see #getLegendFontColor getLegendFontColor
** @see #DEFAULT_FONT_COLOR DEFAULT_FONT_COLOR
**
*"""
def setLegendFontColor(self, cssColor):
self.chartDecorationsChanged = True
self.legendFontColor = cssColor
"""*
* Specifies the CSS font size, in pixels, of text displayed
* in the chart's legend (also known as a chart's key).
* <p>
* This size also governs the size of the symbol icon
* displayed in the legend.
* <p>
* Default is <tt>DEFAULT_LEGEND_FONTSIZE</tt>.
*
* @param legendFontSize the font size of legend text
*
* @see #getLegendFontSize getLegendFontSize
* @see #DEFAULT_LEGEND_FONTSIZE DEFAULT_LEGEND_FONTSIZE
*
"""
def setLegendFontSize(self, legendFontSize):
self.chartDecorationsChanged = True
self.legendFontSize = legendFontSize
"""*
** Specifies the cssStyle of the font used to render the
** legend's labels. Default is <tt>DEFAULT_FONT_STYLE</tt>.
**
** @param cssStyle any valid CSS font-style, namely,
** normal, italic, oblique, or inherit.
**
** @see #getLegendFontStyle getLegendFontStyle
** @see #DEFAULT_FONT_STYLE DEFAULT_FONT_STYLE
*"""
def setLegendFontStyle(self, cssStyle):
self.chartDecorationsChanged = True
self.legendFontStyle = cssStyle
"""*
** Specifies the weight of the font used in the labels of the
** legend. Default is <tt>DEFAULT_FONT_WEIGHT</tt>.
**
** @param cssWeight a CSS font-weight specification, such as
** bold, bolder, normal, light, 100, 200, ... or 900.
**
** @see #getLegendFontWeight getLegendFontWeight
** @see #DEFAULT_FONT_WEIGHT DEFAULT_FONT_WEIGHT
*"""
def setLegendFontWeight(self, cssWeight):
self.chartDecorationsChanged = True
self.legendFontWeight = cssWeight
"""*
** Sets the thickness (width) of the rectangular region at
** the right of the chart allocated for the legend key.
** <p>
**
** This setting has no impact on chart layout if the
** legend key is not visible, since the legend key's
** rectangular region is entirely eliminated in that
** case.
**
** <p>
**
** If the legend thickness is set to <tt>NAI</tt>
** (the default) GChart uses a heuristic to set the legend
** thickness based on the number of characters in each
** curve's legend label.
**
**
** @param legendThickness the thickness (width) of the rectangle
** that contains the legend key, in pixels, or
** <tt>NAI</tt> to use a built-in heuristic
** to determine the legend width.
**
** @see #getLegendThickness getLegendThickness
** @see Curve#setLegendLabel setLegendLabel
** @see Y2Axis#setAxisLabelThickness Y2Axis.setAxisLabelThickness
**
*"""
def setLegendThickness(self, legendThickness):
self.chartDecorationsChanged = True
self.legendThickness = legendThickness
"""*
* Specifies if the legend is to be visible on this chart.
* Legends are visible by default. However, a legend is only
* generated if at least one curve's legend label has been
* specified.
*
* @param isLegendVisible True to display the legend, False to
* hide it.
*
* @see #isLegendVisible isLegendVisible
* @see Curve#setLegendLabel setLegendLabel
"""
def setLegendVisible(self, legendVisible):
self.chartDecorationsChanged = True
self.legendVisible = legendVisible
"""*
* By default, this property is <tt>False</tt>, which means
* that GChart will retain no-longer-needed Image and Grid
* widgets (plus any user object references associated with
* those widgets, such as those created via the
* <tt>setAnnotationText</tt> and
* <tt>setAnnotationWidget</tt> methods) between
* <tt>updates</tt> in the expectation that they may be
* needed by future updates. This strategy often makes
* updates faster, because building Image and Grid
* elements "from scratch" is very expensive. However,
* strictly speaking, GChart is holding onto memory it no
* longer needs to render the chart <i>right now</i>--which
* would normally be considered a memory leak if it were not
* being done deliberately. <p>
*
* If <tt>optimizeForMemory</tt> is set to <tt>True</tt>,
* GChart will (at the very next <tt>update()</tt> call) free
* up any Image or Grid elements that are no longer required
* to render the current chart. Should a chart's size grow back
* to a former size, the subsequent update would be slower,
* though.
*
* <p> Charts that use exactly the same number of Image and
* Grid elements for each update (for example a bar chart
* where the number of bars is fixed) will see no impact on
* either memory use or update speeds by setting this
* parameter. Charts that have a highly variable number of
* Image or Grid elements (for example, a chart whose number
* of points varies randomly between 5 and 500) may see a
* very large impact on speed (False is faster) or memory
* (True is more compact).
* <p>
*
* The setting of this parameter never has any impact on the
* speed or memory used on the <i>very first</i> chart
* update.
* <p>
*
* In one test using the future oil price simulation chart of
* GChart's live demo (which has only small changes in the
* number of elements required to render the chart between
* updates) setting this parameter to True made the updates,
* on average, around 10% slower, but also reduced the memory
* footprint by around 2%.
*
* @param optimizeForMemory <tt>True</tt> to optimize updates
* to use less memory, <tt>False</tt> (the default) to
* optimize them to use less time.
*
* @see #update update
*
"""
def setOptimizeForMemory(self, optimizeForMemory):
self.optimizeForMemory = optimizeForMemory
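# Usage sketch (assumes `chart` is a GChart instance): trade some update
# speed for a smaller footprint on the next and subsequent updates.
#
#   chart.setOptimizeForMemory(True)
#   chart.update()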
"""*
** Specifies the amount of padding to add just inside of the
** chart's border, as a CSS padding specification string.
** <p>
**
** <p>
** The default padding is <tt>USE_CSS</tt>.
**
** <p>
**
** @param cssPadding the width of the padding, as a CSS padding
** specification string
** (e.g. use "1px" to introduce a 1 pixel padding
** just between the chart's border and the chart itself)
**
** @see #getPadding getPadding
** @see #setBorderWidth setBorderWidth
** @see #setBorderStyle(String) setBorderStyle
** @see #setBorderColor(String) setBorderColor
** @see #USE_CSS USE_CSS
*"""
def setPadding(self, cssPadding):
self.chartDecorationsChanged = True
self.padding = cssPadding
"""*
** Specifies the background color of the area of the chart
** in which symbols representing curve data are displayed
**
**
** <p>
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor CSS color string defining the plot
** area's background color
**
** @see #getPlotAreaBackgroundColor getPlotAreaBackgroundColor
*"""
def setPlotAreaBackgroundColor(self, cssColor):
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setBackgroundColor(cssColor)
"""*
** Specifies the color of the border around the area of the
** chart in which symbols representing curve data are
** displayed.
**
**
** <p>
** For more information on standard CSS color
** specifications see the discussion in
** {@link Symbol#setBackgroundColor Symbol.setBackgroundColor}.
** <p>
**
** @param cssColor CSS color string defining the color of
** the plot area's border
**
** @see #getPlotAreaBorderColor getPlotAreaBorderColor
*"""
def setPlotAreaBorderColor(self, cssColor):
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setBorderColor(cssColor)
"""*
** Specifies the width of the border around the area of the
** chart in which symbols representing curve data are
** displayed.
**
** @param width the width, in pixels, of the border around
** the plot area
**
** @see #getPlotAreaBorderWidth getPlotAreaBorderWidth
*"""
def setPlotAreaBorderWidth(self, width):
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setBorderWidth(width)
"""*
** Sets style of the border around the chart's plot area
** (the rectangular area where the curves are drawn).
**
** <p>
**
** <p>
** @param borderStyle a CSS border style such as
** "solid", "dotted", "dashed", etc.
**
** @see #getPlotAreaBorderStyle getPlotAreaBorderStyle
** @see #setPlotAreaBackgroundColor setPlotAreaBackgroundColor
** @see #setPlotAreaBorderColor setPlotAreaBorderColor
*"""
def setPlotAreaBorderStyle(self, borderStyle):
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setBorderStyle(borderStyle)
"""*
* Sets the image URL that defines the background of
* the GChart plot area. The GChart plot area is the
* rectangular region defined by the x and y axes of
* the plot, but does not include those axes (or
* their ticks).
* <p>
* Note that by default, or if this URL is set to <tt>None</tt>,
* GChart will use the URL returned by
* <tt>getBlankImageURL</tt>.
* <p>
*
* <small><b>Ideas/tips for using the plot area background
* URL:</b>
* <blockquote>
* <ol>
* <li> It's often best to
* exactly match the width and height of the image
* with the GChart plot area width and height
* (defined via <tt>setChartSize</tt>). Otherwise,
* the image will be scaled up or down to fit the
* plot area, which usually doesn't look that great.
*
* <li>Note that since a Google Chart API url is just
* an image url to GChart, you can easily use a
* Google Chart API url to define the background of an
* otherwise client-side chart. For example, you
* might place a 3-D pie chart behind
* a rapidly changing client-side GChart bar chart.
*
* <li> Note that this method's image will appear <i>behind</i>
* every gridline and curve on the chart. To overlay
* images <i>on top of</i> the gridlines or other curves, or
* even to place them outside of the plot area, use a
* dedicated curve and its symbol's <tt>setImageURL</tt>
* method, or simply embed such images within HTML-defined
* point annotations.
* </ol>
*
* </blockquote></small>
*
* @see #getPlotAreaImageURL getPlotAreaImageURL
* @see #setBlankImageURL setBlankImageURL
* @see GChart.Symbol#setImageURL setImageURL
*
* @param imageURL URL of the image used as the background
* of the plot area.
*
"""
def setPlotAreaImageURL(self, imageURL):
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setImageURL(imageURL)
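# Hedged sketch (assumes `chart` is a GChart instance and that
# "watermark-400x300.png" is an image you provide, sized to match the plot
# area so no scaling occurs):
#
#   chart.setChartSize(400, 300)
#   chart.setPlotAreaImageURL("watermark-400x300.png")
#   chart.update()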
"""* @deprecated
**
** Equivalent to
** <tt>setClipToPlotArea(!showOffChartPoints)</tt>.
** Use that method instead.
** <p>
**
** <small>
** As of GChart 2.5, the clip-to-plot-area algorithm no
** longer drops the entire symbol if its x,y coordinates
** are outside of the plot area; instead, it clips them
** off in the traditional "<tt>overflow: hidden</tt>" manner.
** Though unlikely you would need to, there is no easy way
** to recreate the previous behavior. <p>
**
** This change was made so that both rectangular HTML and
** continuous, canvas-rendered
** chart elements would be clipped in a consistent and
** sensible way.
** </small>
**
** @see #setClipToPlotArea setClipToPlotArea
**
*"""
def setShowOffChartPoints(self, showOffChartPoints):
self.setClipToPlotArea(not showOffChartPoints)
"""* @deprecated
**
** Equivalent to
** setClipToDecoratedChart(!showOffDecoratedChart), please
** use that method instead.
**
** @see #setClipToDecoratedChart setClipToDecoratedChart
*"""
def setShowOffDecoratedChartGlyphs(self, showOffDecoratedChartGlyphs):
self.setClipToDecoratedChart(not showOffDecoratedChartGlyphs)
"""*
* Returns the curve that the mouse "brush" is currently
* "touching" (the so-called "hovered over" point), or <tt>None</tt>
* if none.
* <p>
*
* Convenience method equivalent to (when the touched point is
* not <tt>None</tt>) <tt>getTouchedPoint().getParent()</tt>.
* See <tt>getTouchedPoint</tt> for full details.
* <p>
*
*
* See the <tt>setBrushHeight</tt> method for the rules
* GChart uses to determine the currently touched point.
* <p>
*
*
* @return a reference to the curve that the mouse "brush"
* is currently "touching".
*
* @see #getTouchedPoint getTouchedPoint
* @see Symbol#setBrushHeight setBrushHeight
* @see Symbol#setHoverSelectionSymbolType
* setHoverSelectionSymbolType
*
"""
def getTouchedCurve(self):
result = None
if None != self.getTouchedPoint():
result = self.getTouchedPoint().getParent()
return result
"""*
* Returns the point that the mouse "brush" is currently
* "touching" (the so-called "hovered over" point), or <tt>None</tt>
* if none.
*
* <p>
* <small> <i>Fine-print:</i> If the chart clicked on needs an
* update, this method returns the touched point <i>as
* of the last time the chart's in-browser (DOM) display was
* up-to-date</i>. If you don't assure that your chart's DOM display
* is up-to-date via other means (e.g. updating right after you
* change its specifications) a quick check with the
* <tt>isUpdateNeeded</tt> method and a subsequent <tt>update</tt>
* before accessing the touched point can be a good strategy.
* <p> </small>
*
*
* See the <tt>setBrushHeight</tt> method for the rules
* GChart uses to determine the currently touched point.
* <p>
*
* <small>
* <i>Warning:</i> The currently touched point, on FF2 (but not in
* IE7) can be changed (or set to <tt>None</tt>) by invoking
* <tt>Window.alert</tt>. Though I originally expected that such
* a modal alert box would "eat" all mouse events (and it does
* just that in IE7) in FF2 (and possibly other browsers)
* some mouse events on the alert box are also passed on up to
* the GChart. It's best for applications that need to "lock on"
* to the <i>initially</i> touched point to grab a
* reference to the touched point <i>before</i> performing any
* activity that allows the user to interact with the
* browser in ways that could possibly generate GChart-visible
* mouse events.
* </small>
* <p>
*
* @return a reference to the point that the mouse "brush"
* is currently "touching".
*
* @see #getTouchedCurve getTouchedCurve
* @see #touch touch
* @see Symbol#setBrushHeight setBrushHeight
* @see Symbol#setHoverSelectionSymbolType
* setHoverSelectionSymbolType
* @see #isUpdateNeeded isUpdateNeeded
* @see #update update
* @see Axis#getMouseCoordinate getMouseCoordinate
* @see Axis#clientToModel clientToModel
* @see Axis#modelToClient modelToClient
* @see Axis#pixelToModel pixelToModel
* @see Axis#modelToPixel modelToPixel
*
"""
def getTouchedPoint(self):
return self.plotPanel.touchedPoint
"""*
* Sets the number of pixels, in the horizontal
* dimension, available for curve display. Note that
* this curve display area does <i>not</i> include the
* axes themselves, their tick marks, their labels, etc.
*
* <p>
*
* <i>Note</i>: Most modern display devices use "square"
* pixels, that is, pixels whose width and height are
* the same. GChart tacitly assumes square pixels in
* many of its default settings.
*
*
* @param xChartSize the number of x-pixels in the chart region
* used for curve display.
*
* @see #getXChartSize getXChartSize
* @see #getXChartSizeDecorated getXChartSizeDecorated
* @see #setYChartSize setYChartSize
*
"""
def setXChartSize(self, xChartSize):
self.chartDecorationsChanged = True
self.xChartSize = xChartSize
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setWidth(xChartSize)
"""*
* Sets the number of pixels, in the vertical dimension,
* available for curve display. Note that this curve
* display region of the chart does <i>not</i> include
* the axes themselves, their tick marks, labels, etc.
*
* <p>
*
* <i>Note</i>: Most modern display devices use "square"
* pixels, that is, pixels whose width and height are
* the same. GChart tacitly assumes square pixels in
* many of its default settings.
*
* @param yChartSize the number of y-pixels in the chart region
* used for curve display.
*
* @see #getYChartSize getYChartSize
* @see #getYChartSizeDecorated getYChartSizeDecorated
* @see #setXChartSize setXChartSize
*
"""
def setYChartSize(self, yChartSize):
self.chartDecorationsChanged = True
self.yChartSize = yChartSize
c = self.getSystemCurve(PLOTAREA_ID)
c.getSymbol().setHeight(yChartSize)
"""*
* Simulates the user "touching" a point with the mouse, by
* performing those operations that occur when the user "hovers
* over" the specified point. In detail, this method does the
* following:<p>
*
* <ol>
*
* <li> The specified point is made the currently "touched point"
* (this is the reference returned by <tt>getTouchedPoint</tt>). <p>
*
* <li>If the previously touched point had a hover widget,
* that hover widget's <tt>hoverCleanup</tt> method is called.<p>
*
* <li>If the touched point has an associated hover widget, that
* widget's <tt>hoverUpdate</tt> method is called.<p>
*
* <li> Any hover selection feedback or hover annotation on
* any previously touched point is removed.<p>
*
* <li>Any hover annotation for the newly touched point is
* displayed as per the various hover annotation related
* specifications (e.g. <tt>setHoverLocation</tt>) associated with
* the symbol used to render the point.<p>
*
* <li> Any selection feedback for the newly touched point is
* displayed in accord with the hover selection feedback
* specifications (e.g. <tt>setHoverSelectionBorderColor</tt>)
* associated with the symbol used to render the point.<p>
*
* </ol>
*
* Using <tt>None</tt> as the point to touch simulates
* the user moving the mouse into a region where it is not
* touching any point (for example, off the chart entirely).
* <p>
*
* Note that, as with all chart specification changes, you must
* invoke <tt>update</tt> before the point selection and other
* changes associated with this method will appear on the chart.
* <p>
*
* <i>Tip:</i> The touched point can sometimes be used in lieu of a
* point selection capability (which GChart lacks). For example, a
* dialog box that allowed users to choose data points by their
* names could "touch" the point associated with a user-selected
* name in order to highlight it on the chart.
*
* @param pointToTouch this method will perform appropriate
* operations (as described above) in order to simulate the user
* "touching" this point with their mouse.
*
* @see #getTouchedPoint getTouchedPoint
* @see #getTouchedCurve getTouchedCurve
* @see HoverUpdateable#hoverUpdate hoverUpdate
* @see HoverUpdateable#hoverCleanup hoverCleanup
* @see Symbol#setHoverWidget setHoverWidget
* @see Symbol#setHoverLocation setHoverLocation
* @see Symbol#setHoverSelectionBorderColor
* setHoverSelectionBorderColor
* @see Axis#getMouseCoordinate getMouseCoordinate
* @see Axis#clientToModel clientToModel
* @see Axis#modelToClient modelToClient
* @see Axis#pixelToModel pixelToModel
* @see Axis#modelToPixel modelToPixel
*
"""
def touch(self, pointToTouch):
self.plotPanel.touch(pointToTouch)
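# Illustrative sketch (not part of the original source): programmatically
# highlighting a point and refreshing the chart so the hover feedback shows
# up, as the docstring above suggests. `chart` is an assumed GChart instance
# and the getCurve/getPoint accessors are assumed from the wider GChart API.
#
#   point = chart.getCurve(0).getPoint(0)   # pick some existing point
#   chart.touch(point)                      # make it the "touched" point
#   chart.update()                          # render the hover feedback
#   chart.touch(None)                       # later: clear the feedback
#   chart.update()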
"""*
** Builds a chart that reflects current user-specified
** chart specs (curve data, symbol choices, etc.)
** <p>
**
** Before any of the chart specifications of the other
** methods of this class will actually be visible
** on the chart, you must call this method.
** <p>
**
** Typically, for efficiency, you would call this
** method only after you had made all of the desired
** chart specifications via the other methods.
**
** <p>
**
** By default, updates are optimized for speed, and this
** can end up wasting (usually not too much, though there
** are exceptions) memory. To optimize for memory
** instead, use the <tt>setOptimizeForMemory</tt> method.
** <p>
**
** For a discussion of Client-side GChart update times and
** how to minimize them, see
** <a
** href="{@docRoot}/com/googlecode/gchart/client/doc-files/tipsformakingupdatesfaster.html">
** Tips for Making Client-side GChart Updates Faster</a>.
** <p>
**
** <i>Note:</i> Hover feedback is disabled whenever the currently
** rendered chart does not match current chart specs, that is,
** whenever <tt>isUpdateNeeded</tt> returns <tt>True</tt>. Thus,
** to assure that hover feedback remains operational once your
** code returns control to the browser, be sure to call
** <tt>update()</tt> after making a series of changes to your
** chart's properties.
** <p>
**
** Understanding how <tt>update</tt> impacts visibility and size:
** <p>
** <blockquote>
** <small>
** Due to an implementation-related limitation,
** <tt>visibility: hidden</tt> won't hide a GChart
** (<tt>update</tt>
** commandeers the visibility attribute). Instead use
** <tt>display: none</tt> or, equivalently:
**
** <pre>
** myGChart.setVisible(False)
** </pre>
**
** If you need to avoid <tt>display: none</tt> (it can change
** page layout), you can also hide a GChart via lines such as:
**
** <pre>
** DOM.setStyleAttribute(myGChart.getElement(),"overflow","hidden")
** myGChart.setPixelSize(0, 0)
** </pre>
**
** This latter approach gives you the option of leaving the top
** corner of the GChart visible, etc. Note that, with the next
** <tt>update</tt>, GChart will overwrite your size (based on the
** GChart properties that define the size of the chart, such
** as <tt>setChartSize</tt> and <tt>set*Thickness</tt>)
** and your <tt>overflow:hidden</tt> (based on
** <tt>setClipToDecoratedChart</tt>) specifications. To preserve
** them (or in other special cases) you may need to apply such
** settings to an enclosing parent element.
**
** </small>
** </blockquote>
**
**
** @param option determines how the touched (or "hovered
** over") point changes as a result of this update. See
** <tt>TouchedPointUpdateOption</tt> for the available
** choices.
**
** @see TouchedPointUpdateOption TouchedPointUpdateOption
** @see #setOptimizeForMemory setOptimizeForMemory
** @see #isUpdateNeeded isUpdateNeeded
**
*"""
def update(self, option=None):
"""
* This method defines each curve's default pie slice
* orientations, and also separates each curve's points
* into the vertically or horizontally banded bins,
* that GChart needs to perform the hit testing
* that allows it to emulate "touching" points with
* the mouse.
* <p>
*
* Therefore, this line must come first.
*
"""
if option is None:
if self.getHoverTouchingEnabled():
option = TouchedPointUpdateOption.TOUCHED_POINT_UPDATED
else:
option = TouchedPointUpdateOption.TOUCHED_POINT_LOCKED
self.assembleChart()
if TouchedPointUpdateOption.TOUCHED_POINT_LOCKED == option:
# must re-touch (point position, hover-config can change)
self.plotPanel.touch(self.plotPanel.touchedPoint)
elif TouchedPointUpdateOption.TOUCHED_POINT_CLEARED == option:
# if needed, will clear out touched point & related feedback
self.plotPanel.touch(None)
elif TouchedPointUpdateOption.TOUCHED_POINT_UPDATED == option:
# re-determine which point is underneath the mouse now...
self.plotPanel.retouchObjectAtMousePosition()
"""
* Because hover feedback curves come at the end of the curve
* list, given how GChart's rendering process works, this
* second call only has to update these hover feedback curves
* (so it's not like we are really building the chart twice)
*
"""
self.assembleChart()
"""*
* Updates the chart, using an appropriate default touched point
* update option, depending on if hover touching is enabled or
* not.<p>
*
* A convenience method equivalent to:
* <p>
*
* <pre>
if getHoverTouchingEnabled():
update(TouchedPointUpdateOption.TOUCHED_POINT_UPDATED)
else:
update(TouchedPointUpdateOption.TOUCHED_POINT_LOCKED)
* </pre>
*
*
* @see #update(TouchedPointUpdateOption) update(TouchedPointUpdateOption)
* @see #setHoverTouchingEnabled setHoverTouchingEnabled
*
"""
# constructs the chart within the chart panel from current specs
def assembleChart(self):
if (self.chartDecorationsChanged or self.xAxis.limitsChanged() or
self.yAxis.limitsChanged() or self.y2Axis.limitsChanged()):
self.plotPanel.reset(self.xChartSize, self.yChartSize,
self.hasYAxis(), self.hasY2Axis(),
self.xAxis, self.yAxis, self.y2Axis)
GChartUtil.setFontFamily(self, self.getFontFamily())
GChartUtil.setBackgroundColor(self, self.getBackgroundColor())
GChartUtil.setBorderColor(self, self.getBorderColor())
GChartUtil.setBorderStyle(self,self.getBorderStyle())
GChartUtil.setBorderWidth(self, self.getBorderWidth())
GChartUtil.setPadding(self, self.getPadding())
GChartUtil.setOverflow(self, (self.getClipToDecoratedChart() and
"hidden" or "visible"))
self.setPixelSize(self.plotPanel.getXChartSizeDecoratedQuickly(),
self.plotPanel.getYChartSizeDecoratedQuickly())
self.updateDecorations(self.plotPanel.getXChartSizeDecoratedQuickly())
self.xAxis.rememberLimits()
self.yAxis.rememberLimits()
self.y2Axis.rememberLimits()
self.invalidateEveryCurve()
self.chartDecorationsChanged = False
# actually renders chart, including internal curves used
# to represent the decorations (title, axis labels, etc.)
self.realizePlotPanel()
# To avoid order-of-magnitude FF2 performance hit on busy pages,
# first time, must add plotPanel only AFTER building chart
if self.plotPanel != self.chartPanel.getWidget():
self.chartPanel.add(self.plotPanel)
"""
* Due to how GChart plays around with visible elements contained inside
* hidden elements to align its labels properly, if we allowed top
* level <tt>visibility:hidden</tt> the result would be that everything
* <i>except</i> annotations would be invisible.
* <p>
*
* We can prevent such
* weird behavior by setting <tt>visibility:visible</tt> on the top
* level element; this setting effectively short-circuits any
* top level visibility setting the user may have made. <p>
*
* Users must either use <tt>display:none</tt> (as the Widget method
* <tt>setVisible</tt> does) or create an enclosing 0-sized div with
* <tt>overflow:hidden</tt> to hide a GChart.
* <p>
*
"""
DOM.setStyleAttribute(self.getElement(), "visibility","visible")
else:
"""
* Without these 2 lines IE7 won't repaint GChart's annotations.
* The lines are not needed in FF2; an IE7 bug is suspected.<p>
*
* I got this workaround from <a href=
* "http:#examples.roughian.com">Ian Bambury</a> as part of <a
* href="http:#groups.google.com/group/Google-Web-Toolkit/browse_thread/thread/4c54d8b4aea7f98b/6efd1ab4e5fc0e7b?#6efd1ab4e5fc0e7b">
* this discussion on the GWT forum</a>.
* <p>
*
* (Note comment regarding need for explicit visibility above).
*
"""
DOM.setStyleAttribute(self.getElement(), "visibility","hidden")
DOM.setStyleAttribute(self.getElement(), "visibility","visible")
# create a Grid representing the chart legend.
def createLegend(self, pp):
result = Grid(self.getNVisibleCurvesOnLegend(), 2)
iVisible = 0
"""
* Simply eliminating the border entirely is a valid transparency
* emulation for the legend (no positional shifting is needed as is
* needed for the images used to draw the main chart's curves) because
* the legend is always positioned by its center point, and the border
* extends around the entire legend key, so removing it does not result
* in any change to the legend key's center position. <p>
*
* If multiple legend locations (beyond the current "always centered in
* a band along the right edge" option) were ever supported, appropriate
* positional shifts would then have to be introduced to emulate
* transparent borders.
*
"""
if TRANSPARENT_BORDER_COLOR == self.getLegendBorderColor():
GChartUtil.setBorderWidth(result, 0)
GChartUtil.setBorderColor(result, "transparent")
else:
GChartUtil.setBorderWidth(result, abs(self.getLegendBorderWidth()))
GChartUtil.setBorderColor(result, self.getLegendBorderColor())
GChartUtil.setBorderStyle(result, self.getLegendBorderStyle())
GChartUtil.setBackgroundColor(result, self.getLegendBackgroundColor())
nCurves = self.getNCurves()
for i in range(nCurves):
c = self.getSystemCurve(i)
if c.isVisible() and c.getLegendLabel()!=None:
symBorderFraction = (c.getSymbol().getBorderWidth()/
max(
max(1.0,c.getSymbol().getFillThickness()),
max(c.getSymbol().getWidth(pp),
c.getSymbol().getHeight(pp, c.onY2()))))
icon = c.getSymbol().getSymbolType().createIconImage(
c.getSymbol(), self.getLegendFontSize(),
symBorderFraction)
result.setWidget(iVisible, 0, icon)
result.getCellFormatter().setAlignment(iVisible, 0,
HasHorizontalAlignment.ALIGN_CENTER,
HasVerticalAlignment.ALIGN_MIDDLE)
label = HTML(c.getLegendLabel())
GChartUtil.setFontWeight(label, self.getLegendFontWeight())
GChartUtil.setFontStyle(label, self.getLegendFontStyle())
GChartUtil.setColor(label, self.getLegendFontColor())
GChartUtil.setFontSize(label, self.getLegendFontSize())
result.setWidget(iVisible, 1, label)
result.getCellFormatter().setAlignment(iVisible, 1,
HasHorizontalAlignment.ALIGN_LEFT,
HasVerticalAlignment.ALIGN_MIDDLE)
iVisible += 1
return result
# returns char-width-based default legend thickness
def getDefaultLegendThickness(self):
EXTRA_WIDTH = 5; # allow for padding & symbol
maxLen = 0
nCurves = self.getNCurves()
for i in range(nCurves):
c = self.getSystemCurve(i)
if c.isVisible() and None != c.getLegendLabel():
maxLen = max(maxLen,
GChartUtil.htmlWidth(c.getLegendLabel()))
return int ( ((maxLen + EXTRA_WIDTH) *
self.getLegendFontSize() *
TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND))
def getNVisibleCurvesOnLegend(self):
result = 0
nCurves = self.getNCurves()
for i in range(nCurves):
if self.getSystemCurve(i).isVisible() and self.getSystemCurve(i).getLegendLabel() is not None:
result += 1
return result
# Defines a default curve border color when curves first created
def setDefaultBorderColor(self, curve, index):
curve.getSymbol().setBorderColor(
self.defaultSymbolBorderColors[
index % len(self.defaultSymbolBorderColors)
])
# renders the curve in the plot panel
def realizeCurve(self, c):
if c.isValidated():
return
internalIndex = self.getInternalCurveIndex(c)
rpIndex = self.getRenderingPanelIndex(internalIndex)
grp = self.plotPanel.getGraphicsRenderingPanel(rpIndex)
arp = self.plotPanel.getAnnotationRenderingPanel(rpIndex)
if GChartWidgets.DECORATIVE_RENDERING_PANEL_INDEX == rpIndex:
# background panel only gets initialized for first curve
if 0 == internalIndex:
# background panel never uses canvas
grp.beginRendering(None)
arp.beginRendering()
c.setWasCanvasRendered(False)
# continuous fill, non-empty fill, canvas available
elif (0 == c.getSymbol().getFillSpacing() and
0 < c.getSymbol().getFillThickness() and
None != self.getCanvasFactory() and
c.isVisible()):
grp.maybeAddCanvas()
canvasRegion = c.getContainingRectangle(self.plotPanel)
grp.beginRendering(canvasRegion)
arp.beginRendering()
c.setWasCanvasRendered(True)
else:
# does not use canvas, or it is invisible
grp.beginRendering(None)
arp.beginRendering()
c.setWasCanvasRendered(False)
if c.isVisible():
# Separate points into vertical/horizontal band-bins provided
# 1) it is not a system curve and 2) it is not of a type whose
# position follows the mouse (and thus has no fixed location
# suitable for banding) and 3) at least one kind of hover feedback
# is being provided for the curve.
if self.getCurveIndex(c) >= 0 and not isMouseAnchored(c.getSymbol().getSymbolType()) and (c.getSymbol().getHoverSelectionEnabled() or c.getSymbol().getHoverAnnotationEnabled()):
c.bandSeparatePoints()
else:
# hit test banding calcs unneeded; skip them for speed.
c.clearBandList()
# Note: these lines must come AFTER band separation lines above
nPoints = c.getNPoints()
for j in range(nPoints):
c.realizePoint(self.plotPanel, grp, arp, j)
# only end background panel rendering w last background curve
if GChartWidgets.DECORATIVE_RENDERING_PANEL_INDEX != rpIndex or internalIndex == N_PRE_SYSTEM_CURVES-1:
grp.endRendering()
arp.endRendering()
# else it's a background panel curve, and not the last one
c.validated = True
# marks every curve, including system curves, as needing an update
def invalidateEveryCurve(self):
for i in range(len(self.curves)):
self.curves[i].invalidate()
# marks every developer-accessible curve as needing an update
def invalidateAccessibleCurves(self):
nCurves = self.getNCurves()
for i in range(nCurves):
self.getSystemCurve(i).invalidate()
# invalidates every curve that has a pie slice type
def invalidateAllSlices(self):
nCurves = self.getNCurves()
for i in range(nCurves):
c = self.getSystemCurve(i)
if isinstance(c.getSymbol().getSymbolType(),
SymbolType.PieSliceSymbolType):
c.invalidate()
# Invalidates every pie slice curve whose orientation could
# depend on the orientation of the given curve
def invalidateDependentSlices(self, iFirstCurve):
# only user defined curve can have slice dependency relationships
if self.isSystemCurveIndex(iFirstCurve):
return
nCurves = self.getNCurves()
for i in range(iFirstCurve, nCurves):
c = self.getSystemCurve(i)
if isinstance(c.getSymbol().getSymbolType(),
SymbolType.PieSliceSymbolType):
c.invalidate()
elif i == iFirstCurve:
# if first curve isn't a slice,
break; # there are no dependent slices
# Defines the default pie slice orientations for every pie-slice curve
def setDefaultPieSliceOrientations(self):
self.setLastPieSliceOrientation(self.getInitialPieSliceOrientation())
nCurves = self.getNCurves()
for i in range(nCurves):
c = self.getSystemCurve(i)
# keep track of default next orientation for pie slices
# (must do this even if we don't have to redraw slice)
if isinstance(c.getSymbol().getSymbolType(),
SymbolType.PieSliceSymbolType):
c.getSymbol().setDefaultPieSliceOrientation(
self.getLastPieSliceOrientation())
self.setLastPieSliceOrientation(
c.getSymbol().getDecodedPieSliceOrientation()
+ c.getSymbol().getPieSliceSize())
def realizePlotPanel(self):
self.setDefaultPieSliceOrientations()
"""
* Render both system curves (those with negative ids that
* are used to render title, ticks, etc.) and ordinary curves.
"""
nCurves = self.getNCurves()
for i in range(-N_SYSTEM_CURVES, nCurves):
c = self.getSystemCurve(i)
self.realizeCurve(c)
""" Returns True if the rendering panel index is associated
* with one of the internal, hover-feedback curves.
* <p>
*
* This method relies on the fact that rendering panels
* appear in this order:
* <p>
*
* <ol>
* <li> a single rp that renders all chart decorations
* <li> self.getNCurves() rps (1 for each developer-defined curve)
* <li> the two rendering panels associated with the two
* system-defined hover feedback curves
* </ol>
*
"""
def isHoverFeedbackRenderingPanel(self, rpIndex):
result = rpIndex > self.getNCurves()
return result
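# Worked example of the ordering described in the docstring above: with
# getNCurves() == 3 there are 1 + 3 + 2 = 6 rendering panels (indices 0-5);
# index 0 holds the chart decorations, 1-3 the developer-defined curves, and
# 4-5 the hover feedback curves, so this method returns True only for 4 and 5.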
"""
* This code works around a bug in GWTCanvas that can cause
* (in IE) previously rendered VML elements to have their fill
* and stroke color, and stroke thickness properties revert to
* some sort of defaults (I saw white, black, and 1px in my
* tests) when the canvas is re-inserted into the DOM.
*
* See TestGChart55.java and TestGChart55a.java for more
* info on the GWTCanvas bug that makes this code necessary.
*
* TODO: Implement technique of GWTCanvasIssue293.patch to
* override removeFromParent and store/restore innerHTML
* as a more efficient workaround for this problem. If
* a GWTCanvas is released, you could remove this
* workaround altogether.
*
"""
# avoids inefficiency of re-rendering in most common case
def onUnload(self):
Composite.onUnload(self)
self.wasUnloaded = True
def onLoad(self):
Composite.onLoad(self)
if self.wasUnloaded and self.plotPanel.getRenderingPanelCount() > 0:
isUpToDate = not self.isUpdateNeeded()
nCurves = self.getNCurves()
for i in range(nCurves):
c = self.getCurve(i)
if c.isCanvasRendered():
c.invalidate()
if isUpToDate:
self.realizeCurve(c)
# else since chart needs update, presume they will
# update later, no need to auto-patch things up
# (and simple patch-rerender won't work anyway).
# else never inserted/rendered; skip patchup-rerendering
# end of class GChart
| {
"content_hash": "7c5ba57c9539dbadfc48e115927524b1",
"timestamp": "",
"source": "github",
"line_count": 3865,
"max_line_length": 195,
"avg_line_length": 36.32833117723157,
"alnum_prop": 0.6478929413356693,
"repo_name": "Hasimir/pyjs",
"id": "60b4e299b9d0a77c767967130b569af448983d8b",
"size": "140409",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pyjswidgets/pyjamas/chart/GChart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
} |
import sys
import random
lines = []
for line in sys.stdin:
lines.append(line)
#lines.sort()
length = len(lines)
for l in range(0,length):
choice = random.randint(0,len(lines)-1)
#print str(choice) + ' of ' + str(len(lines))
print lines.pop(choice),
| {
"content_hash": "a3135872517cbd846f208f1273684369",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 48,
"avg_line_length": 18.857142857142858,
"alnum_prop": 0.6590909090909091,
"repo_name": "timrdf/csv2rdf4lod-automation",
"id": "38bc1c66150621c019a345187666eb462d462299",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/util/randomize-line-order.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "36758"
},
{
"name": "Batchfile",
"bytes": "5607"
},
{
"name": "C",
"bytes": "52334"
},
{
"name": "CSS",
"bytes": "5012"
},
{
"name": "HTML",
"bytes": "607634"
},
{
"name": "Java",
"bytes": "5909684"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "PHP",
"bytes": "26207"
},
{
"name": "Perl",
"bytes": "39517"
},
{
"name": "Python",
"bytes": "55028"
},
{
"name": "R",
"bytes": "455"
},
{
"name": "Shell",
"bytes": "1021416"
},
{
"name": "XSLT",
"bytes": "54468"
}
],
"symlink_target": ""
} |
from toontown.safezone import DGPlayground
from toontown.safezone import SafeZoneLoader
class DGSafeZoneLoader(SafeZoneLoader.SafeZoneLoader):
def __init__(self, hood, parentFSM, doneEvent):
SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
self.playgroundClass = DGPlayground.DGPlayground
self.musicFile = 'phase_8/audio/bgm/DG_nbrhood.ogg'
self.activityMusicFile = 'phase_8/audio/bgm/DG_SZ.ogg'
self.dnaFile = 'phase_8/dna/daisys_garden_sz.pdna'
self.safeZoneStorageDNAFile = 'phase_8/dna/storage_DG_sz.pdna'
def load(self):
SafeZoneLoader.SafeZoneLoader.load(self)
self.birdSound = map(base.loadSfx, ['phase_8/audio/sfx/SZ_DG_bird_01.ogg',
'phase_8/audio/sfx/SZ_DG_bird_02.ogg',
'phase_8/audio/sfx/SZ_DG_bird_03.ogg',
'phase_8/audio/sfx/SZ_DG_bird_04.ogg'])
def unload(self):
SafeZoneLoader.SafeZoneLoader.unload(self)
del self.birdSound | {
"content_hash": "958645060ac3f100f3e8761de146d2a8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 47.43478260869565,
"alnum_prop": 0.6223648029330889,
"repo_name": "Spiderlover/Toontown",
"id": "7b5870a79d8b4254aeb1b7afb006e233576b8ef9",
"size": "1091",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/safezone/DGSafeZoneLoader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/inter-level-propagation-policies/level1-to-level2/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the propagation
of prefixes from IS-IS Level 1 to Level 2.
"""
__slots__ = (
"_path_helper", "_extmethods", "__import_policy", "__default_import_policy"
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__import_policy = YANGDynClass(
base=TypedListType(allowed_type=six.text_type),
is_leaf=False,
yang_name="import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__default_import_policy = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
),
default=six.text_type("REJECT_ROUTE"),
is_leaf=True,
yang_name="default-import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="default-policy-type",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"inter-level-propagation-policies",
"level1-to-level2",
"config",
]
def _get_import_policy(self):
"""
Getter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/import_policy (leafref)
YANG Description: list of policy names in sequence to be applied on
receiving a routing update in the current context, e.g.,
for the current peer group, neighbor, address family,
etc.
"""
return self.__import_policy
def _set_import_policy(self, v, load=False):
"""
Setter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/import_policy (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_import_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_import_policy() directly.
YANG Description: list of policy names in sequence to be applied on
receiving a routing update in the current context, e.g.,
for the current peer group, neighbor, address family,
etc.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(allowed_type=six.text_type),
is_leaf=False,
yang_name="import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """import_policy must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__import_policy = t
if hasattr(self, "_set"):
self._set()
def _unset_import_policy(self):
self.__import_policy = YANGDynClass(
base=TypedListType(allowed_type=six.text_type),
is_leaf=False,
yang_name="import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_default_import_policy(self):
"""
Getter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/default_import_policy (default-policy-type)
YANG Description: explicitly set a default policy if no policy definition
in the import policy chain is satisfied.
"""
return self.__default_import_policy
def _set_default_import_policy(self, v, load=False):
"""
Setter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/default_import_policy (default-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_import_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_import_policy() directly.
YANG Description: explicitly set a default policy if no policy definition
in the import policy chain is satisfied.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
),
default=six.text_type("REJECT_ROUTE"),
is_leaf=True,
yang_name="default-import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="default-policy-type",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """default_import_policy must be of a type compatible with default-policy-type""",
"defined-type": "openconfig-network-instance:default-policy-type",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCEPT_ROUTE': {}, 'REJECT_ROUTE': {}},), default=six.text_type("REJECT_ROUTE"), is_leaf=True, yang_name="default-import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='default-policy-type', is_config=True)""",
}
)
self.__default_import_policy = t
if hasattr(self, "_set"):
self._set()
def _unset_default_import_policy(self):
self.__default_import_policy = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
),
default=six.text_type("REJECT_ROUTE"),
is_leaf=True,
yang_name="default-import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="default-policy-type",
is_config=True,
)
import_policy = __builtin__.property(_get_import_policy, _set_import_policy)
default_import_policy = __builtin__.property(
_get_default_import_policy, _set_default_import_policy
)
_pyangbind_elements = OrderedDict(
[
("import_policy", import_policy),
("default_import_policy", default_import_policy),
]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/inter-level-propagation-policies/level1-to-level2/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the propagation
of prefixes from IS-IS Level 1 to Level 2.
"""
__slots__ = (
"_path_helper", "_extmethods", "__import_policy", "__default_import_policy"
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__import_policy = YANGDynClass(
base=TypedListType(allowed_type=six.text_type),
is_leaf=False,
yang_name="import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__default_import_policy = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
),
default=six.text_type("REJECT_ROUTE"),
is_leaf=True,
yang_name="default-import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="default-policy-type",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"inter-level-propagation-policies",
"level1-to-level2",
"config",
]
def _get_import_policy(self):
"""
Getter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/import_policy (leafref)
YANG Description: list of policy names in sequence to be applied on
receiving a routing update in the current context, e.g.,
for the current peer group, neighbor, address family,
etc.
"""
return self.__import_policy
def _set_import_policy(self, v, load=False):
"""
Setter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/import_policy (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_import_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_import_policy() directly.
YANG Description: list of policy names in sequence to be applied on
receiving a routing update in the current context, e.g.,
for the current peer group, neighbor, address family,
etc.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(allowed_type=six.text_type),
is_leaf=False,
yang_name="import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """import_policy must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__import_policy = t
if hasattr(self, "_set"):
self._set()
def _unset_import_policy(self):
self.__import_policy = YANGDynClass(
base=TypedListType(allowed_type=six.text_type),
is_leaf=False,
yang_name="import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_default_import_policy(self):
"""
Getter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/default_import_policy (default-policy-type)
YANG Description: explicitly set a default policy if no policy definition
in the import policy chain is satisfied.
"""
return self.__default_import_policy
def _set_default_import_policy(self, v, load=False):
"""
Setter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/inter_level_propagation_policies/level1_to_level2/config/default_import_policy (default-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_import_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_import_policy() directly.
YANG Description: explicitly set a default policy if no policy definition
in the import policy chain is satisfied.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
),
default=six.text_type("REJECT_ROUTE"),
is_leaf=True,
yang_name="default-import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="default-policy-type",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """default_import_policy must be of a type compatible with default-policy-type""",
"defined-type": "openconfig-network-instance:default-policy-type",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCEPT_ROUTE': {}, 'REJECT_ROUTE': {}},), default=six.text_type("REJECT_ROUTE"), is_leaf=True, yang_name="default-import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='default-policy-type', is_config=True)""",
}
)
self.__default_import_policy = t
if hasattr(self, "_set"):
self._set()
def _unset_default_import_policy(self):
self.__default_import_policy = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
),
default=six.text_type("REJECT_ROUTE"),
is_leaf=True,
yang_name="default-import-policy",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="default-policy-type",
is_config=True,
)
import_policy = __builtin__.property(_get_import_policy, _set_import_policy)
default_import_policy = __builtin__.property(
_get_default_import_policy, _set_default_import_policy
)
_pyangbind_elements = OrderedDict(
[
("import_policy", import_policy),
("default_import_policy", default_import_policy),
]
)
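# Usage sketch (not part of the generated bindings): leaves are read and
# written through the generated properties above, and values are validated
# against the declared YANG types; the policy name used here is illustrative.
#
#   cfg = config()
#   cfg.import_policy = ["LEVEL1-TO-LEVEL2"]
#   cfg.default_import_policy = "ACCEPT_ROUTE"   # only ACCEPT_ROUTE/REJECT_ROUTE allowed
#   assert cfg.default_import_policy == "ACCEPT_ROUTE"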
| {
"content_hash": "2f00321e283a0d9c3c82353d04ee2de6",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 603,
"avg_line_length": 42.86897880539499,
"alnum_prop": 0.5935547665063599,
"repo_name": "napalm-automation/napalm-yang",
"id": "ea9b6405ab2269e679a8da87d9b69958e7dcaa91",
"size": "22273",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/inter_level_propagation_policies/level1_to_level2/config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
from collections import abc
import itertools
import re
import sys
from neutron_lib.api import attributes
from neutron_lib.api.definitions import network as net_apidef
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from neutron_lib.services import constants as service_const
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_policy import opts
from oslo_policy import policy
from oslo_utils import excutils
import stevedore
from neutron._i18n import _
from neutron.common import cache_utils as cache
LOG = logging.getLogger(__name__)
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
ADVSVC_CTX_POLICY = 'context_is_advsvc'
# Identify the attribute used by a resource to reference another resource
_RESOURCE_FOREIGN_KEYS = {
net_apidef.COLLECTION_NAME: 'network_id',
# TODO(slaweq): use SECURITYGROUPS constant from api def when
# securitygroups api def will be moved to neutron-lib
'security_groups': 'security_group_id'
}
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def register_rules(enforcer):
extmgr = stevedore.extension.ExtensionManager('neutron.policies',
invoke_on_load=True)
policies = [list(e.obj) for e in extmgr.extensions]
LOG.debug('Loaded default policies from %s '
'under neutron.policies entry points',
[e.name for e in extmgr.extensions])
enforcer.register_defaults(itertools.chain(*policies))
def init(conf=cfg.CONF, policy_file=None, suppress_deprecation_warnings=False):
"""Init an instance of the Enforcer class."""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
            # TODO(slaweq) Explicitly disable the warnings for policies
            # changing their default check_str. During the policy-defaults-refresh
            # work all of the policy defaults were changed, and the warning
            # emitted for each policy started flooding the logs of various tools.
            # Once we move to a new-defaults-only world we can enable these
            # warnings again.
_ENFORCER.suppress_default_change_warnings = True
if suppress_deprecation_warnings:
_ENFORCER.suppress_deprecation_warnings = True
register_rules(_ENFORCER)
_ENFORCER.load_rules(True)
def refresh(policy_file=None):
"""Reset policy and init a new instance of Enforcer."""
reset()
init(policy_file=policy_file)
def get_resource_and_action(action, pluralized=None):
"""Return resource and enforce_attr_based_check(boolean) per
resource and action extracted from api operation.
"""
data = action.split(':', 1)[0].split('_', 1)
resource = pluralized or ("%ss" % data[-1])
enforce_attr_based_check = data[0] not in ('get', 'delete')
return (resource, enforce_attr_based_check)
def set_rules(policies, overwrite=True):
"""Set rules based on the provided dict of rules.
:param policies: New policies to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
init()
_ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
"""Verify that an attribute is present and is explicitly set."""
if target.get(constants.ATTRIBUTES_TO_UPDATE):
# In the case of update, the function should not pay attention to a
# default value of an attribute, but check whether it was explicitly
# marked as being updated instead.
return (attribute_name in target[constants.ATTRIBUTES_TO_UPDATE] and
target[attribute_name] is not constants.ATTR_NOT_SPECIFIED)
result = (attribute_name in target and
target[attribute_name] is not constants.ATTR_NOT_SPECIFIED)
if result and 'default' in resource[attribute_name]:
return target[attribute_name] != resource[attribute_name]['default']
return result
def _should_validate_sub_attributes(attribute, sub_attr):
"""Verify that sub-attributes are iterable and should be validated."""
validate = attribute.get('validate')
return (validate and isinstance(sub_attr, abc.Iterable) and
any([k.startswith('type:dict') and
v for (k, v) in validate.items()]))
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = [k for k in validate.keys() if k.startswith('type:dict')]
if not key:
LOG.warning("Unable to find data type descriptor for attribute %s",
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s.",
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
(action, attr_name,
sub_attr_name)) for
sub_attr_name in data if sub_attr_name in
target[attr_name]]
return policy.AndCheck(sub_attr_rules)
def _build_list_of_subattrs_rule(attr_name, attribute_value, action):
rules = []
for sub_attr in attribute_value:
if isinstance(sub_attr, dict):
for k in sub_attr:
rules.append(policy.RuleCheck(
'rule', '%s:%s:%s' % (action, attr_name, k)))
if rules:
return policy.AndCheck(rules)
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
def _build_match_rule(action, target, pluralized):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
1) add entries for matching permission on objects
2) add an entry for the specific action (e.g.: create_network)
3) add an entry for attributes of a resource for which the action
is being executed (e.g.: create_network:shared)
4) add an entry for sub-attributes of a resource for which the
action is being executed
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
registered_rule = _ENFORCER.registered_rules.get(action)
if registered_rule and registered_rule.scope_types:
match_rule.scope_types = registered_rule.scope_types
resource, enforce_attr_based_check = get_resource_and_action(
action, pluralized)
if enforce_attr_based_check:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCES
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target, action):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck(
'rule', '%s:%s' % (action, attribute_name))
# Build match entries for sub-attributes
if _should_validate_sub_attributes(
attribute, target[attribute_name]):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
action, target)])
attribute_value = target[attribute_name]
if isinstance(attribute_value, list):
subattr_rule = _build_list_of_subattrs_rule(
attribute_name, attribute_value, action)
if subattr_rule:
attr_rule = policy.AndCheck(
[attr_rule, subattr_rule])
match_rule = policy.AndCheck([match_rule, attr_rule])
return match_rule
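# Illustrative expansion of the steps listed in the docstring above (names
# are examples): for action 'create_network' with a target that explicitly
# sets the policy-enforced 'shared' attribute, the returned rule is roughly
# AndCheck([RuleCheck('rule', 'create_network'),
#           RuleCheck('rule', 'create_network:shared')]).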
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.yaml
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
"""Resource ownership check.
This check verifies the owner of the current resource, or of another
resource referenced by the one under analysis.
In the former case it falls back to a regular GenericCheck, whereas
in the latter case it leverages the plugin to load the referenced
resource and perform the check.
"""
def __init__(self, kind, match):
self._orig_kind = kind
self._orig_match = match
# Process the match
try:
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s. "
"Match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
policy="%s:%s" % (kind, match),
reason=err_reason)
self._cache = cache._get_memory_cache_region(expiration_time=5)
super(OwnerCheck, self).__init__(kind, match)
# NOTE(slaweq): It seems we need to have it like that, otherwise we hit
# TypeError: cannot pickle '_thread.RLock' object
# during initialization of the policy rules when Neutron is run with
# mod_uwsgi, see bug https://bugs.launchpad.net/neutron/+bug/1915494 for
# details
def __deepcopy__(self, memo):
return OwnerCheck(self._orig_kind, self._orig_match)
@cache.cache_method_results
def _extract(self, resource_type, resource_id, field):
# NOTE(salv-orlando): This check currently assumes the parent
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
plugin = directory.get_plugin()
if resource_type in service_const.EXT_PARENT_RESOURCE_MAPPING:
plugin = directory.get_plugin(
service_const.EXT_PARENT_RESOURCE_MAPPING[resource_type])
f = getattr(plugin, 'get_%s' % resource_type)
# f *must* exist, if not found it is better to let neutron
# explode. Check will be performed with admin context
try:
data = f(context.get_admin_context(),
resource_id,
fields=[field])
except exceptions.NotFound as e:
# NOTE(kevinbenton): a NotFound exception can occur if a
# list operation is happening at the same time as one of
# the parents and its children being deleted. So we issue
# a RetryRequest so the API will redo the lookup and the
# problem items will be gone.
raise db_exc.RetryRequest(e)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Policy check error while calling %s!', f)
return data[field]
def __call__(self, target, creds, enforcer):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
# however if they're not separated by a colon, use an underscore
# as a separator for backward compatibility
def do_split(separator):
parent_res, parent_field = self.target_field.split(
separator, 1)
return parent_res, parent_field
for separator in (':', '_'):
try:
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug("Unable to find ':' as separator in %s.",
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.error(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
parent_foreign_key = _RESOURCE_FOREIGN_KEYS.get(
"%ss" % parent_res, None)
if parent_res == constants.EXT_PARENT_PREFIX:
for resource in service_const.EXT_PARENT_RESOURCE_MAPPING:
key = "%s_%s_id" % (constants.EXT_PARENT_PREFIX, resource)
if key in target:
parent_foreign_key = key
parent_res = resource
break
if not parent_foreign_key:
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.error(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
target[self.target_field] = self._extract(
parent_res, target[parent_foreign_key], parent_field)
match = self.match % target
if self.kind in creds:
return match == str(creds[self.kind])
return False
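# Example policy rule (illustrative) that relies on this check: allow an
# operation when the caller owns the network referenced by the target, e.g.
#
#   "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s"
#
# Here OwnerCheck splits 'network:tenant_id', loads the parent network via
# its 'network_id' foreign key, and compares the owner with the credentials.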
@policy.register('field')
class FieldCheck(policy.Check):
def __init__(self, kind, match):
self._orig_kind = kind
self._orig_match = match
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
# Value might need conversion - we need help from the attribute map
try:
attr = attributes.RESOURCES[resource][field]
conv_func = attr['convert_to']
except KeyError:
conv_func = lambda x: x
self.field = field
self.resource = resource
self.value = conv_func(value)
self.regex = re.compile(value[1:]) if value.startswith('~') else None
# TODO(stephenfin): Remove this when we drop support for Python 3.6, since
# that supports copying regex objects natively
def __deepcopy__(self, memo):
return FieldCheck(self._orig_kind, self._orig_match)
def __call__(self, target_dict, cred_dict, enforcer):
target_value = self._get_target_value(target_dict)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
return False
if self.regex:
return bool(self.regex.match(target_value))
return target_value == self.value
def _get_target_value(self, target_dict):
if self.field in target_dict:
return target_dict[self.field]
# NOTE(slaweq): In case that target field is "networks:shared" we need
# to treat it in "special" way as it may be used for resources other
# than network, e.g. for port or subnet
target_value = None
if self.resource == "networks" and self.field == constants.SHARED:
target_network_id = target_dict.get("network_id")
if not target_network_id:
LOG.debug("Unable to find network_id field in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return
project_id = target_dict.get('project_id')
ctx = (context.Context(tenant_id=project_id) if project_id
else context.get_admin_context())
plugin = directory.get_plugin()
network = plugin.get_network(ctx, target_network_id)
target_value = network.get(self.field)
if target_value is None:
LOG.debug("Unable to find requested field: %(field)s in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return target_value
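# Example policy rules (illustrative) that rely on this check: mark networks
# with shared=True as readable by everyone, e.g.
#
#   "shared": "field:networks:shared=True"
#   "get_network": "rule:admin_or_owner or rule:shared"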
def _prepare_check(context, action, target, pluralized):
"""Prepare rule, target, and context for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target, pluralized)
return match_rule, target, context
def log_rule_list(match_rule):
if LOG.isEnabledFor(logging.DEBUG):
rules = _process_rules_list([], match_rule)
LOG.debug("Enforcing rules: %s", rules)
def check(context, action, target, plugin=None, might_not_exist=False,
pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:return: Returns True if access is permitted else False.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
# TODO(slaweq): Remove that is_admin check and always perform rules checks
# when old, deprecated rules will be removed and only rules with new
# personas will be supported
if not cfg.CONF.oslo_policy.enforce_new_defaults and context.is_admin:
return True
if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
return True
match_rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
return _ENFORCER.enforce(match_rule,
target,
credentials,
pluralized=pluralized)
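# Usage sketch (hypothetical caller): ask whether the current request may
# update a port before doing the work; the target keys below are illustrative.
#
#   target = {'project_id': context.project_id, 'network_id': port['network_id']}
#   if not check(context, 'update_port', target):
#       ...  # deny or skip the operation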
def enforce(context, action, target, plugin=None, pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:raises oslo_policy.policy.PolicyNotAuthorized:
if verification fails.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
# TODO(slaweq): Remove that is_admin check and always perform rules checks
# when old, deprecated rules will be removed and only rules with new
# personas will be supported
if not cfg.CONF.oslo_policy.enforce_new_defaults and context.is_admin:
return True
rule, target, context = _prepare_check(context, action, target, pluralized)
try:
result = _ENFORCER.enforce(rule, target, context, action=action,
do_raise=True)
except (policy.PolicyNotAuthorized, policy.InvalidScope):
with excutils.save_and_reraise_exception():
log_rule_list(rule)
LOG.debug("Failed policy enforce for '%s'", action)
return result
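def _example_enforce_usage(context, network):
    # Editorial sketch, not part of the original module: enforce() runs the
    # policy engine with do_raise=True, so callers usually let the oslo.policy
    # exception propagate or translate it. This hypothetical helper, the action
    # name and the target dict are illustrative only.
    target = {'project_id': network.get('project_id')}
    try:
        enforce(context, 'update_network', target)
    except policy.PolicyNotAuthorized:
        LOG.debug("update_network denied for target %s", target)
        raise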
def get_enforcer():
# NOTE(amotoki): This was borrowed from nova/policy.py.
# This method is for use by oslo.policy CLI scripts. Those scripts need the
# 'output-file' and 'namespace' options, but having those in sys.argv means
# loading the neutron config options will fail as those are not expected to
# be present. So we pass in an arg list with those stripped out.
conf_args = []
# Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:]
i = 1
while i < len(sys.argv):
if sys.argv[i].strip('-') in ['namespace', 'output-file']:
i += 2
continue
conf_args.append(sys.argv[i])
i += 1
cfg.CONF(conf_args, project='neutron')
init()
return _ENFORCER
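# Editorial note, not part of the original module: get_enforcer() is meant to be
# wired into oslo.policy CLI entry points. Those scripts receive options such as
# "--namespace neutron --output-file policy.yaml"; the loop above drops both
# flags and their values so that cfg.CONF, which does not know these options,
# can still parse the remaining arguments.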
| {
"content_hash": "cc2476fdce7b295a261f4a9eaefe21f1",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 112,
"avg_line_length": 42.8188679245283,
"alnum_prop": 0.6135101789019124,
"repo_name": "openstack/neutron",
"id": "dca079b3c980e378c1a5b92fc95fd93a458c8f60",
"size": "23335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import subprocess
from simlammps import read_data_file
from simlammps.cuba_extension import CUBAExtension
from simphony.core.cuba import CUBA
lammps_script = """# example of creating lammps data file (to be then used by SimPhoNy"
dimension 2
atom_style atomic
# create geometry
lattice hex 0.7
region box block 0 20 0 10 -0.25 0.25
create_box 3 box
create_atoms 1 box
mass 1 1.0
mass 2 1.0
mass 3 1.0
# LJ potentials
pair_style lj/cut 1.12246
pair_coeff * * 1.0 1.0 1.12246
# define groups
region 1 block INF INF INF 1.25 INF INF
group lower region 1
region 2 block INF INF 8.75 INF INF INF
group upper region 2
group boundary union lower upper
group flow subtract all boundary
set group lower type 2
set group upper type 3
# initial velocities
compute mobile flow temp
velocity flow create 1.0 482748 temp mobile
velocity boundary set 0.0 0.0 0.0
# write atoms to a lammps data file
write_data example.data"""
with open("lammps_example_script", "w") as script_file:
script_file.write(lammps_script)
subprocess.check_call("lammps < lammps_example_script", shell=True)
particles, state_data = read_data_file("example.data")
print("\n\nFinished converting files")
print("\nA Particles data-set was read from the file:")
print(" '{}' has {} particles".format(
particles.name,
particles.count_of(CUBA.PARTICLE)))
number_particles = sum(1 for _ in particles.iter(item_type=CUBA.PARTICLE))
number_materials = sum(1 for _ in state_data.iter_materials())
print("\n{} materials were read from the file.\n".format(number_materials))
box_description = \
""\
"The data-set has the following simulation box description:\n"\
" CUBAExtension.BOX_ORIGIN: {}\n" \
" CUBAExtension.BOX_VECTORS: {}"
print(box_description.format(
particles.data_extension[CUBAExtension.BOX_ORIGIN],
particles.data_extension[CUBAExtension.BOX_VECTORS]))
| {
"content_hash": "e960d24c4955ba3b89273ed52518b381",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 87,
"avg_line_length": 27.135135135135137,
"alnum_prop": 0.7166334661354582,
"repo_name": "simphony/simphony-lammps-md",
"id": "24b2435717e16e2b744d392ca919ab596eff6357",
"size": "2008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/file_conversion/convert.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "155311"
},
{
"name": "Shell",
"bytes": "1045"
}
],
"symlink_target": ""
} |
from pydevd_constants import * #@UnusedWildImport
try:
import cStringIO as StringIO #may not always be available @UnusedImport
except:
try:
import StringIO #@Reimport
except:
import io as StringIO
if USE_LIB_COPY:
import _pydev_threading as threading
else:
import threading
import sys #@Reimport
import traceback
class TracingFunctionHolder:
'''This class exists just to keep some variables (so that we don't keep them in the global namespace).
'''
_original_tracing = None
_warn = True
_lock = threading.Lock()
_traceback_limit = 1
_warnings_shown = {}
def GetExceptionTracebackStr():
exc_info = sys.exc_info()
s = StringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], file=s)
return s.getvalue()
def _GetStackStr(frame):
msg = '\nIf this is needed, please check: ' + \
'\nhttp://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html' + \
'\nto see how to restore the debug tracing back correctly.\n'
if TracingFunctionHolder._traceback_limit:
s = StringIO.StringIO()
s.write('Call Location:\n')
traceback.print_stack(f=frame, limit=TracingFunctionHolder._traceback_limit, file=s)
msg = msg + s.getvalue()
return msg
def _InternalSetTrace(tracing_func):
if TracingFunctionHolder._warn:
frame = GetFrame()
if frame is not None and frame.f_back is not None:
if not frame.f_back.f_code.co_filename.lower().endswith('threading.py'):
message = \
'\nPYDEV DEBUGGER WARNING:' + \
'\nsys.settrace() should not be used when the debugger is being used.' + \
'\nThis may cause the debugger to stop working correctly.' + \
'%s' % _GetStackStr(frame.f_back)
if message not in TracingFunctionHolder._warnings_shown:
#only warn about each message once...
TracingFunctionHolder._warnings_shown[message] = 1
sys.stderr.write('%s\n' % (message,))
sys.stderr.flush()
if TracingFunctionHolder._original_tracing:
TracingFunctionHolder._original_tracing(tracing_func)
def SetTrace(tracing_func):
TracingFunctionHolder._lock.acquire()
try:
TracingFunctionHolder._warn = False
_InternalSetTrace(tracing_func)
TracingFunctionHolder._warn = True
finally:
TracingFunctionHolder._lock.release()
def ReplaceSysSetTraceFunc():
if TracingFunctionHolder._original_tracing is None:
TracingFunctionHolder._original_tracing = sys.settrace
sys.settrace = _InternalSetTrace
def RestoreSysSetTraceFunc():
if TracingFunctionHolder._original_tracing is not None:
sys.settrace = TracingFunctionHolder._original_tracing
TracingFunctionHolder._original_tracing = None
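# Editorial sketch, not part of the original module: the functions above imply
# the following call order, where debugger_trace_func stands for whatever
# tracing callable the debugger installs (illustrative only):
#
#     ReplaceSysSetTraceFunc()       # intercept later sys.settrace() calls
#     SetTrace(debugger_trace_func)  # install the debugger trace without a warning
#     ...                            # run the debugged code
#     RestoreSysSetTraceFunc()       # hand the real sys.settrace back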
| {
"content_hash": "3f3b579c0fe5da432abbbfca146d0251",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 107,
"avg_line_length": 33.41111111111111,
"alnum_prop": 0.6361822414366478,
"repo_name": "akiokio/centralfitestoque",
"id": "7c197efc2002cc9f74b385d90e0d981c36766701",
"size": "3007",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/.pycharm_helpers/pydev/pydevd_tracing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "253279"
},
{
"name": "JavaScript",
"bytes": "253299"
},
{
"name": "Python",
"bytes": "6144500"
},
{
"name": "Ruby",
"bytes": "168219"
},
{
"name": "Shell",
"bytes": "21"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
from marvin.api.base import BaseView
import pytest
@pytest.fixture(autouse=True)
def baseview():
baseview = BaseView()
yield baseview
class TestBase(object):
def test_reset_results(self, baseview):
baseview.results = {'key1': 'value1'}
baseview.reset_results()
desired = {'data': None, 'status': -1, 'error': None, 'traceback': None}
assert baseview.results == desired, 'baseview results should be the same as desired'
def test_update_results(self, baseview):
new_results = {'key1': 'value1'}
baseview.update_results(new_results)
desired = {'data': None, 'status': -1, 'error': None, 'key1': 'value1', 'traceback': None}
assert baseview.results == desired, 'baseview results should be the same as desired'
def test_reset_status(self, baseview):
baseview.results['status'] = 42
baseview.reset_status()
assert baseview.results['status'] == -1
def test_add_config(self, baseview, release, mode):
baseview.add_config()
desired = {'data': None, 'status': -1, 'error': None, 'traceback': None,
'utahconfig': {'release': 'MPL-7', 'mode': 'local'}}
assert baseview.results == desired
def test_after_request_return_response(self, baseview):
name = 'test_name'
req = 'test_request'
actual = baseview.after_request(name, req)
desired = 'test_request'
assert actual == desired
def test_after_request_reset_results(self, baseview):
name = 'test_name'
req = 'test_request'
baseview.after_request(name, req)
desired = {'data': None, 'status': -1, 'error': None, 'traceback': None}
assert baseview.results == desired
| {
"content_hash": "ff7d2928e34dc166fe642cdd5feb19f9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 98,
"avg_line_length": 36.4,
"alnum_prop": 0.6236263736263736,
"repo_name": "albireox/marvin",
"id": "1f48aec6a89dc4c49f44aa958b4e345358a362dd",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/marvin/tests/api/test_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "210343"
},
{
"name": "HTML",
"bytes": "68596"
},
{
"name": "JavaScript",
"bytes": "217699"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1390874"
},
{
"name": "SQLPL",
"bytes": "141212"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
} |
import xadmin
from .models import Address, City, Country
xadmin.site.register(Address)
xadmin.site.register(City)
xadmin.site.register(Country)
| {
"content_hash": "85acd4533ca7b9eb50867ece87fd8ef2",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 18.375,
"alnum_prop": 0.8027210884353742,
"repo_name": "glasslion/django-sakila",
"id": "605ae2568f0fa470ece98a0cfa7f06c887626fe4",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_template/project_name/addresses/adminx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "52430"
}
],
"symlink_target": ""
} |
""" Classes to handle HTMLText and Catalogues in PubTal.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
try:
import logging
except:
import InfoLogging as logging
import SitePublisher, CatalogueContent, ContentToHTMLConverter, SiteUploader, FtpLibrary
import os, time, anydbm, codecs
import timeformat
from simpletal import simpleTAL, simpleTALES
# getPluginInfo provides the list of built-in supported content.
def getPluginInfo ():
builtInContent = [{'functionality': 'content', 'content-type': 'HTMLText' ,'file-type': 'txt','class': HTMLTextPagePublisher}
, {'functionality': 'content', 'content-type': 'Catalogue','file-type': 'catalogue','class': CataloguePublisher}
, {'functionality': 'upload-method', 'method-type': 'FTP', 'class': FTPUploadMethod}]
return builtInContent
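# Editorial sketch, not part of the original module: an external PubTal plugin
# would expose the same structure from its own getPluginInfo(), for example
#
#	def getPluginInfo ():
#		return [{'functionality': 'content', 'content-type': 'MyFormat',
#			'file-type': 'myext', 'class': MyFormatPagePublisher}]
#
# where MyFormat, myext and MyFormatPagePublisher (a hypothetical
# SitePublisher.ContentPublisher subclass) are purely illustrative names.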
class CataloguePublisher (SitePublisher.ContentPublisher):
def __init__ (self, pagePublisher):
SitePublisher.ContentPublisher.__init__ (self, pagePublisher)
self.log = logging.getLogger ("PubTal.CataloguePublisher")
self.ui = pagePublisher.getUI ()
def publish (self, page):
indexTemplate = self.templateConfig.getTemplate (page.getOption ('catalogue-index-template', 'template.html'))
itemTemplate = self.templateConfig.getTemplate (page.getOption ('catalogue-item-template', 'template.html'))
maxCols = int (page.getOption ('catalogue-max-columns', '5'))
buildIndexPage = 0
buildItemPages = 0
catalogueBuildPages = page.getOption ('catalogue-build-pages', 'index,item')
for option in catalogueBuildPages.split (','):
if (option == "index"):
if (indexTemplate is not None):
buildIndexPage = 1
else:
msg = "Unable to build the index page for catalogue %s because no catalogue-index-template has been specified." % page.getSource()
self.log.warn (msg)
self.ui.warn (msg)
elif (option == "item"):
if (itemTemplate is not None):
buildItemPages = 1
else:
msg = "Unable to build the item pages for catalogue %s because no catalogue-item-template has been specified." % page.getSource()
self.log.warn (msg)
self.ui.warn (msg)
		if (not (buildIndexPage | buildItemPages)):
msg = "Neither index or item pages are being built for catalogue %s" % page.getSource()
self.log.warn (msg)
self.ui.warn (msg)
return
itemContentType = page.getOption ('catalogue-item-content-type', None)
if (itemContentType is None or itemContentType.lower() == 'none'):
# We wish to turn off item content publishing
itemContentPublisher = None
else:
itemContentPublisher = self.pagePublisher.getContentPublisher (itemContentType)
if (itemContentPublisher is None):
msg = "Unable to find a publisher for catalogue item content type %s." % itemContentType
self.log.error (msg)
raise SitePublisher.PublisherException (msg)
# Build the context pieces we are going to need
pageCharSet = page.getOption ('character-set', None)
if (pageCharSet is not None):
			# This page has its own character set
			pageCodec = codecs.lookup (pageCharSet)
else:
# This page uses the default character set.
pageCodec = self.characterSetCodec
catalogue = CatalogueContent.CatalogueContent (page.getSource(), pageCodec)
items = []
rows = []
col = []
lastModDate = timeformat.format ('%a[SHORT], %d %b[SHORT] %Y %H:%M:%S %Z', time.localtime (page.getModificationTime()))
copyrightYear = timeformat.format ('%Y')
# Source paths
relativeSourcePath = page.getRelativePath()
contentDir = self.contentDir
absSourcePath = page.getSource()
localDestinationDir = os.path.dirname (page.getRelativePath())
depth = page.getDepthString()
self.log.debug ("Building the context for each item in the catalogue.")
for itemHeaders in catalogue.getItems():
# Destination paths
filename = itemHeaders.get ('filename', None)
if (filename is None and buildItemPages):
msg = "Unable to publish catalogue %s. Missing filename header in catalogue but item publishing is enabled." % page.getSource()
self.log.error (msg)
raise SitePublisher.PublisherException (msg)
actualHeaders = {}
actualHeaders.update (page.getHeaders())
actualHeaders.update (itemHeaders)
if (filename is not None):
# Used to determine the file to write to, kept in case the pageContext doesn't contain them.
relativeDestPath = os.path.join (localDestinationDir, os.path.splitext (filename)[0] + '.html')
destPath = os.path.join (self.destDir, relativeDestPath)
if (itemContentPublisher is not None):
self.log.debug ("Retrieving page context for this catalogue entry.")
# We need a page for this entry so that we can get it's content.
itemPageList = self.contentConfig.getPages (os.path.join (contentDir, filename), {})
if (len (itemPageList) > 1):
self.ui.warn ("Catalogue contains content type that returns more than one page! Only building first page.")
itemPage = itemPageList [0]
pageContext = itemContentPublisher.getPageContext (itemPage, itemTemplate)
actualHeaders.update (pageContext.get ('headers', {}))
pageContext ['headers'] = actualHeaders
if (not pageContext.has_key ('destinationPath')):
pageContext ['destinationPath'] = relativeDestPath
if (not pageContext.has_key ('absoluteDestinationPath')):
pageContext ['absoluteDestinationPath'] = destPath
else:
self.log.debug ("No content type for this catalogue entry - just publish what we have.")
# Get the generic page information for this file
relativeDestPath = os.path.join (localDestinationDir, os.path.splitext (filename)[0] + '.' + itemTemplate.getTemplateExtension())
destPath = os.path.join (self.destDir, relativeDestPath)
destFilename = os.path.basename (destPath)
actualHeaders = {}
actualHeaders.update (page.getHeaders())
actualHeaders.update (itemHeaders)
pageContext = {'lastModifiedDate': lastModDate
,'copyrightYear': copyrightYear
,'sourcePath': relativeSourcePath
,'absoluteSourcePath': absSourcePath
,'destinationPath': relativeDestPath
,'absoluteDestinationPath': destPath
,'destinationFilename': destFilename
,'depth': depth
,'headers': actualHeaders
}
else:
# No filename specified for this entry
pageContext = {'headers': actualHeaders}
items.append (pageContext)
if (len (col) == maxCols):
rows.append (col)
col = []
col.append (pageContext)
if (len (col) > 0):
rows.append (col)
col = []
# Build the Catalogue context
catalogueMap = {'entries': items, 'rows': rows, 'headers': catalogue.getCatalogueHeaders()}
# Do the individual items now
if (buildItemPages):
itemCount = 0
itemLength = len (items)
for item in items:
relativeDestPath = item['destinationPath']
context = simpleTALES.Context(allowPythonPath=1)
context.addGlobal ('page', item)
if (itemCount > 0):
catalogueMap ['previous'] = items[itemCount - 1]
elif (catalogueMap.has_key ('previous')):
del catalogueMap ['previous']
if (itemCount < itemLength - 1):
catalogueMap ['next'] = items[itemCount + 1]
elif (catalogueMap.has_key ('next')):
del catalogueMap ['next']
context.addGlobal ('catalogue', catalogueMap)
macros = page.getMacros()
self.pagePublisher.expandTemplate (itemTemplate, context, relativeDestPath, macros)
itemCount += 1
if (buildIndexPage):
# Cleanup the catalogueMap from the items pages.
if (catalogueMap.has_key ('previous')):
del catalogueMap ['previous']
if (catalogueMap.has_key ('next')):
del catalogueMap ['next']
indexMap = self.getPageContext (page, indexTemplate, catalogue)
relativeDestPath = indexMap ['destinationPath']
context = simpleTALES.Context(allowPythonPath=1)
context.addGlobal ('page', indexMap)
context.addGlobal ('catalogue', catalogueMap)
macros = page.getMacros()
self.pagePublisher.expandTemplate (indexTemplate, context, relativeDestPath, macros)
def getPageContext (self, page, template, catalogue=None):
# The page context for a Catalogue is fairly boring, but someone might use it
indexMap = SitePublisher.ContentPublisher.getPageContext (self, page, template)
if (catalogue is None):
localCatalogue = CatalogueContent.CatalogueContent (page.getSource(), self.characterSetCodec)
else:
localCatalogue = catalogue
actualHeaders = indexMap ['headers']
actualHeaders.update (localCatalogue.getCatalogueHeaders())
indexMap ['headers'] = actualHeaders
return indexMap
class HTMLTextPagePublisher (SitePublisher.ContentPublisher):
def __init__ (self, pagePublisher):
SitePublisher.ContentPublisher.__init__ (self, pagePublisher)
self.htmlConverter = ContentToHTMLConverter.ContentToHTMLConverter()
self.xhtmlConverter = ContentToHTMLConverter.ContentToXHTMLConverter()
self.log = logging.getLogger ("PubTal.HTMLTextPagePublisher")
def publish (self, page):
templateName = page.getOption ('template')
# Get this template's configuration
template = self.templateConfig.getTemplate (templateName)
context = simpleTALES.Context(allowPythonPath=1)
# Get the page context for this content
map = self.getPageContext (page, template)
# Determine the destination for this page
relativeDestPath = map ['destinationPath']
context.addGlobal ('page', map)
macros = page.getMacros()
self.pagePublisher.expandTemplate (template, context, relativeDestPath, macros)
def getPageContext (self, page, template):
pageMap = SitePublisher.ContentPublisher.getPageContext (self, page, template)
ignoreNewlines = page.getBooleanOption ('htmltext-ignorenewlines')
preserveSpaces = page.getBooleanOption ('preserve-html-spaces', 1)
headers, rawContent = self.readHeadersAndContent(page)
# Determine desired output type, HTML or XHTML
outputType = template.getOption ('output-type')
if (outputType == 'HTML'):
content = self.htmlConverter.convertContent (rawContent, ignoreNewLines=ignoreNewlines, preserveSpaces=preserveSpaces)
elif (outputType == 'XHTML'):
content = self.xhtmlConverter.convertContent (rawContent, ignoreNewLines=ignoreNewlines, preserveSpaces=preserveSpaces)
elif (outputType == 'PlainText'):
# It doesn't actually matter how the markup has been entered in the HTMLText, because we
# are going to output Plain Text anyway. We use HTML because it's the least demanding.
content = self.htmlConverter.convertContent (rawContent, ignoreNewLines=ignoreNewlines, plainTextOuput=1)
else:
msg = "HTMLText content doesn't support output in type '%s'." % outputType
self.log.error (msg)
raise SitePublisher.PublisherException (msg)
actualHeaders = pageMap ['headers']
actualHeaders.update (headers)
pageMap ['headers'] = actualHeaders
pageMap ['content'] = content
pageMap ['rawContent'] = rawContent
return pageMap
class FTPUploadMethod (SiteUploader.UploadMethod):
def __init__ (self, siteConfig, uploadConfig):
self.siteConfig = siteConfig
self.uploadConfig = uploadConfig
self.utfencoder = codecs.lookup ("utf8")[0]
self.utfdecoder = codecs.lookup ("utf8")[1]
self.db = None
self.ftpClient = None
self.log = logging.getLogger ("FTPUploadMethod")
try:
conf = 'host'
self.hostname = uploadConfig [conf]
conf = 'username'
self.username = uploadConfig [conf]
except:
raise Exception ("Missing FTP configuration option %s" % conf)
self.password = uploadConfig.get ('password', None)
self.initialDir = uploadConfig.get ('base-dir')
def getDB (self):
if (self.db is None):
self.db = anydbm.open (os.path.join (self.siteConfig.getDataDir(), 'FtpCache-%s-%s' % (self.hostname, self.username)), 'c')
return self.db
def uploadFiles (self, fileDict, userInteraction):
"Return 1 for success, 0 for failure. Must notify userInteraction directly."
if (self.ftpClient is None):
self.log.debug ("First file, there is no ftp client yet.")
if (self.password is None):
self.log.debug ("Asking for password - none in config file.")
self.password = userInteraction.promptPassword ("Password required (%s@%s)" % (self.username, self.hostname))
self.ftpClient = FtpLibrary.FTPUpload (self.hostname, self.username, self.password, self.initialDir)
try:
self.log.info ("Connecting to FTP site.")
userInteraction.info ("Connecting to FTP site.")
if (not self.ftpClient.connect (userInteraction)):
return 0
self.log.info ("Connected.")
userInteraction.info ("Connected.")
except Exception, e:
msg = "Error connecting to FTP site: %s" % str (e)
userInteraction.taskError ("Error connecting to FTP site: %s" % str (e))
return 0
percentageDone = 0.0
incrementSize = 100.0/float (len (fileDict))
db = self.getDB()
allFiles = fileDict.keys()
# Try to keep files together to help FTP performance.
allFiles.sort()
for fName in allFiles:
userInteraction.taskProgress ("Uploading %s" % fName, percentageDone)
percentageDone += incrementSize
if (self.ftpClient.uploadFile (self.siteConfig.getDestinationDir(), fName, userInteraction)):
db [self.utfencoder (fName)[0]] = fileDict [fName]
return 1
def finished (self):
if (self.ftpClient is not None):
self.ftpClient.disconnect()
self.ftpClient = None
if (self.db is not None):
self.db.close()
			self.db = None
| {
"content_hash": "302ca2bc0de30e56b221b899693a9e66",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 135,
"avg_line_length": 41.58611111111111,
"alnum_prop": 0.7207267383608309,
"repo_name": "owlfish/pubtal",
"id": "1bdc4211059e0df2ee210ac9c42464b89c04625c",
"size": "14971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pubtal/BuiltInPlugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "372"
},
{
"name": "HTML",
"bytes": "56487"
},
{
"name": "Python",
"bytes": "517058"
},
{
"name": "Shell",
"bytes": "1544"
}
],
"symlink_target": ""
} |
"""Pyhole Entertainment Plugin"""
import re
from BeautifulSoup import BeautifulSoup
from pyhole import plugin
from pyhole import utils
class Entertainment(plugin.Plugin):
"""Provide access to entertaining sites"""
@plugin.hook_add_command("grouphug")
@utils.spawn
def grouphug(self, params=None, **kwargs):
"""Display a random Group Hug (ex: .grouphug)"""
url = "http://grouphug.us/random"
response = self.irc.fetch_url(url, self.name)
if not response:
return
soup = BeautifulSoup(response.read())
grouphug = utils.decode_entities(
soup.findAll(id=re.compile("node-\d+"))[2].p.contents[0])
self.irc.reply(grouphug)
@plugin.hook_add_command("lastnight")
@utils.spawn
def lastnight(self, params=None, **kwargs):
"""Display a random Text From Last Night (ex: .lastnight)"""
url = ("http://www.textsfromlastnight.com/"
"Random-Texts-From-Last-Night.html")
response = self.irc.fetch_url(url, self.name)
if not response:
return
soup = BeautifulSoup(response.read())
lastnight = utils.decode_entities(
soup.findAll(href=re.compile(
"/Text-Replies-\d+.html"))[0].contents[0])
self.irc.reply(lastnight)
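# Editorial sketch, not part of the original plugin: an additional command would
# follow the same shape as the two methods above: decorate a method with
# plugin.hook_add_command and utils.spawn, fetch a page, then reply on IRC.
# The command name and URL below are illustrative only.
#
#     @plugin.hook_add_command("example")
#     @utils.spawn
#     def example(self, params=None, **kwargs):
#         """Display an example page (ex: .example)"""
#         response = self.irc.fetch_url("http://example.com", self.name)
#         if not response:
#             return
#         self.irc.reply(response.read()[:80])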
| {
"content_hash": "3c882d9944d9c75f63b8bca840900787",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 73,
"avg_line_length": 31.88095238095238,
"alnum_prop": 0.6109036594473488,
"repo_name": "rconradharris/pyhole",
"id": "14e6abcfae3fdd8cd6b185a3624dc6661aa7bfe5",
"size": "1941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/entertainment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| {
"content_hash": "c9e72d872cf6642b2b4eace9ddbd9bd8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 299,
"avg_line_length": 52.4,
"alnum_prop": 0.7251908396946565,
"repo_name": "cernops/CloudMan",
"id": "d47073cec3bb406edd140047e4501eb267202b52",
"size": "524",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cloudman/cloudman/manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31999"
},
{
"name": "JavaScript",
"bytes": "101757"
},
{
"name": "Python",
"bytes": "591308"
},
{
"name": "Shell",
"bytes": "423"
},
{
"name": "TeX",
"bytes": "95737"
}
],
"symlink_target": ""
} |
""" Transform Python code by omitting strings, comments, and/or code.
"""
from cStringIO import StringIO
import os
import shlex
import string
import sys
import tokenize
import grin
__version__ = '1.2'
class Transformer(object):
""" Transform Python files to remove certain features.
"""
def __init__(self, python_code, comments, strings):
# Keep code.
self.python_code = python_code
# Keep comments.
self.comments = comments
# Keep strings.
self.strings = strings
table = [' '] * 256
for s in string.whitespace:
table[ord(s)] = s
# A table for the translate() function that replaces all non-whitespace
# characters with spaces.
self.space_table = ''.join(table)
def keep_token(self, kind):
""" Return True if we should keep the token in the output.
"""
if kind in (tokenize.NL, tokenize.NEWLINE):
return True
elif kind == tokenize.COMMENT:
return self.comments
elif kind == tokenize.STRING:
return self.strings
else:
return self.python_code
def replace_with_spaces(self, s):
""" Replace all non-newline characters in a string with spaces.
"""
return s.translate(self.space_table)
def __call__(self, filename, mode='rb'):
""" Open a file and convert it to a filelike object with transformed
contents.
"""
g = StringIO()
f = open(filename, mode)
try:
gen = tokenize.generate_tokens(f.readline)
old_end = (1, 0)
for kind, token, start, end, line in gen:
if old_end[0] == start[0]:
dx = start[1] - old_end[1]
else:
dx = start[1]
# Put in any omitted whitespace.
g.write(' ' * dx)
old_end = end
if not self.keep_token(kind):
token = self.replace_with_spaces(token)
g.write(token)
finally:
f.close()
# Seek back to the beginning of the file.
g.seek(0, 0)
return g
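# Editorial sketch, not part of the original script: Transformer can be used on
# its own; the configuration below keeps only string tokens and blanks out code
# and comments while preserving line and column positions, so grep matches still
# point at the right place. The file name is illustrative only.
#
#     xform = Transformer(python_code=False, comments=False, strings=True)
#     filelike = xform('some_module.py')
#     print(filelike.read())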
def get_grinpython_arg_parser(parser=None):
""" Create the command-line parser.
"""
parser = grin.get_grin_arg_parser(parser)
parser.set_defaults(include='*.py')
parser.description = ("Search Python code with strings, comments, and/or "
"code removed.")
for action in parser._actions:
if hasattr(action, 'version'):
action.version = 'grinpython %s' % __version__
group = parser.add_argument_group('Code Transformation')
group.add_argument('-p', '--python-code', action='store_true',
help="Keep non-string, non-comment Python code.")
group.add_argument('-c', '--comments', action='store_true',
help="Keep Python comments.")
group.add_argument('-t', '--strings', action='store_true',
help="Keep Python strings, especially docstrings.")
return parser
def grinpython_main(argv=None):
if argv is None:
# Look at the GRIN_ARGS environment variable for more arguments.
env_args = shlex.split(os.getenv('GRIN_ARGS', ''))
argv = [sys.argv[0]] + env_args + sys.argv[1:]
parser = get_grinpython_arg_parser()
args = parser.parse_args(argv[1:])
if args.context is not None:
args.before_context = args.context
args.after_context = args.context
args.use_color = args.force_color or (not args.no_color and
sys.stdout.isatty() and
(os.environ.get('TERM') != 'dumb'))
xform = Transformer(args.python_code, args.comments, args.strings)
regex = grin.get_regex(args)
g = grin.GrepText(regex, args)
for filename, kind in grin.get_filenames(args):
if kind == 'text':
# Ignore gzipped files.
report = g.grep_a_file(filename, opener=xform)
sys.stdout.write(report)
if __name__ == '__main__':
grinpython_main()
| {
"content_hash": "9aec32b7e186d56fe0322124d981c4f8",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 79,
"avg_line_length": 32.58064516129032,
"alnum_prop": 0.5782178217821782,
"repo_name": "lecheel/grin",
"id": "31e23ee4e3e5fc6e4ae2bbfcb06754a8d84c48e9",
"size": "4086",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/grinpython.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65582"
}
],
"symlink_target": ""
} |
"""
Execute tasks through strace to obtain dependencies after the process is run. This
scheme is similar to that of the Fabricate script.
To use::
def configure(conf):
conf.load('strace')
WARNING:
* This will not work when advanced scanners are needed (qt4/qt5)
* The overhead of running 'strace' is significant (56s -> 1m29s)
* It will not work on Windows :-)
"""
import os, re, threading
from waflib import Task, Logs, Utils
#TRACECALLS = 'trace=access,chdir,clone,creat,execve,exit_group,fork,lstat,lstat64,mkdir,open,rename,stat,stat64,symlink,vfork'
TRACECALLS = 'trace=process,file'
BANNED = ('/tmp', '/proc', '/sys', '/dev')
s_process = r'(?:clone|fork|vfork)\(.*?(?P<npid>\d+)'
s_file = r'(?P<call>\w+)\("(?P<path>([^"\\]|\\.)*)"(.*)'
re_lines = re.compile(r'^(?P<pid>\d+)\s+(?:(?:%s)|(?:%s))\r*$' % (s_file, s_process), re.IGNORECASE | re.MULTILINE)
strace_lock = threading.Lock()
def configure(conf):
conf.find_program('strace')
def task_method(func):
# Decorator function to bind/replace methods on the base Task class
#
# The methods Task.exec_command and Task.sig_implicit_deps already exists and are rarely overridden
# we thus expect that we are the only ones doing this
try:
setattr(Task.Task, 'nostrace_%s' % func.__name__, getattr(Task.Task, func.__name__))
except AttributeError:
pass
setattr(Task.Task, func.__name__, func)
return func
@task_method
def get_strace_file(self):
try:
return self.strace_file
except AttributeError:
pass
if self.outputs:
ret = self.outputs[0].abspath() + '.strace'
else:
ret = '%s%s%d%s' % (self.generator.bld.bldnode.abspath(), os.sep, id(self), '.strace')
self.strace_file = ret
return ret
@task_method
def get_strace_args(self):
return (self.env.STRACE or ['strace']) + ['-e', TRACECALLS, '-f', '-o', self.get_strace_file()]
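# Editorial note, not part of the original tool: with the defaults above, a task
# command such as ['gcc', '-c', 'main.c'] (illustrative only) is effectively run as
#   strace -e trace=process,file -f -o <output>.strace gcc -c main.c
# and the resulting .strace log is parsed afterwards by parse_strace_deps().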
@task_method
def exec_command(self, cmd, **kw):
bld = self.generator.bld
if not 'cwd' in kw:
kw['cwd'] = self.get_cwd()
args = self.get_strace_args()
fname = self.get_strace_file()
if isinstance(cmd, list):
cmd = args + cmd
else:
cmd = '%s %s' % (' '.join(args), cmd)
	ret = -1  # ensure 'ret' exists for the finally clause even if exec_command raises
	try:
		ret = bld.exec_command(cmd, **kw)
finally:
if not ret:
self.parse_strace_deps(fname, kw['cwd'])
return ret
@task_method
def sig_implicit_deps(self):
# bypass the scanner functions
return
@task_method
def parse_strace_deps(self, path, cwd):
# uncomment the following line to disable the dependencies and force a file scan
# return
try:
cnt = Utils.readf(path)
finally:
try:
os.remove(path)
except OSError:
pass
if not isinstance(cwd, str):
cwd = cwd.abspath()
nodes = []
bld = self.generator.bld
try:
cache = bld.strace_cache
except AttributeError:
cache = bld.strace_cache = {}
# chdir and relative paths
pid_to_cwd = {}
global BANNED
done = set()
for m in re.finditer(re_lines, cnt):
# scraping the output of strace
pid = m.group('pid')
if m.group('npid'):
npid = m.group('npid')
pid_to_cwd[npid] = pid_to_cwd.get(pid, cwd)
continue
p = m.group('path').replace('\\"', '"')
if p == '.' or m.group().find('= -1 ENOENT') > -1:
# just to speed it up a bit
continue
if not os.path.isabs(p):
p = os.path.join(pid_to_cwd.get(pid, cwd), p)
call = m.group('call')
if call == 'chdir':
pid_to_cwd[pid] = p
continue
if p in done:
continue
done.add(p)
for x in BANNED:
if p.startswith(x):
break
else:
if p.endswith('/') or os.path.isdir(p):
continue
try:
node = cache[p]
except KeyError:
strace_lock.acquire()
try:
cache[p] = node = bld.root.find_node(p)
if not node:
continue
finally:
strace_lock.release()
nodes.append(node)
# record the dependencies then force the task signature recalculation for next time
if Logs.verbose:
Logs.debug('deps: real scanner for %r returned %r', self, nodes)
bld = self.generator.bld
bld.node_deps[self.uid()] = nodes
bld.raw_deps[self.uid()] = []
try:
del self.cache_sig
except AttributeError:
pass
self.signature()
| {
"content_hash": "d446f629a5fd841f2fbb5889022432bb",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 127,
"avg_line_length": 23.735294117647058,
"alnum_prop": 0.6537794299876084,
"repo_name": "MarekIgnaszak/econ-project-templates",
"id": "37d82cbb724b14f4068415f2d49d3e9f60df20a9",
"size": "4102",
"binary": false,
"copies": "49",
"ref": "refs/heads/python",
"path": ".mywaflib/waflib/extras/stracedeps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "663"
},
{
"name": "Jupyter Notebook",
"bytes": "3572"
},
{
"name": "Python",
"bytes": "1222989"
},
{
"name": "Shell",
"bytes": "1716"
},
{
"name": "TeX",
"bytes": "14224"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import getpass
import tempfile
from copy import copy
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
from resource_management.core import shell
def setup_users():
"""
Creates users before cluster installation
"""
import params
should_create_users_and_groups = False
if params.host_sys_prepped:
should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
else:
should_create_users_and_groups = not params.ignore_groupsusers_create
if should_create_users_and_groups:
for group in params.group_list:
Group(group,
)
for user in params.user_list:
User(user,
uid = get_uid(user) if params.override_uid == "true" else None,
gid = params.user_to_gid_dict[user],
groups = params.user_to_groups_dict[user],
fetch_nonlocal_groups = params.fetch_nonlocal_groups,
)
if params.override_uid == "true":
set_uid(params.smoke_user, params.smoke_user_dirs)
else:
Logger.info('Skipping setting uid for smoke user as host is sys prepped')
else:
Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
pass
if params.has_hbase_masters:
Directory (params.hbase_tmp_dir,
owner = params.hbase_user,
mode=0775,
create_parents = True,
cd_access="a",
)
if params.override_uid == "true":
set_uid(params.hbase_user, params.hbase_user_dirs)
else:
Logger.info('Skipping setting uid for hbase user as host is sys prepped')
if should_create_users_and_groups:
if params.has_namenode:
create_dfs_cluster_admins()
if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
create_tez_am_view_acls()
else:
Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
def create_dfs_cluster_admins():
"""
  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
"""
import params
groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
User(params.hdfs_user,
groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
fetch_nonlocal_groups = params.fetch_nonlocal_groups
)
def create_tez_am_view_acls():
"""
  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
"""
import params
if not params.tez_am_view_acls.startswith("*"):
create_users_and_groups(params.tez_am_view_acls)
def create_users_and_groups(user_and_groups):
import params
parts = re.split('\s+', user_and_groups)
if len(parts) == 1:
parts.append("")
users_list = parts[0].strip(",").split(",") if parts[0] else []
groups_list = parts[1].strip(",").split(",") if parts[1] else []
# skip creating groups and users if * is provided as value.
users_list = filter(lambda x: x != '*' , users_list)
groups_list = filter(lambda x: x != '*' , groups_list)
if users_list:
User(users_list,
fetch_nonlocal_groups = params.fetch_nonlocal_groups
)
if groups_list:
Group(copy(groups_list),
)
return groups_list
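# Editorial note, not part of the original script: with the documented
# "<users><space><groups>" format, an illustrative value such as
# "hdfs,yarn hadoop,users" yields users_list ['hdfs', 'yarn'] and groups_list
# ['hadoop', 'users'], while a "*" in either position is filtered out so that
# nothing is created for that part.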
def set_uid(user, user_dirs):
"""
user_dirs - comma separated directories
"""
import params
File(format("{tmp_dir}/changeUid.sh"),
content=StaticFile("changeToSecureUid.sh"),
mode=0555)
ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
uid = get_uid(user)
Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid),
not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
def get_uid(user):
import params
user_str = str(user) + "_uid"
service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
if service_env and params.config['configurations'][service_env[0]][user_str]:
service_env_str = str(service_env[0])
uid = params.config['configurations'][service_env_str][user_str]
if len(service_env) > 1:
Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
return uid
else:
if user == params.smoke_user:
return None
File(format("{tmp_dir}/changeUid.sh"),
content=StaticFile("changeToSecureUid.sh"),
mode=0555)
code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
return int(newUid)
def setup_hadoop_env():
import params
stackversion = params.stack_version_unformatted
Logger.info("FS Type: {0}".format(params.dfs_type))
if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
if params.security_enabled:
tc_owner = "root"
else:
tc_owner = params.hdfs_user
# create /etc/hadoop
Directory(params.hadoop_dir, mode=0755)
# HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
if Script.is_stack_less_than("2.2"):
Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
group=params.user_group )
Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
not_if=format("ls {hadoop_conf_dir}"))
# write out hadoop-env.sh, but only if the directory exists
if os.path.exists(params.hadoop_conf_dir):
File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
group=params.user_group,
content=InlineTemplate(params.hadoop_env_sh_template))
# Create tmp dir for java.io.tmpdir
# Handle a situation when /tmp is set to noexec
Directory(params.hadoop_java_io_tmpdir,
owner=params.hdfs_user,
group=params.user_group,
mode=01777
)
def setup_java():
"""
Install jdk using specific params.
Install ambari jdk as well if the stack and ambari jdk are different.
"""
import params
__setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
if params.ambari_java_home and params.ambari_java_home != params.java_home:
__setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
def __setup_java(custom_java_home, custom_jdk_name):
"""
Installs jdk using specific params, that comes from ambari-server
"""
import params
java_exec = format("{custom_java_home}/bin/java")
if not os.path.isfile(java_exec):
if not params.jdk_name: # if custom jdk is used.
raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
java_dir = os.path.dirname(params.java_home)
Directory(params.artifact_dir,
create_parents = True,
)
File(jdk_curl_target,
content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
not_if = format("test -f {jdk_curl_target}")
)
File(jdk_curl_target,
mode = 0755,
)
tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
try:
if params.jdk_name.endswith(".bin"):
chmod_cmd = ("chmod", "+x", jdk_curl_target)
install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
elif params.jdk_name.endswith(".gz"):
chmod_cmd = ("chmod","a+x", java_dir)
install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
Directory(java_dir
)
Execute(chmod_cmd,
sudo = True,
)
Execute(install_cmd,
)
finally:
Directory(tmp_java_dir, action="delete")
File(format("{custom_java_home}/bin/java"),
mode=0755,
cd_access="a",
)
Execute(('chmod', '-R', '755', params.java_home),
sudo = True,
)
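# Editorial note, not part of the original script: __setup_java() picks its
# install steps from the archive name: a *.bin installer is made executable and
# run with an auto-accepted prompt, while a *.gz tarball is simply extracted.
# In both cases the unpacked tree is copied into the java directory with sudo,
# the temporary directory is deleted, and execute permissions on the JDK are
# fixed up afterwards.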
| {
"content_hash": "f4bf50c6ed1827246a3af397bce00403",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 138,
"avg_line_length": 33.406716417910445,
"alnum_prop": 0.660895789120965,
"repo_name": "radicalbit/ambari",
"id": "ee950e83cf7b68c7efd3dfa70f14c358304cfef2",
"size": "8953",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
} |
from mock import patch
from channel_facebook.channel import TriggerType
from core.channel import (NotSupportedTrigger, ConditionNotMet)
from core.models import Channel, Trigger
from .test_base import FacebookBaseTestCase
class ChannelTriggerTestCase(FacebookBaseTestCase):
def test_fire_trigger_post(self):
get_data = [{
'message': 'Testmessage',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1234',
'created_time': '2016-09-14T14:27:06+0000',
'id': '101915710270588_102025766926249',
'type': 'status'
}]
payload = {
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1234',
'message': 'Testmessage'
}
with patch('channel_facebook.channel.FacebookChannel._getFeeds') as mock_get_feeds, \
patch('core.core.Core.handle_trigger') as mock_handle_trigger:
trigger_type = TriggerType.new_post
mock_get_feeds.return_value = get_data
self.channel.fire_trigger(self.webhook_data)
mock_get_feeds.assert_called_once_with(fields=self.fields,
user=self.facebook_account,
time=self.time)
mock_handle_trigger.assert_called_once_with(channel_name=self.channel_name,
trigger_type=trigger_type,
userid=self.facebook_account.user_id,
payload=payload)
def test_fire_trigger_photo(self):
get_data = [{
'id': '101915710270588_101933616935464',
'message': 'testmessage',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'created_time': '2016-09-14T13:08:39+0000',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'type': 'photo'
}]
payload = {
'message': 'testmessage',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg'
}
with patch('channel_facebook.channel.FacebookChannel._getFeeds') as mock_get_feeds, \
patch('core.core.Core.handle_trigger') as mock_handle_trigger:
trigger_type = TriggerType.new_photo
mock_get_feeds.return_value = get_data
self.channel.fire_trigger(self.webhook_data)
mock_get_feeds.assert_called_once_with(fields=self.fields,
user=self.facebook_account,
time=self.time)
mock_handle_trigger.assert_called_once_with(channel_name=self.channel_name,
trigger_type=trigger_type,
userid=self.facebook_account.user_id,
payload=payload)
def test_fire_trigger_photo_with_hashtag(self):
get_data = [{
'id': '101915710270588_101933616935464',
'message': '#me',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'created_time': '2016-09-14T13:08:39+0000',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'type': 'photo'}]
payload = {
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'message': '#me', 'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469'
}
with patch('channel_facebook.channel.FacebookChannel._getFeeds') as mock_get_feeds, \
patch('core.core.Core.handle_trigger') as mock_handle_trigger:
trigger_type = TriggerType.new_photo_with_hashtag
mock_get_feeds.return_value = get_data
self.channel.fire_trigger(self.webhook_data)
mock_get_feeds.assert_called_once_with(fields=self.fields,
user=self.facebook_account,
time=self.time)
mock_handle_trigger.assert_called_with(channel_name=self.channel_name,
trigger_type=trigger_type,
userid=self.facebook_account.user_id,
payload=payload)
def test_fire_trigger_link(self):
get_data = [{
'link': 'http://daisychain.me/',
'id': '101915710270588_102045486924277',
'message': 'testmessage',
'created_time': '2016-09-14T14:45:28+0000',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=102',
'type': 'link'
}]
payload = {
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=102',
'link': 'http://daisychain.me/',
'message': 'testmessage'
}
with patch('channel_facebook.channel.FacebookChannel._getFeeds') as mock_get_feeds, \
patch('core.core.Core.handle_trigger') as mock_handle_trigger:
trigger_type = TriggerType.new_link
mock_get_feeds.return_value = get_data
self.channel.fire_trigger(self.webhook_data)
mock_get_feeds.assert_called_once_with(fields=self.fields,
user=self.facebook_account,
time=self.time)
mock_handle_trigger.assert_called_once_with(channel_name=self.channel_name,
trigger_type=trigger_type,
userid=self.facebook_account.user_id,
payload=payload)
def test_fire_trigger_video(self):
get_data = [{
'link': 'https://www.facebook.com/101915710270588/videos/102056993589793',
'id': '101915710270588_102057203589772',
'message': 'Video',
'created_time': '2016-09-14T14:48:18+0000',
'permalink_url': 'https://www.facebook.com/permalink.php',
'type': 'video',
'picture': 'https://scontent.xx.fbcdn.net/v/t15.0-10/s130x130/14356708_n.jpg?',
'full_picture': 'https://scontent.xx.fbcdn.net/v/t15.0-10/s720x720/1435_n.jpg'
}]
payload = {'message': 'Video',
'link': 'https://www.facebook.com/101915710270588/videos/102056993589793',
'full_picture': 'https://scontent.xx.fbcdn.net/v/t15.0-10/s720x720/1435_n.jpg',
'permalink_url': 'https://www.facebook.com/permalink.php',
'picture': 'https://scontent.xx.fbcdn.net/v/t15.0-10/s130x130/14356708_n.jpg?'}
with patch('channel_facebook.channel.FacebookChannel._getFeeds') as mock_get_feeds, \
patch('core.core.Core.handle_trigger') as mock_handle_trigger:
trigger_type = TriggerType.new_video
mock_get_feeds.return_value = get_data
self.channel.fire_trigger(self.webhook_data)
mock_get_feeds.assert_called_once_with(fields=self.fields,
user=self.facebook_account,
time=self.time)
mock_handle_trigger.assert_called_once_with(channel_name=self.channel_name,
trigger_type=trigger_type,
userid=self.facebook_account.user_id,
payload=payload)
def test_fire_trigger_invalid(self):
invalid_webhook_data = {
"time": self.time,
"id": "101915710270588",
"changed_fields": ["feed"],
"uid": "101915710270588"
}
with self.assertRaises(NotSupportedTrigger):
self.channel.fire_trigger(invalid_webhook_data)
@patch('channel_facebook.channel.FacebookChannel._getFeeds')
def test_fire_trigger_with_invalid_user(self, mock_getFeeds):
invalid_webhook_data = {
"time": self.time,
"id": "101915710270588",
"changed_fields": ["statuses"],
"uid": "invaliduser2"
}
self.channel.fire_trigger(invalid_webhook_data)
mock_getFeeds.assert_not_called()
def test_fire_trigger_with_invalid_channel_object(self):
get_data = [{
'message': 'Testmessage',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1234',
'created_time': '2016-09-14T14:27:06+0000',
'id': '101915710270588_102025766926249',
'type': 'status'
}]
payload = {
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1234',
'message': 'Testmessage'
}
with patch('channel_facebook.channel.FacebookChannel._getFeeds') as mock_get_feeds, \
patch('core.models.Channel.objects.get') as mock_get_Channel, \
patch('core.core.Core.handle_trigger') as mock_handle_trigger:
mock_get_feeds.return_value = get_data
mock_get_Channel.side_effect = Channel.DoesNotExist
self.channel.fire_trigger(self.webhook_data)
mock_get_feeds.assert_called_once()
mock_handle_trigger.assert_not_called()
def test_fill_recipe_mappings_with_valid_trigger(self):
payload = {
'message': 'testmessage #me',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg'
}
conditions = {'hashtag': '#me'}
with patch('channel_facebook.channel.FacebookChannel._fill_mappings_for_new_entry') as mock_replace_mappings:
self.channel.fill_recipe_mappings(trigger_type=TriggerType.new_photo,
userid=self.user.id,
payload=payload,
conditions=self.conditions,
mappings={})
mock_replace_mappings.assert_called_once_with(inputs={},
payload=payload)
with patch(
'channel_facebook.channel.FacebookChannel._fill_mappings_for_new_entry_with_hashtags') as mock_replace_mappings:
self.channel.fill_recipe_mappings(trigger_type=TriggerType.new_photo_with_hashtag,
userid=self.user.id,
payload=payload,
conditions=self.conditions,
mappings={})
mock_replace_mappings.assert_called_once_with(inputs={},
payload=payload,
conditions=conditions)
def test_get_trigger_types_with_notSupportedTrigger(self):
data = {'type': 'notSupported'}
with self.assertRaises(NotSupportedTrigger):
self.channel._get_trigger_types(data)
@patch('channel_facebook.channel.FacebookChannel._fill_mappings_for_new_entry')
    def test_fill_recipe_mappings_with_invalid_trigger(self,
mock_replace_mappings):
payload = {
'message': 'testmessage',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg'
}
self.channel.fill_recipe_mappings(trigger_type=-42,
userid=self.user.id,
payload=payload,
conditions=self.conditions,
mappings={})
self.assertTrue(mock_replace_mappings.called)
@patch('channel_facebook.channel.FacebookChannel._fill_mappings_for_new_entry')
def test_fill_mappings_for_new_entry_with_hashtags_without_hashtag(self,
mock_replace_mappings):
payload = {
'message': 'testmessage',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg'
}
with self.assertRaises(ConditionNotMet):
self.channel.fill_recipe_mappings(trigger_type=TriggerType.new_photo_with_hashtag,
userid=self.user.id,
payload=payload,
conditions={},
mappings={})
self.assertFalse(mock_replace_mappings.called)
@patch('channel_facebook.channel.FacebookChannel._fill_mappings_for_new_entry')
def test_fill_mappings_for_new_entry_with_hashtags_not_matching_hashtag(self,
mock_replace_mappings):
payload = {
'message': 'testmessage #falseHashtag',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg'
}
with self.assertRaises(ConditionNotMet):
self.channel.fill_recipe_mappings(trigger_type=TriggerType.new_photo_with_hashtag,
userid=self.user.id,
payload=payload,
conditions=self.conditions,
mappings={})
self.assertFalse(mock_replace_mappings.called)
@patch('channel_facebook.channel.FacebookChannel._fill_mappings_for_new_entry')
def test_fill_mappings_for_new_entry_with_hashtags_without_hash(self,
mock_replace_mappings):
payload = {
'message': 'testmessage #me',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/permalink.php?story_fbid=1019&id=100',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg'
}
conditions = {'hashtag': 'me'}
self.channel.fill_recipe_mappings(trigger_type=TriggerType.new_photo_with_hashtag,
userid=self.user.id,
payload=payload,
conditions=conditions,
mappings={})
mock_replace_mappings.assert_called_once()
def test_replace_mappings(self):
def mock_downloadfile(input):
return input
payload = {
'message': 'testmessage',
'description': 'a test description',
'link': 'https://www.facebook.com/photo.php?fbid=101933566935469',
'permalink_url': 'https://www.facebook.com/',
'full_picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'picture': 'https://scontent.xx.fbcdn.net/1_n.jpg',
'video': 'https://scontent.xx.fbcdn.net/1_n.mp4',
}
mappings = {
'input1': 'you wrote: %message%',
'input2': 'New Post: %permalink_url%',
'input3': 'A link: %link%',
'input4': 'A man wrote: %description%',
'input5': 'a picture in small: %image_low%',
'input6': 'a large picture: %image_standard%',
'input7': 'a video: %video%',
}
with patch('core.utils.download_file') as mock_download:
mock_download.side_effect = mock_downloadfile
res = self.channel._fill_mappings_for_new_entry(inputs=mappings,
payload=payload)
expected = {
'input1': 'you wrote: ' + payload['message'],
'input2': 'New Post: ' + payload['permalink_url'],
'input3': 'A link: ' + payload['link'],
'input4': 'A man wrote: ' + payload['description'],
'input5': payload['picture'],
'input6': payload['full_picture'],
'input7': payload['video'],
}
self.assertEqual(res, expected)
res = self.channel._fill_mappings_for_new_entry(inputs=mappings,
payload={})
self.assertEqual(res, mappings)
def test_trigger_synopsis(self):
conditions = [{'value': '#Daisychain'}]
for trigger in TriggerType:
trigger_id = Trigger.objects.get(trigger_type=trigger,
channel_id=self.channel_id).id
if trigger == TriggerType.new_post:
return_value = "new post by you on Facebook"
elif trigger == TriggerType.new_link:
return_value = "new link posted by you on Facebook"
elif trigger == TriggerType.new_photo:
return_value = "new photo posted by you on Facebook"
elif trigger == TriggerType.new_photo_with_hashtag:
return_value = 'new photo by you with hashtag "{}" ' \
'on Facebook'.format(conditions[0]['value'])
elif trigger == TriggerType.new_video:
return_value = "new video posted by you on Facebook"
self.assertEqual(self.channel.trigger_synopsis(
trigger_id, conditions=conditions
), return_value)
def test_get_payload(self):
feed = {
'description': 'blablabla',
'message': 'blablub',
'picture': 'nakedwoman',
'full_picture': 'largepicture',
'permalink_url': 'http://daisychain.me/secret',
'link': 'http://daisychain.me/s3cr3t',
'source': 'linktovideo',
}
response = self.channel._get_payload(feed)
feed['video'] = feed.pop('source')
self.assertDictEqual(response, feed)
@patch("channel_facebook.channel.Config.get")
@patch("channel_facebook.channel.reverse")
@patch("channel_facebook.channel.log.warning")
def test_build_absolute_uri(self, mock_log, mock_reverse, mock_get):
# first test without having set DOMAIN_BASE before
mock_get.return_value = ""
mock_reverse.return_value = "/test_alias"
actual = self.channel._build_absolute_uri("test_alias")
mock_get.assert_called_with("DOMAIN_BASE")
mock_log.assert_called_with("Facebook Config DOMAIN_BASE "
"was accessed before set")
mock_reverse.assert_called_with("test_alias")
self.assertEqual("/test_alias", actual)
# reset mocks
mock_log.reset_mock()
mock_reverse.reset_mock()
mock_get.reset_mock()
# second test with having set DOMAIN_BASE before
mock_get.return_value = "http://test.domain:1234"
actual = self.channel._build_absolute_uri("test_alias")
mock_get.assert_called_with("DOMAIN_BASE")
mock_log.assert_not_called()
mock_reverse.assert_called_with("test_alias")
self.assertEqual("http://test.domain:1234/test_alias", actual)
| {
"content_hash": "894d699c40a0c6c81d88a65f48328efe",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 128,
"avg_line_length": 50.04929577464789,
"alnum_prop": 0.5289151540734487,
"repo_name": "daisychainme/daisychain",
"id": "876126f198a057bc9aa5631aea9de915e88942bd",
"size": "21322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisychain/channel_facebook/tests/test_channel_trigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33038"
},
{
"name": "HTML",
"bytes": "69989"
},
{
"name": "JavaScript",
"bytes": "22115"
},
{
"name": "Makefile",
"bytes": "995"
},
{
"name": "Python",
"bytes": "610321"
}
],
"symlink_target": ""
} |
import json
import os
import sys
root_dir = sys.argv[1]
answer_path = sys.argv[2]
file_names = os.listdir(root_dir)
num_correct = 0
num_wrong = 0
with open(answer_path, 'r') as fh:
id2answer_dict = json.load(fh)
for file_name in file_names:
if not file_name.endswith(".question"):
continue
with open(os.path.join(root_dir, file_name), 'r') as fh:
url = fh.readline().strip()
_ = fh.readline()
para = fh.readline().strip()
_ = fh.readline()
ques = fh.readline().strip()
_ = fh.readline()
answer = fh.readline().strip()
_ = fh.readline()
if file_name in id2answer_dict:
pred = id2answer_dict[file_name]
if pred == answer:
num_correct += 1
else:
num_wrong += 1
else:
num_wrong += 1
total = num_correct + num_wrong
acc = float(num_correct) / total
print("{} = {} / {}".format(acc, num_correct, total)) | {
"content_hash": "20f3a764bd97ee2edb10b858db6956e1",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 60,
"avg_line_length": 25.842105263157894,
"alnum_prop": 0.5488798370672098,
"repo_name": "allenai/bi-att-flow",
"id": "33d99363ec31ba8ac7a79a4d4a4c1f4b4bf1530e",
"size": "982",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cnn_dm/evaluate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7398"
},
{
"name": "Jupyter Notebook",
"bytes": "84356"
},
{
"name": "Python",
"bytes": "285931"
},
{
"name": "Shell",
"bytes": "2428"
}
],
"symlink_target": ""
} |
import cStringIO
import logging
logger = logging.getLogger(__name__)
# import volatility.plugins.gui.messagehooks as messagehooks
LOPHI_TIMEOUT = 100
LOPHI_RETRIES = 5
pil_installed = True
try:
from PIL import Image, ImageDraw
except ImportError:
pil_installed = False
class VolatilityWrapper:
def __init__(self, uri, profile, memory_size):
"""
Initialize volatility with all of our parameters
@param uri: URI to read from (e.g. lophi://, file://, vmi://)
@param profile: Volatility profile (e.g. WinXPSP3x86)
@param memory_size: Memory size in bytes
"""
logger.debug("Initializg volatility: uri: %s, profile: %s, mem: %s"%(
uri,
profile,
memory_size))
self.MODULE_MAP = {"pslist":self._render_pslist,
"ssdt":self._render_ssdt,
"windows":self._render_windows,
"screenshot":self._render_screenshot}
self.uri = uri
self.profile = profile
self.memory_size = memory_size
# Import all of our Volatility classes
import volatility.registry as MemoryRegistry # @UnresolvedImport
import volatility.utils as utils # @UnresolvedImport
import volatility.cache as cache # @UnresolvedImport
import volatility.debug as debug # @UnresolvedImport
import volatility.addrspace as addrspace # @UnresolvedImport
import volatility.commands as commands # @UnresolvedImport
self.MemoryRegistry = MemoryRegistry
self.utils = utils
self.commands = commands
self.addrspace = addrspace
# Hack to disable caching
# cache.disable_caching(0, 0, 0, 0)
# Initialize Volatility
self.volatility_config = None
self.init_ok = self._init_volatility_config()
# Did everything init ok?
if self.init_ok == False:
#logger.error("Could not start memory analysis for %s."%self.machine.name)
logger.error("Could not start memory analysis for uri %s."%self.uri)
return
# Import our plugins module
self.PLUGIN_COMMANDS = None
try:
logger.debug("Init MemoryRegistry")
# try the older method which had an init function
self.MemoryRegistry.Init()
self.PLUGIN_COMMANDS = self.MemoryRegistry.PLUGIN_COMMANDS
except:
logger.debug("Plugin Importer MemoryRegistry (version 2.2+)")
self.MemoryRegistry.PluginImporter()
self.MemoryRegistry.register_global_options(self.volatility_config, self.addrspace.BaseAddressSpace)
self.MemoryRegistry.register_global_options(self.volatility_config, self.commands.Command)
self.PLUGIN_COMMANDS = self.MemoryRegistry.get_plugin_classes(self.commands.Command, lower=True)
self.command_objs = {}
self.addr_space = None
def _init_volatility_config(self):
"""
Creates a new Volatility ConfObject with its own storage
"""
import volatility.conf as conf # @UnresolvedImport
if not self.volatility_config:
config = conf.ConfObject()
# Set all of our static settings
config.update('DONOTLOADADDRSPACE', True)
config.update('LOCATION', self.uri)
config.update('DTB', None)
config.update('KDBG', None)
config.update('NO_CACHE', True) # IMPORTANT: No Cache!
# config.update("OUTPUT", OUTPUT_TYPE)
config.update("CACHE_DTB", False)
# LOPHI Addrspace stuff
config.update("RAM_SIZE", self.memory_size)
# config.update('RETRIES', LOPHI_RETRIES)
# config.update('TIMEOUT', LOPHI_TIMEOUT)
# Ensure our profile is valid and assign it
if self._is_valid_profile(self.profile):
config.update('PROFILE', self.profile)
else:
logger.error("Unrecognized Profile (%s)." % self.profile)
return False
self.volatility_config = config
self.config = property(self.volatility_config)
return True
def _is_valid_profile(self, profile_name):
"""
Just a nice simple function to check if a profile is valid
"""
return True
for p in self.MemoryRegistry.PROFILES.classes:
if p.__name__ == profile_name:
return True
return False
def execute_plugin(self,plugin_name):
"""
This will execute a volatility plugin, render its output with one of
our render functions, and return that output
@param plugin_name: Name of plugin as you would use on the command line
"""
logger.debug("Executing volatility plugin: %s"%plugin_name)
if plugin_name not in self.PLUGIN_COMMANDS:
logger.error("%s is not a valid plugin for this Volatility installation")
return False
# Initialize every module once (no need to re-create it every time)
if plugin_name not in self.command_objs:
command = self.PLUGIN_COMMANDS[plugin_name]
command_obj = command(self.volatility_config)
self.command_objs[plugin_name] = command_obj
# Initialize our address space (Only do this once)
if self.addr_space is None:
self.addr_space = self.utils.load_as(self.volatility_config)
# Enable our cache
self.volatility_config.update('LOPHI_CACHE',True)
# Get our results for this module
command_obj = self.command_objs[plugin_name]
# data = command_obj.calculate()
data = command_obj.calculate(self.addr_space)
# Disable and wipe our cache
self.volatility_config.update('LOPHI_CACHE',False)
# Render out output into the format we want
output = self._render_data(plugin_name, self.addr_space, data)
if output is not None:
# We have to append our output specific info for processing
output['MODULE'] = plugin_name
output['URI'] = self.uri
output['PROFILE'] = self.profile
else:
stringio = cStringIO.StringIO()
command_obj.render_text(stringio, data)
output = stringio.getvalue()
stringio.close()
return output
def _render_data(self,module_name, addr_space, data):
"""
Given volatility plugin output, will attempt to render it into a
format that we have specified
"""
logger.debug("Trying to process data for %s"%module_name)
if module_name in self.MODULE_MAP:
return self.MODULE_MAP[module_name](addr_space,data)
else:
return None
def _render_screenshot(self, addr_space, data):
"""
Render the screenshot data and return a Python Image object
To save the output as images:
data[0].save(header[0]+".png","PNG")
Note: This plugin seg faults, which is why we are only returning the
default screen
"""
def draw_text(draw, text, left, top, fill = "Black"):
"""Label windows in the screen shot"""
lines = text.split('\x0d\x0a')
for line in lines:
draw.text( (left, top), line, fill = fill)
_, height = draw.textsize(line)
top += height
if not pil_installed:
logger.error("Must install PIL for this plugin.")
return None
out_header = []
out_data = []
seen = []
found = False
for window_station in data:
if found:
break
for desktop in window_station.desktops():
session_name = "session_{0}.{1}.{2}".format(
desktop.dwSessionId,
window_station.Name, desktop.Name)
offset = desktop.PhysicalAddress
if offset in seen:
continue
seen.append(offset)
# The foreground window
win = desktop.DeskInfo.spwnd
# Some desktops don't have any windows
if not win:
logger.info("{0}\{1}\{2} has no windows (Skipping)".format(
desktop.dwSessionId, window_station.Name, desktop.Name))
continue
im = Image.new("RGB", (win.rcWindow.right + 1, win.rcWindow.bottom + 1), "White")
draw = ImageDraw.Draw(im)
# Traverse windows, visible only
for win, _level in desktop.windows(
win = win,
filter = lambda x : 'WS_VISIBLE' in str(x.style)):
draw.rectangle(win.rcWindow.get_tup(), outline = "Black", fill = "White")
draw.rectangle(win.rcClient.get_tup(), outline = "Black", fill = "White")
## Create labels for the windows
draw_text(draw, str(win.strName or ''), win.rcWindow.left + 2, win.rcWindow.top)
del draw
out_header.append(session_name)
out_data.append(im)
break
# Return our output
if len(out_data) > 0:
return {'HEADER':out_header,'DATA':out_data}
else:
return {'HEADER':[],'DATA':[]}
# Render Abstract method added by Chad Spensky for LO-PHI
def _render_pslist(self, addr_space, data):
offsettype = "(V)"
out_header = ['Offset'+offsettype,'Name', 'Pid', 'PPid', 'Thds', 'Hnds', 'Time']
out_data = []
for task in data:
offset = task.obj_offset
try:
out_data.append(map(str,[
hex(offset),
task.ImageFileName,
task.UniqueProcessId,
task.InheritedFromUniqueProcessId,
task.ActiveThreads,
task.ObjectTable.HandleCount,
task.CreateTime]))
except:
logger.error("Could not convert column to string")
return {'HEADER':out_header,'DATA':out_data}
def _render_windows(self,addr_space,data):
"""
Render the windows module output into a nice dict
"""
def translate_atom(winsta, atom_tables, atom_id):
"""
Translate an atom into an atom name.
@param winsta: a tagWINDOWSTATION in the proper
session space
@param atom_tables: a dictionary with _RTL_ATOM_TABLE
instances as the keys and owning window stations as
the values.
@param index: the index into the atom handle table.
"""
import volatility.plugins.gui.constants as consts
# First check the default atoms
if consts.DEFAULT_ATOMS.has_key(atom_id):
return consts.DEFAULT_ATOMS[atom_id].Name
# A list of tables to search. The session atom tables
# have priority and will be searched first.
table_list = [
table for (table, window_station)
in atom_tables.items() if window_station == None
]
table_list.append(winsta.AtomTable)
## Fixme: the session atom tables are found via physical
## AS pool tag scanning, and there's no good way (afaik)
## to associate the table with its session. Thus if more
## than one session has atoms with the same id but different
## values, then we could possibly select the wrong one.
for table in table_list:
atom = table.find_atom(atom_id)
if atom:
return atom.Name
return None
output_dict = {}
for winsta, atom_tables in data:
for desktop in winsta.desktops():
# Create our hierarchy
if winsta.dwSessionId not in output_dict:
output_dict[winsta.dwSessionId] = {}
if winsta.Name not in output_dict[winsta.dwSessionId]:
output_dict[winsta.dwSessionId][winsta.Name] = {}
if desktop.Name not in output_dict[winsta.dwSessionId][winsta.Name]:
output_dict[winsta.dwSessionId][winsta.Name][desktop.Name] = []
output_dict[winsta.dwSessionId][winsta.Name][desktop.Name]
for wnd, _level in desktop.windows(desktop.DeskInfo.spwnd):
window_dict = {'windowhandle':wnd.head.h,
'windowhandle_addr':wnd.obj_offset,
'name':str(wnd.strName or ''),
'classatom':wnd.ClassAtom,
'class':translate_atom(winsta, atom_tables, wnd.ClassAtom),
'superclassatom':wnd.SuperClassAtom,
'superclass':translate_atom(winsta, atom_tables, wnd.SuperClassAtom),
'pti':wnd.head.pti.v(),
'tid':wnd.Thread.Cid.UniqueThread,
'tid_addr':wnd.Thread.obj_offset,
'ppi':wnd.head.pti.ppi.v(),
'process':wnd.Process.ImageFileName,
'pid':wnd.Process.UniqueProcessId,
'visible':wnd.Visible,
'left':wnd.rcClient.left,
'top':wnd.rcClient.top,
'bottom':wnd.rcClient.bottom,
'right':wnd.rcClient.right,
'style_flags':wnd.style,
'exstyle_flags':wnd.ExStyle,
'windows_proc':wnd.lpfnWndProc
}
# Append this window to our list
output_dict[winsta.dwSessionId][winsta.Name][desktop.Name].append(window_dict)
# Return our out nested dictionaries
return {'DATA':output_dict}
def _render_ssdt(self,addr_space,data):
from bisect import bisect_right
# Volatility
import volatility.obj as obj
def find_module(modlist, mod_addrs, addr):
"""Uses binary search to find what module a given address resides in.
This is much faster than a series of linear checks if you have
to do it many times. Note that modlist and mod_addrs must be sorted
in order of the module base address."""
pos = bisect_right(mod_addrs, addr) - 1
if pos == -1:
return None
mod = modlist[mod_addrs[pos]]
if (addr >= mod.DllBase.v() and
addr < mod.DllBase.v() + mod.SizeOfImage.v()):
return mod
else:
return None
syscalls = addr_space.profile.syscalls
# Print out the entries for each table
out_header = ['SSDT Index','Table','Entry Count','Entry Index','Address', 'Name', 'Owner Module']
out_data = []
for idx, table, n, vm, mods, mod_addrs in data:
if vm.is_valid_address(table):
for i in range(n):
syscall_addr = obj.Object('unsigned long', table + (i * 4), vm).v()
try:
syscall_name = syscalls[idx][i]
except IndexError:
syscall_name = "Unknown"
syscall_mod = find_module(mods, mod_addrs, syscall_addr)
if syscall_mod:
syscall_modname = syscall_mod.BaseDllName
else:
syscall_modname = "UNKNOWN"
out_data.append(map(str,[
idx,
table,
n,
"%06x"%(idx * 0x1000 + i),
"%x"%syscall_addr,
syscall_name,
"{0}".format(syscall_modname)
# "WTF"
]))
else:
out_data.append(map(str, [
idx,
table,
n,
0,
0,
0,
0]))
return {'HEADER':out_header,'DATA':out_data}
# outfd.write(" [SSDT not resident at 0x{0:08X} ]\n".format(table))
class ButtonClicker():
"""
This module wraps volatility to click buttons using memory introspection.
"""
# Names of buttons that we don't want to click.
bad_button_names = ['save',
'reject',
'print',
'decline',
'back',
'cancel',
'exit',
'close']
def __init__(self, uri, profile, mem_size, control_sensor):
"""
Initialize our volatility instance from our machine object.
"""
self._vol = VolatilityWrapper(uri,profile,mem_size)
# Init our previous buttons list
self.buttons_prev = {}
# Store our machine
self.control_sensor = control_sensor
def _is_bad_button(self, name):
"""
Check a button name and see if it is in our list of buttons we
shouldn't click
@param name: String to compare against our list
"""
for b in self.bad_button_names:
if b.lower() in str(name).lower():
return True
return False
def __get_windows(self):
"""
Use our volatility instance to get the list of windows on the machine.
"""
return self._vol.execute_plugin("windows")
def _get_buttons(self):
# Get our list of windows
windows = self.__get_windows()
# Create list to store buttons
buttons = []
# Loop through all windows extracting buttons
for session in windows['DATA']:
session_dict = windows['DATA'][session]
for window_ctx in session_dict:
window_dict = session_dict[window_ctx]
for desktop in window_dict:
desktop_dict = window_dict[desktop]
for window in desktop_dict:
# Ensure it is a Windows button Atom
if window['superclassatom'] == 0xc017 \
or window['classatom'] == 0xc061 \
or window['class'] == "Button" \
or window['superclass'] == "Button":
buttons.append({"name":str(window['name']),
"process":str(window['process']),
"visible":str(window['visible']),
"top":int(window['top']),
"left":int(window['left'])
})
return buttons
def update_buttons(self):
"""
Simply extract the list of current buttons and save them
Meant to be used by calling this, then eventually click_buttons with
new_only = True.
"""
self.buttons_prev = self._get_buttons()
def click_buttons(self, process=None, new_only=False):
"""
Attempt to click all buttons
@param process: If provided, will only click buttons assigned to
this process name
@param new_only: If true, will only click buttons new since the
last function call
"""
buttons = self._get_buttons()
clicked = []
for button in buttons:
# Extract our location to click
(top, left) = (button['top'], button['left'])
btn_o = "[ Button ]"
btn_o += " Name: %s"%button['name']
btn_o += " Process: %s"%button['process']
btn_o += " Visible: %s"%button['visible']
btn_o += " (Top,Left): (%d, %d)"%(top,left)
logger.info(btn_o)
# Are we filtering a specific process?
if process is not None and process != str(button['process']):
logger.info("Button not in process specified, skipping.")
continue
# Are we only clicking new windows
if new_only and button in self.buttons_prev:
# Hack: Just catch the key error if the keys don't exist.
logger.info("Button not new, skipping.")
continue
# Does it match a bad word?
if self._is_bad_button(button['name']):
logger.info("Button has a bad word, skipping.")
continue
# Click it!
self.control_sensor.mouse_click(left,top)
clicked.append(button)
# Save these windows for later
self.buttons_prev = buttons
return clicked
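# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how VolatilityWrapper and ButtonClicker fit
# together, assuming a hypothetical LO-PHI URI, profile, memory size and
# control sensor; real values depend on the deployment. The control sensor
# only needs to expose mouse_click(x, y), as used in click_buttons() above.
#
#   wrapper = VolatilityWrapper("lophi://localhost", "WinXPSP3x86", 1073741824)
#   processes = wrapper.execute_plugin("pslist")   # {'HEADER': [...], 'DATA': [...], ...}
#
#   clicker = ButtonClicker("lophi://localhost", "WinXPSP3x86",
#                           1073741824, control_sensor)
#   clicker.update_buttons()
#   clicked = clicker.click_buttons(new_only=True)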
| {
"content_hash": "36c96ab1e6034d5e68951f23127b0b7a",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 112,
"avg_line_length": 38.061258278145694,
"alnum_prop": 0.48979946931140983,
"repo_name": "mit-ll/LO-PHI",
"id": "afdcabbb58083c69574f88e6ba69983514440adb",
"size": "22998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-lophi-semanticgap/lophi_semanticgap/memory/volatility_extensions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "58723"
},
{
"name": "Elixir",
"bytes": "18208"
},
{
"name": "Emacs Lisp",
"bytes": "1368"
},
{
"name": "Groff",
"bytes": "1900"
},
{
"name": "M4",
"bytes": "2284"
},
{
"name": "Makefile",
"bytes": "64810"
},
{
"name": "Protocol Buffer",
"bytes": "1803"
},
{
"name": "Python",
"bytes": "1220515"
},
{
"name": "Shell",
"bytes": "23976"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Book(models.Model):
title = models.CharField(max_length=50)
year = models.PositiveIntegerField(null=True, blank=True)
author = models.ForeignKey(User, verbose_name="Verbose Author", related_name='books_authored', blank=True, null=True)
contributors = models.ManyToManyField(User, verbose_name="Verbose Contributors", related_name='books_contributed', blank=True, null=True)
is_best_seller = models.NullBooleanField(default=0)
date_registered = models.DateField(null=True)
no = models.IntegerField(verbose_name='number', blank=True, null=True) # This field is intentionally 2 characters long. See #16080.
def __unicode__(self):
return self.title
class Department(models.Model):
code = models.CharField(max_length=4, unique=True)
description = models.CharField(max_length=50, blank=True, null=True)
def __unicode__(self):
return self.description
class Employee(models.Model):
department = models.ForeignKey(Department, to_field="code")
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
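# --- Usage sketch (not part of the original module) ---
# Minimal, hypothetical examples of querying these models; the department
# code "DEV" and the employee name are made up for illustration.
#
#   dev = Department.objects.create(code="DEV", description="Development")
#   Employee.objects.create(department=dev, name="Jane Doe")
#   # Because Employee.department uses to_field="code", the FK column stores
#   # the department code rather than its primary key:
#   Employee.objects.filter(department__code="DEV")
#   Book.objects.filter(is_best_seller=True, year__gte=2000)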
| {
"content_hash": "ff52bbf941f49eef32921c4b6394e2bf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 141,
"avg_line_length": 38.1875,
"alnum_prop": 0.7242225859247136,
"repo_name": "paulproteus/django",
"id": "371c67061f2435e7dd0b4b2d4ad3773f2ca386e9",
"size": "1222",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tests/regressiontests/admin_filters/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.contrib import admin
from import_export.admin import ExportMixin
from models import (Poll, PollComment, RadioPoll, RadioPollChoice,
RangePoll, RangePollChoice, Vote)
class RangePollChoiceInline(admin.StackedInline):
"""Poll Range Votes Inline."""
model = RangePollChoice
extra = 0
readonly_fields = ['votes']
class RangePollInline(admin.StackedInline):
"""Range Poll Inline."""
model = RangePoll
extra = 0
class RadioPollChoiceInline(admin.StackedInline):
"""Radio Poll Choice Inline."""
model = RadioPollChoice
extra = 0
readonly_fields = ['votes']
class RadioPollInline(admin.StackedInline):
"""Poll Radio Inline."""
model = RadioPoll
extra = 0
class RadioPollAdmin(ExportMixin, admin.ModelAdmin):
inlines = [RadioPollChoiceInline]
class RangePollAdmin(ExportMixin, admin.ModelAdmin):
inlines = [RangePollChoiceInline]
class PollCommentInline(admin.StackedInline):
"""PollComment Inline."""
model = PollComment
class PollAdmin(ExportMixin, admin.ModelAdmin):
"""Voting Admin."""
inlines = [RangePollInline, RadioPollInline, PollCommentInline]
search_fields = ['name']
list_display = ['name', 'start', 'end', 'valid_groups']
date_hierarchy = 'start'
readonly_fields = ['task_start_id', 'task_end_id', 'bug']
list_filter = ['automated_poll', 'is_extended', 'comments_allowed']
class VoteAdmin(ExportMixin, admin.ModelAdmin):
"""Vote Admin"""
model = Vote
search_fields = ['user__first_name', 'user__last_name', 'poll__name']
list_display = ['user', 'poll', 'date_voted']
admin.site.register(Vote, VoteAdmin)
admin.site.register(RangePoll, RangePollAdmin)
admin.site.register(RadioPoll, RadioPollAdmin)
admin.site.register(Poll, PollAdmin)
| {
"content_hash": "33b382744f5ca2d1611017a9e1f0cc57",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 27,
"alnum_prop": 0.6976229961304589,
"repo_name": "johngian/remo",
"id": "5934a4dbf44ee1c339cb45c5607285147e6584ef",
"size": "1809",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/voting/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "Batchfile",
"bytes": "4531"
},
{
"name": "CSS",
"bytes": "372453"
},
{
"name": "HTML",
"bytes": "373393"
},
{
"name": "JavaScript",
"bytes": "606422"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Puppet",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "7472017"
},
{
"name": "Shell",
"bytes": "3221"
},
{
"name": "Smarty",
"bytes": "215"
},
{
"name": "TeX",
"bytes": "1525"
}
],
"symlink_target": ""
} |
"""Tests for the T9 model."""
from pynini.examples import t9
from absl.testing import absltest
class T9Test(absltest.TestCase):
t9: t9.T9
@classmethod
def setUpClass(cls):
super().setUpClass()
lexicon = [
"the", "cool", "warthog", "escaped", "easily", "from", "baltimore",
"zoo", "col"
]
cls.t9 = t9.T9(lexicon)
def testExample(self):
example = "the cool warthog escaped easily from baltimore zoo"
encoded = self.t9.encode(example)
self.assertTrue(example in self.t9.decode(encoded).paths().ostrings())
if __name__ == "__main__":
absltest.main()
| {
"content_hash": "9cd3ec31a5360b3a9fe102026140589e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 21.03448275862069,
"alnum_prop": 0.6360655737704918,
"repo_name": "kylebgorman/pynini",
"id": "3257ac269ef456c1d9c04717b4c56f5463c056ad",
"size": "1287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/t9_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "173515"
},
{
"name": "Cython",
"bytes": "285320"
},
{
"name": "Python",
"bytes": "287089"
},
{
"name": "Shell",
"bytes": "1897"
},
{
"name": "Starlark",
"bytes": "31558"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/space/debris/shared_death_star_debris_f.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "7f0a652702aa367a26aba6da9d319e7c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6905537459283387,
"repo_name": "obi-two/Rebelion",
"id": "3262df0347889b464ed89f940627950e22998d6b",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/space/debris/shared_death_star_debris_f.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function, division, absolute_import
from pprint import pprint
import threading
import string
import logging
import multiprocessing
from six.moves import queue
import six
import errors
import settings
import taskq
import webservice
import notifier
logger = logging.getLogger(__name__)
class Worker(threading.Thread):
""" Thread executing tasks from a given tasks queue """
def __init__(self, tasks, *args, **kwargs):
daemon = kwargs.pop('daemon', False)
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = daemon
self.tasks = tasks
self.stop_flag = threading.Event()
self.start()
def run(self):
while not self.stop_flag.is_set():
try:
task = self.tasks.get(block=True, timeout=10)
except queue.Empty:
continue
func, args, kwargs = task['func'], task['args'], task['kwargs']
options = task.get('options', {})
if 'name' in options:
self.name = options['name']
try:
func(*args, **kwargs)
except Exception as e:
# An exception happened in this thread
logger.exception(e)
finally:
# Mark this task as done, whether an exception happened or not
self.tasks.task_done()
logger.debug('thread was flagged to stop')
class ThreadPool(object):
""" Pool of threads consuming tasks from a queue """
def __init__(self, max_threads):
self.max_threads = max_threads
self.tasks = queue.Queue(maxsize=max_threads)
self.pool = []
for i in range(min(self.tasks.qsize(), max_threads)):
worker = Worker(self.tasks, name='worker{}'.format(i+1))
self.pool.append(worker)
def add_task(self, func_signature, **options):
""" Add a task to the queue """
func, args, kwargs = func_signature['func'], func_signature['args'], func_signature['kwargs']
# worker threads should be daemonic, so that they exit when the main program exits and there is no need for joining.
daemon = options.pop('daemon', True)
self.tasks.put({'func': func, 'args': args, 'kwargs': kwargs, 'options': options})
if self.tasks.qsize() > 0 and len(self.pool) < self.max_threads:
worker = Worker(self.tasks, daemon=daemon, name='worker{}'.format(len(self.pool)+1))
self.pool.append(worker)
def map(self, func, args_list):
""" Add a list of tasks to the queue """
for args in args_list:
self.add_task({'func': func, 'args': args, 'kwargs': {}})
def stop(self):
for trd in self.pool:
trd.stop_flag.set()
trd.join()
def wait_completion(self):
""" Wait for completion of all the tasks in the queue """
self.tasks.join()
class WebServiceThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.qs = kwargs.pop('qs')
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
def run(self, *args, **kwargs):
logger.info('webservice thread started')
try:
srv = webservice.Service(qs=self.qs)
srv.run(*args, **kwargs)
except errors.PontiacError as e:
print('Pontiac Error. type: "{}", {}'.format(type(e), e))
logger.info('webservice thread finished')
def webservice_func(*args, **kwargs):
logger.info('webservice thread started')
try:
srv = webservice.Service(qs=kwargs.pop('qs'))
srv.run(*args, **kwargs)
except errors.PontiacError as e:
print('Pontiac Error. type: "{}", {}'.format(type(e), e))
logger.info('webservice thread finished')
class NotifierThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.queue = kwargs.pop('queue')
threading.Thread.__init__(self, *args, **kwargs)
def run(self, *args, **kwargs):
logger.info('notifier thread started')
try:
notifr = notifier.Notifier()
while True:
msg = self.queue.get()
logger.debug('received a new message on notification queue: "{}"'.format(msg))
try:
notifr.notify(msg=msg)
except errors.DataValidationError as e:
print('Data Validation Error: {}'.format(e))
except errors.PontiacError as e:
print('Pontiac Error. type: "{}", {}'.format(type(e), e))
logger.info('notifier thread finished')
def notifier_func(*args, **kwargs):
logger.info('notifier thread started')
try:
notifr = notifier.Notifier()
while True:
msg = kwargs['queue'].get()
logger.debug('received a new message on notification queue: "{}"'.format(msg))
try:
notifr.notify(msg=msg)
except errors.DataValidationError as e:
print('Data Validation Error: {}'.format(e))
except errors.PontiacError as e:
print('Pontiac Error. type: "{}", {}'.format(type(e), e))
logger.info('notifier thread finished')
def run_multi_thread(args):
"""Run two threads for notification receiver (webservice) and notification processor (notifier)
"""
logger.info('running in multi-thread mode')
if args.queuer == 'queue':
q_class = taskq.MemoryQueue
elif args.queuer == 'redis':
q_class = taskq.RedisQueue
else:
raise NotImplementedError()
qs = {
'notif': q_class(key='notif'),
}
pool = ThreadPool(max_threads=sum(settings.THREAD_COUNT.values()))
logger.info('creating {} webservice threads'.format(settings.THREAD_COUNT['WEBSERVICE']))
pool.add_task({'func': webservice_func, 'args': (), 'kwargs': {'qs': qs}}, name='webservice', daemon=True)
logger.info('creating {} notification threads'.format(settings.THREAD_COUNT['NOTIFICATION']))
for i in range(settings.THREAD_COUNT['NOTIFICATION']):
pool.add_task({'func': notifier_func, 'args': (), 'kwargs': {'queue': qs['notif']}}, name='notifier{}'.format(i), daemon=True)
pool.wait_completion()
pool.stop()
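# --- Usage sketch (not part of the original module) ---
# A minimal illustration of driving ThreadPool directly, outside
# run_multi_thread(); the task function and its arguments are hypothetical.
#
#   def send(msg):
#       logger.info('sending %s', msg)
#
#   pool = ThreadPool(max_threads=4)
#   for i in range(10):
#       pool.add_task({'func': send, 'args': ('message %d' % i,), 'kwargs': {}},
#                     name='sender', daemon=True)
#   pool.wait_completion()
#   pool.stop()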
| {
"content_hash": "ea3ac577c4e3cc5c922853709db76a92",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 134,
"avg_line_length": 35.93103448275862,
"alnum_prop": 0.5937300063979527,
"repo_name": "bisphon/pontiac",
"id": "eacb80d63cd06412fcb44e898d63aec359b568a2",
"size": "6252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threaded.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "45165"
}
],
"symlink_target": ""
} |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^polls/', include('polls.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "9ec9e90112dca57b658ce92a499343c7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 36.22727272727273,
"alnum_prop": 0.6938519447929736,
"repo_name": "mishka28/NYU-Python",
"id": "80e7b0447250710397a6ecae103cb13aa21341e8",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "advance_python_class_3/Homework4/mysite/mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "206"
},
{
"name": "HTML",
"bytes": "43267"
},
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "100728"
},
{
"name": "Shell",
"bytes": "7729"
},
{
"name": "Vim script",
"bytes": "719"
}
],
"symlink_target": ""
} |
from greetings import greet
def test_greet_the_world():
assert greet("world") == "Hello, world!"
| {
"content_hash": "5ac8d2d1f528f6babc44bf745f09d2d6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 20.6,
"alnum_prop": 0.6796116504854369,
"repo_name": "PPPoSD-2017/greetings",
"id": "68dea7cdf284a80805f2b1258b5706a7b275eb44",
"size": "103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_greet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1048"
}
],
"symlink_target": ""
} |
import os
from oslo.serialization import jsonutils as json
from daisy.common import client as base_client
from daisy.common import exception
from daisy import i18n
_ = i18n._
class CacheClient(base_client.BaseClient):
DEFAULT_PORT = 9292
DEFAULT_DOC_ROOT = '/v1'
def delete_cached_image(self, image_id):
"""
Delete a specified image from the cache
"""
self.do_request("DELETE", "/cached_images/%s" % image_id)
return True
def get_cached_images(self, **kwargs):
"""
Returns a list of images stored in the image cache.
"""
res = self.do_request("GET", "/cached_images")
data = json.loads(res.read())['cached_images']
return data
def get_queued_images(self, **kwargs):
"""
Returns a list of images queued for caching
"""
res = self.do_request("GET", "/queued_images")
data = json.loads(res.read())['queued_images']
return data
def delete_all_cached_images(self):
"""
Delete all cached images
"""
res = self.do_request("DELETE", "/cached_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def queue_image_for_caching(self, image_id):
"""
Queue an image for prefetching into cache
"""
self.do_request("PUT", "/queued_images/%s" % image_id)
return True
def delete_queued_image(self, image_id):
"""
Delete a specified image from the cache queue
"""
self.do_request("DELETE", "/queued_images/%s" % image_id)
return True
def delete_all_queued_images(self):
"""
Delete all queued images
"""
res = self.do_request("DELETE", "/queued_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def get_client(host, port=None, timeout=None, use_ssl=False, username=None,
password=None, tenant=None,
auth_url=None, auth_strategy=None,
auth_token=None, region=None,
is_silent_upload=False, insecure=False):
"""
Returns a new client Glance client object based on common kwargs.
If an option isn't specified falls back to common environment variable
defaults.
"""
if auth_url or os.getenv('OS_AUTH_URL'):
force_strategy = 'keystone'
else:
force_strategy = None
creds = {
'username': username or
os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')),
'password': password or
os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')),
'tenant': tenant or
os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')),
'auth_url': auth_url or
os.getenv('OS_AUTH_URL'),
'strategy': force_strategy or
auth_strategy or
os.getenv('OS_AUTH_STRATEGY', 'noauth'),
'region': region or
os.getenv('OS_REGION_NAME'),
}
if creds['strategy'] == 'keystone' and not creds['auth_url']:
msg = _("--os_auth_url option or OS_AUTH_URL environment variable "
"required when keystone authentication strategy is enabled\n")
raise exception.ClientConfigurationError(msg)
return CacheClient(
host=host,
port=port,
timeout=timeout,
use_ssl=use_ssl,
auth_token=auth_token or
os.getenv('OS_TOKEN'),
creds=creds,
insecure=insecure)
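# --- Usage sketch (not part of the original module) ---
# A minimal illustration of the cache client, assuming a local endpoint with
# the noauth strategy; the host, port and image id are hypothetical.
#
#   client = get_client('127.0.0.1', port=9292, auth_strategy='noauth')
#   cached = client.get_cached_images()
#   client.queue_image_for_caching('11111111-2222-3333-4444-555555555555')
#   client.delete_all_queued_images()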
| {
"content_hash": "170723030a1aa446c91e233f51a18f5b",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 30.04237288135593,
"alnum_prop": 0.5847672778561354,
"repo_name": "OpenDaisy/daisy-api",
"id": "f3c2a2828f9486b83ad288693fddb4893698d581",
"size": "4181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisy/image_cache/client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1475450"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
} |
"""
__graph_StateMachineElement.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
_________________________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_StateMachineElement(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([134.0, 37.0, 134.0, 12.0])[:2], tags = self.tag, font=font, fill = 'grey45', anchor = 'center', text = '', width = '0', justify= 'left', stipple='' )
self.gf30 = GraphicalForm(drawing, h, 'gf30', fontObject=font)
self.graphForms.append(self.gf30)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([100.0, 40.0, 100.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'StateMachineElement', width = '0', justify= 'left', stipple='' )
self.gf31 = GraphicalForm(drawing, h, 'gf31', fontObject=font)
self.graphForms.append(self.gf31)
if self.semanticObject: drawText = self.semanticObject.name.toString()
else: drawText = "<name>"
font = tkFont.Font( family='Helvetica', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([72.0, 66.0, 72.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = drawText, width = '0', justify= 'left', stipple='' )
self.attr_display["name"] = h
self.gf32 = GraphicalForm(drawing, h, 'gf32', fontObject=font)
self.graphForms.append(self.gf32)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_StateMachineElement
| {
"content_hash": "de8a5a667452bbce03a332e6a21f0a67",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 215,
"avg_line_length": 45.97014925373134,
"alnum_prop": 0.6038961038961039,
"repo_name": "levilucio/SyVOLT",
"id": "9da2e04b691b0d51c343ac38612412c40e3265fd",
"size": "3080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/graph_StateMachineElement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import (partial_model_inference,
pivot_plot,
lee_inference)
from selection.learning.core import normal_sampler, keras_fit
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=8000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(XTX, XTXi, lam, sampler):
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(max_its=100, tol=1.e-10)
success += soln != 0
return tuple(sorted(np.nonzero(success)[0]))
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)
# run selection algorithm
df = partial_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
fit_probability=keras_fit,
fit_args={'epochs':30, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'},
success_params=(1, 1),
B=B,
alpha=alpha)
lee_df = lee_inference(X,
y,
lam,
dispersion,
truth,
alpha=alpha)
return pd.merge(df, lee_df, on='variable')
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
for i in range(500):
df = simulate()
csvfile = 'lee_multi.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
# pivot_ax.plot(U, sm.distributions.ECDF(df['lee_pivot'][~np.isnan(df['lee_pivot'])])(U), 'g', label='Lee', linewidth=3)
pivot_ax.figure.savefig(outbase + '.pdf')
length_ax.scatter(df['naive_length'], df['lee_length'])
length_ax.figure.savefig(outbase + '_lengths.pdf')
| {
"content_hash": "77158e0a48355b2fe9ec2c4f35f647e8",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 136,
"avg_line_length": 33.425925925925924,
"alnum_prop": 0.4700831024930748,
"repo_name": "selective-inference/selective-inference",
"id": "d81ff4cb1b3a0dc7afba52483bf38fb021dd9ad8",
"size": "3610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/learning_examples/multi_target/lee_multi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "269"
},
{
"name": "C++",
"bytes": "13148"
},
{
"name": "Python",
"bytes": "572490"
},
{
"name": "R",
"bytes": "11134"
},
{
"name": "TeX",
"bytes": "3355"
}
],
"symlink_target": ""
} |
from django.views.generic import TemplateView
class ExampleView(TemplateView):
def get_context_data(self, **kwargs):
context = super(ExampleView, self).get_context_data(**kwargs)
# context['form'] = form
return context | {
"content_hash": "23a0ad311960bbcaa08d822b93208860",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.6977777777777778,
"repo_name": "un33k/django-dropzone",
"id": "6ef3f13806e13274a081f2615f307af504365fd0",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/example_project/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23336"
},
{
"name": "HTML",
"bytes": "1864"
},
{
"name": "JavaScript",
"bytes": "116273"
},
{
"name": "Python",
"bytes": "11638"
}
],
"symlink_target": ""
} |
import requests
import json
from conf import appid, apikey, sender, receivers, content
url = 'https://api.bluehouselab.com/smscenter/v1.0/sendsms'
params = {
'sender' : sender,
'receivers' : receivers,
'content' : content,
}
headers = {'Content-type': 'application/json; charset=utf-8',}
r = requests.post(url, data=json.dumps(params),
auth=(appid, apikey), headers=headers)
print r.status_code, r.reason
if r.status_code == 200:
print r.json()
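# --- Usage sketch (not part of the original script) ---
# This script expects a local conf.py providing the credentials and message
# fields imported above. A hypothetical example of that module:
#
#   # conf.py
#   appid = 'your-app-id'
#   apikey = 'your-api-key'
#   sender = '0212345678'
#   receivers = ['01012345678']
#   content = u'Hello from the SMS OpenAPI example'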
| {
"content_hash": "fd54fd192e4332bfb65aceffe18e1c30",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.6632443531827515,
"repo_name": "BlueHouseLab/sms-openapi",
"id": "1f3f7a2de8cba2a589cbf34d2f53fd51bba6e1d7",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-requests/sendsms.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3575"
},
{
"name": "C#",
"bytes": "2181"
},
{
"name": "C++",
"bytes": "4165"
},
{
"name": "Java",
"bytes": "5863"
},
{
"name": "JavaScript",
"bytes": "1705"
},
{
"name": "PHP",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "15062"
},
{
"name": "Ruby",
"bytes": "1196"
},
{
"name": "Shell",
"bytes": "7905"
}
],
"symlink_target": ""
} |
"""matplotlib_kit package driver file.
Inserts the following modules in sys.modules: matplotlib, pylab.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import os
import re
import sys
import types
# you have to define this
VERSION = ''
def init(theModuleManager, pre_import=True):
if hasattr(sys, 'frozen') and sys.frozen:
# matplotlib supports py2exe by checking for matplotlibdata in the appdir
# but this is only done on windows (and therefore works for our windows
# installer builds). On non-windows, we have to stick it in the env
# to make sure that MPL finds its datadir (only if we're frozen)
mpldir = os.path.join(theModuleManager.get_appdir(), 'matplotlibdata')
os.environ['MATPLOTLIBDATA'] = mpldir
# import the main module itself
# this doesn't import numerix yet...
global matplotlib
import matplotlib
# use WX + Agg backend (slower, but nicer that WX)
matplotlib.use('WXAgg')
# interactive mode: user can use pylab commands from any introspection
# interface, changes will be made immediately and matplotlib cooperates
# nicely with main WX event loop
matplotlib.interactive(True)
# with matplotlib 1.0.1 we can't do this anymore.
# makes sure we use the numpy backend
#from matplotlib import rcParams
#rcParams['numerix'] = 'numpy'
theModuleManager.setProgress(25, 'Initialising matplotlib_kit: config')
# @PATCH:
# this is for the combination numpy 1.0.4 and matplotlib 0.91.2
# matplotlib/numerix/ma/__init__.py:
# . normal installation fails on "from numpy.ma import *", so "from
# numpy.core.ma import *" is done, thus bringing in e.g. getmask
# . pyinstaller binaries for some or other reason succeed on
# "from numpy.ma import *" (no exception raised), therefore do
# not do "from numpy.core.ma import *", and therefore things like
# getmask are not imported.
# solution:
# we make sure that "from numpy.ma import *" actually brings in
# numpy.core.ma by importing that and associating the module
# binding to the global numpy.ma.
#if hasattr(sys, 'frozen') and sys.frozen:
# import numpy.core.ma
# sys.modules['numpy.ma'] = sys.modules['numpy.core.ma']
# import the pylab interface, make sure it's available from this namespace
global pylab
import pylab
theModuleManager.setProgress(90, 'Initialising matplotlib_kit: pylab')
# build up VERSION
global VERSION
VERSION = '%s' % (matplotlib.__version__,)
theModuleManager.setProgress(100, 'Initialising matplotlib_kit: complete')
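# --- Usage sketch (not part of the original module) ---
# init() only needs an object exposing get_appdir() and setProgress(); a
# hypothetical stand-in for the real module manager could look like this:
#
#   class FakeModuleManager:
#       def get_appdir(self):
#           return '/opt/devide'
#       def setProgress(self, percent, message):
#           print('%3d%% %s' % (percent, message))
#
#   init(FakeModuleManager())
#   # VERSION now holds the matplotlib version string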
| {
"content_hash": "cc44d592364385b685eccdeaa684107a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 81,
"avg_line_length": 34.94736842105263,
"alnum_prop": 0.6863704819277109,
"repo_name": "zhangfangyan/devide",
"id": "54499ba91041c1406c4e3c5706dbe2d5e6e2bf88",
"size": "2895",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "module_kits/matplotlib_kit/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3102319"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
import uuid
import types
from traitlets import (
HasTraits,
Instance,
default
)
def issue(self, message, exc=None):
"""Accepts a message that will be logged with an additional reference
code for easy log lookup.
The identifier will be returned for inclusion in user-visible
error messages.
"""
ref = str(uuid.uuid1())
if exc is None:
err_message = "{}. Ref: {}".format(message, ref)
else:
err_message = "{} : {}. Ref: {}".format(
message, str(exc), ref)
self.error(err_message)
return ref
class LoggingMixin(HasTraits):
"""A HasTrait class that provides logging. Used as a mixin.
"""
log = Instance('logging.Logger')
@default('log')
def _log_default(self):
from tornado.log import app_log
# monkey patch the logger to provide an additional method that handles
# issues
app_log.issue = types.MethodType(issue, app_log)
return app_log
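# --- Usage sketch (not part of the original module) ---
# A minimal illustration of mixing LoggingMixin into a class and using the
# patched issue() helper; the class and message are hypothetical.
#
#   class RequestHandler(LoggingMixin):
#       def handle(self):
#           try:
#               raise ValueError("boom")
#           except ValueError as exc:
#               ref = self.log.issue("Failed to handle request", exc=exc)
#               return {"error": "Internal error. Ref: " + ref}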
| {
"content_hash": "d065f5bdd41cf859e0d444d96fa9dadc",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 22.88372093023256,
"alnum_prop": 0.6239837398373984,
"repo_name": "simphony/simphony-remote",
"id": "fddc6f688246c29e383eac125a071a172a6a0855",
"size": "984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remoteappmanager/logging/logging_mixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14011"
},
{
"name": "JavaScript",
"bytes": "51718"
},
{
"name": "Makefile",
"bytes": "6052"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "418020"
},
{
"name": "Shell",
"bytes": "1690"
},
{
"name": "Vue",
"bytes": "46644"
}
],
"symlink_target": ""
} |
"""Unit tests for Superset"""
import json
import pytest
import prison
from sqlalchemy.sql import func
import tests.integration_tests.test_app
from superset import db
from superset.models.annotations import Annotation, AnnotationLayer
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.annotation_layers.fixtures import (
create_annotation_layers,
get_end_dttm,
get_start_dttm,
)
from tests.unit_tests.annotation_layers.fixtures import (
START_STR,
END_STR,
)
ANNOTATION_LAYERS_COUNT = 10
ANNOTATIONS_COUNT = 5
class TestAnnotationLayerApi(SupersetTestCase):
@staticmethod
def get_layer_with_annotation() -> AnnotationLayer:
return (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "layer_with_annotations")
.one_or_none()
)
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_annotation_layer(self):
"""
Annotation Api: Test get annotation layer
"""
annotation_layer = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "name1")
.first()
)
self.login(username="admin")
uri = f"api/v1/annotation_layer/{annotation_layer.id}"
rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 200
expected_result = {
"id": annotation_layer.id,
"name": "name1",
"descr": "descr1",
}
data = json.loads(rv.data.decode("utf-8"))
assert data["result"] == expected_result
def test_info_annotation(self):
"""
Annotation API: Test info
"""
self.login(username="admin")
uri = "api/v1/annotation_layer/_info"
rv = self.get_assert_metric(uri, "info")
assert rv.status_code == 200
def test_info_security_query(self):
"""
Annotation API: Test info security
"""
self.login(username="admin")
params = {"keys": ["permissions"]}
uri = f"api/v1/annotation_layer/_info?q={prison.dumps(params)}"
rv = self.get_assert_metric(uri, "info")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert "can_read" in data["permissions"]
assert "can_write" in data["permissions"]
assert len(data["permissions"]) == 2
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_annotation_layer_not_found(self):
"""
Annotation Api: Test get annotation layer not found
"""
max_id = db.session.query(func.max(AnnotationLayer.id)).scalar()
self.login(username="admin")
uri = f"api/v1/annotation_layer/{max_id + 1}"
rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_list_annotation_layer(self):
"""
Annotation Api: Test get list annotation layers
"""
self.login(username="admin")
uri = "api/v1/annotation_layer/"
rv = self.get_assert_metric(uri, "get_list")
expected_fields = [
"name",
"descr",
"created_by",
"created_on",
"changed_by",
"changed_on_delta_humanized",
"changed_on",
]
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert data["count"] == ANNOTATION_LAYERS_COUNT
for expected_field in expected_fields:
assert expected_field in data["result"][0]
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_list_annotation_layer_sorting(self):
"""
Annotation Api: Test sorting on get list annotation layers
"""
self.login(username="admin")
uri = "api/v1/annotation_layer/"
order_columns = [
"name",
"descr",
"created_by.first_name",
"changed_by.first_name",
"changed_on",
"changed_on_delta_humanized",
"created_on",
]
for order_column in order_columns:
arguments = {"order_column": order_column, "order_direction": "asc"}
uri = f"api/v1/annotation_layer/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_list_annotation_layer_filter(self):
"""
Annotation Api: Test filters on get list annotation layers
"""
self.login(username="admin")
arguments = {
"columns": ["name", "descr"],
"filters": [
{"col": "name", "opr": "annotation_layer_all_text", "value": "2"}
],
}
uri = f"api/v1/annotation_layer/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
expected_result = {
"name": "name2",
"descr": "descr2",
}
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert data["count"] == 1
assert data["result"][0] == expected_result
arguments = {
"columns": ["name", "descr"],
"filters": [
{"col": "name", "opr": "annotation_layer_all_text", "value": "descr3"}
],
}
uri = f"api/v1/annotation_layer/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
expected_result = {
"name": "name3",
"descr": "descr3",
}
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert data["count"] == 1
assert data["result"][0] == expected_result
def test_create_annotation_layer(self):
"""
Annotation Api: Test create annotation layer
"""
self.login(username="admin")
annotation_layer_data = {
"name": "new3",
"descr": "description",
}
uri = "api/v1/annotation_layer/"
rv = self.client.post(uri, json=annotation_layer_data)
assert rv.status_code == 201
data = json.loads(rv.data.decode("utf-8"))
created_model = db.session.query(AnnotationLayer).get(data.get("id"))
assert created_model is not None
assert created_model.name == annotation_layer_data["name"]
assert created_model.descr == annotation_layer_data["descr"]
# Rollback changes
db.session.delete(created_model)
db.session.commit()
def test_create_incorrect_annotation_layer(self):
"""
Annotation Api: Test create incorrect annotation layer
"""
self.login(username="admin")
annotation_layer_data = {}
uri = "api/v1/annotation_layer/"
rv = self.client.post(uri, json=annotation_layer_data)
assert rv.status_code == 400
data = json.loads(rv.data.decode("utf-8"))
assert data == {"message": {"name": ["Missing data for required field."]}}
@pytest.mark.usefixtures("create_annotation_layers")
def test_create_annotation_layer_uniqueness(self):
"""
Annotation Api: Test create annotation layer uniqueness
"""
self.login(username="admin")
annotation_layer_data = {"name": "name3", "descr": "description"}
uri = "api/v1/annotation_layer/"
rv = self.client.post(uri, json=annotation_layer_data)
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {"message": {"name": ["Name must be unique"]}}
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation_layer(self):
"""
Annotation Api: Test update annotation layer
"""
annotation_layer = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "name2")
.one_or_none()
)
self.login(username="admin")
annotation_layer_data = {"name": "changed_name"}
uri = f"api/v1/annotation_layer/{annotation_layer.id}"
rv = self.client.put(uri, json=annotation_layer_data)
assert rv.status_code == 200
updated_model = db.session.query(AnnotationLayer).get(annotation_layer.id)
assert updated_model is not None
assert updated_model.name == annotation_layer_data["name"]
# make sure the descr hasn't updated
assert updated_model.descr == annotation_layer.descr
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation_layer_uniqueness(self):
"""
Annotation Api: Test update annotation layer uniqueness
"""
annotation_layer = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "name2")
.one_or_none()
)
self.login(username="admin")
annotation_layer_data = {"name": "name3", "descr": "changed_description"}
uri = f"api/v1/annotation_layer/{annotation_layer.id}"
rv = self.client.put(uri, json=annotation_layer_data)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert data == {"message": {"name": ["Name must be unique"]}}
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation_layer_not_found(self):
"""
Annotation Api: Test update annotation layer not found
"""
max_id = db.session.query(func.max(AnnotationLayer.id)).scalar()
self.login(username="admin")
annotation_layer_data = {"name": "changed_name", "descr": "changed_description"}
uri = f"api/v1/annotation_layer/{max_id + 1}"
rv = self.client.put(uri, json=annotation_layer_data)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_delete_annotation_layer(self):
"""
        Annotation Api: Test delete annotation layer
"""
annotation_layer = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "name1")
.one_or_none()
)
self.login(username="admin")
uri = f"api/v1/annotation_layer/{annotation_layer.id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
updated_model = db.session.query(AnnotationLayer).get(annotation_layer.id)
assert updated_model is None
@pytest.mark.usefixtures("create_annotation_layers")
def test_delete_annotation_layer_not_found(self):
"""
Annotation Api: Test delete annotation layer not found
"""
max_id = db.session.query(func.max(AnnotationLayer.id)).scalar()
self.login(username="admin")
uri = f"api/v1/annotation_layer/{max_id + 1}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_delete_annotation_layer_integrity(self):
"""
Annotation Api: Test delete annotation layer integrity error
"""
query_child_layer = db.session.query(AnnotationLayer).filter(
AnnotationLayer.name == "layer_with_annotations"
)
child_layer = query_child_layer.one_or_none()
self.login(username="admin")
uri = f"api/v1/annotation_layer/{child_layer.id}"
rv = self.client.delete(uri)
assert rv.status_code == 422
@pytest.mark.usefixtures("create_annotation_layers")
def test_bulk_delete_annotation_layer(self):
"""
Annotation Api: Test bulk delete annotation layers
"""
query_no_child_layers = db.session.query(AnnotationLayer).filter(
AnnotationLayer.name.like("name%")
)
no_child_layers = query_no_child_layers.all()
no_child_layers_ids = [
annotation_layer.id for annotation_layer in no_child_layers
]
self.login(username="admin")
uri = f"api/v1/annotation_layer/?q={prison.dumps(no_child_layers_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 200
deleted_annotation_layers = query_no_child_layers.all()
assert deleted_annotation_layers == []
response = json.loads(rv.data.decode("utf-8"))
expected_response = {
"message": f"Deleted {len(no_child_layers_ids)} annotation layers"
}
assert response == expected_response
@pytest.mark.usefixtures("create_annotation_layers")
def test_bulk_delete_annotation_layer_not_found(self):
"""
Annotation Api: Test bulk delete annotation layers not found
"""
all_annotation_layers = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name.like("name%"))
.all()
)
all_annotation_layers_ids = [
annotation_layer.id for annotation_layer in all_annotation_layers
]
max_id = db.session.query(func.max(AnnotationLayer.id)).scalar()
all_annotation_layers_ids.append(max_id + 1)
self.login(username="admin")
uri = f"api/v1/annotation_layer/?q={prison.dumps(all_annotation_layers_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_annotation(self):
"""
Annotation API: Test get annotation
"""
annotation_id = 1
annotation = (
db.session.query(Annotation)
.filter(Annotation.short_descr == f"short_descr{annotation_id}")
.one_or_none()
)
self.login(username="admin")
uri = (
f"api/v1/annotation_layer/{annotation.layer_id}/annotation/{annotation.id}"
)
rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 200
expected_result = {
"id": annotation.id,
"end_dttm": get_end_dttm(annotation_id).isoformat(),
"json_metadata": "",
"layer": {"id": annotation.layer_id, "name": "layer_with_annotations"},
"long_descr": annotation.long_descr,
"short_descr": annotation.short_descr,
"start_dttm": get_start_dttm(annotation_id).isoformat(),
}
data = json.loads(rv.data.decode("utf-8"))
assert data["result"] == expected_result
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_annotation_not_found(self):
"""
Annotation API: Test get annotation not found
"""
layer = self.get_layer_with_annotation()
max_id = db.session.query(func.max(Annotation.id)).scalar()
self.login(username="admin")
uri = f"api/v1/annotation_layer/{layer.id}/annotation/{max_id + 1}"
rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_list_annotation(self):
"""
Annotation Api: Test get list of annotations
"""
layer = self.get_layer_with_annotation()
self.login(username="admin")
uri = f"api/v1/annotation_layer/{layer.id}/annotation/"
rv = self.get_assert_metric(uri, "get_list")
expected_fields = [
"short_descr",
"created_by",
"changed_by",
"start_dttm",
"end_dttm",
]
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert data["count"] == ANNOTATIONS_COUNT
for expected_field in expected_fields:
assert expected_field in data["result"][0]
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_list_annotation_sorting(self):
"""
Annotation Api: Test sorting on get list of annotations
"""
layer = self.get_layer_with_annotation()
self.login(username="admin")
order_columns = [
"short_descr",
"created_by.first_name",
"changed_by.first_name",
"changed_on_delta_humanized",
"start_dttm",
"end_dttm",
]
for order_column in order_columns:
arguments = {"order_column": order_column, "order_direction": "asc"}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
@pytest.mark.usefixtures("create_annotation_layers")
def test_get_list_annotation_filter(self):
"""
        Annotation Api: Test filters on get list annotations
"""
layer = self.get_layer_with_annotation()
self.login(username="admin")
arguments = {
"filters": [
{"col": "short_descr", "opr": "annotation_all_text", "value": "2"}
]
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert data["count"] == 1
arguments = {
"filters": [
{"col": "short_descr", "opr": "annotation_all_text", "value": "descr3"}
]
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
assert rv.status_code == 200
data = json.loads(rv.data.decode("utf-8"))
assert data["count"] == 1
@pytest.mark.usefixtures("create_annotation_layers")
def test_create_annotation(self):
"""
Annotation Api: Test create annotation
"""
layer = self.get_layer_with_annotation()
self.login(username="admin")
annotation_data = {
"short_descr": "new",
"long_descr": "description",
"start_dttm": START_STR,
"end_dttm": END_STR,
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/"
rv = self.client.post(uri, json=annotation_data)
assert rv.status_code == 201
data = json.loads(rv.data.decode("utf-8"))
created_model: Annotation = db.session.query(Annotation).get(data.get("id"))
assert created_model is not None
assert created_model.short_descr == annotation_data["short_descr"]
assert created_model.long_descr == annotation_data["long_descr"]
# Rollback changes
db.session.delete(created_model)
db.session.commit()
@pytest.mark.usefixtures("create_annotation_layers")
def test_create_incorrect_annotation(self):
"""
Annotation Api: Test create incorrect annotation
"""
layer = self.get_layer_with_annotation()
self.login(username="admin")
annotation_data = {
"long_descr": "description",
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/"
rv = self.client.post(uri, json=annotation_data)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 400
assert data == {
"message": {
"end_dttm": ["Missing data for required field."],
"short_descr": ["Missing data for required field."],
"start_dttm": ["Missing data for required field."],
}
}
@pytest.mark.usefixtures("create_annotation_layers")
def test_create_annotation_uniqueness(self):
"""
Annotation Api: Test create annotation uniqueness
"""
layer = self.get_layer_with_annotation()
self.login(username="admin")
annotation_data = {
"short_descr": "short_descr2",
"long_descr": "description",
"start_dttm": START_STR,
"end_dttm": END_STR,
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/"
rv = self.client.post(uri, json=annotation_data)
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {
"message": {
"short_descr": ["Short description must be unique for this layer"]
}
}
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation(self):
"""
Annotation Api: Test update annotation
"""
layer = self.get_layer_with_annotation()
annotation = (
db.session.query(Annotation)
.filter(Annotation.short_descr == "short_descr2")
.one_or_none()
)
self.login(username="admin")
annotation_data = {
"short_descr": "changed_name",
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/{annotation.id}"
rv = self.client.put(uri, json=annotation_data)
assert rv.status_code == 200
updated_model: Annotation = db.session.query(Annotation).get(annotation.id)
assert updated_model is not None
assert updated_model.short_descr == annotation_data["short_descr"]
# make sure long_descr hasn't updated
assert updated_model.long_descr == annotation.long_descr
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation_null_datetime(self):
"""
Annotation Api: Test update annotation null datetime
"""
layer = self.get_layer_with_annotation()
annotation = (
db.session.query(Annotation)
.filter(Annotation.short_descr == "short_descr2")
.one_or_none()
)
self.login(username="admin")
annotation_data = {"start_dttm": None, "end_dttm": None}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/{annotation.id}"
rv = self.client.put(uri, json=annotation_data)
assert rv.status_code == 400
data = json.loads(rv.data.decode("utf-8"))
assert data == {
"message": {
"end_dttm": ["Field may not be null."],
"start_dttm": ["Field may not be null."],
}
}
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation_uniqueness(self):
"""
Annotation Api: Test update annotation uniqueness
"""
layer = self.get_layer_with_annotation()
annotation = (
db.session.query(Annotation)
.filter(Annotation.short_descr == "short_descr2")
.one_or_none()
)
self.login(username="admin")
annotation_layer_data = {
"short_descr": "short_descr3",
"long_descr": "changed_description",
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/{annotation.id}"
rv = self.client.put(uri, json=annotation_layer_data)
assert rv.status_code == 422
data = json.loads(rv.data.decode("utf-8"))
assert data == {
"message": {
"short_descr": ["Short description must be unique for this layer"]
}
}
@pytest.mark.usefixtures("create_annotation_layers")
def test_update_annotation_not_found(self):
"""
Annotation Api: Test update annotation not found
"""
layer = self.get_layer_with_annotation()
max_id = db.session.query(func.max(Annotation.id)).scalar()
self.login(username="admin")
annotation_layer_data = {
"short_descr": "changed_name",
}
uri = f"api/v1/annotation_layer/{layer.id}/annotation/{max_id + 1}"
rv = self.client.put(uri, json=annotation_layer_data)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_delete_annotation(self):
"""
        Annotation Api: Test delete annotation
"""
layer = self.get_layer_with_annotation()
annotation = (
db.session.query(Annotation)
.filter(Annotation.short_descr == "short_descr1")
.one_or_none()
)
self.login(username="admin")
uri = f"api/v1/annotation_layer/{layer.id}/annotation/{annotation.id}"
rv = self.client.delete(uri)
assert rv.status_code == 200
updated_model = db.session.query(Annotation).get(annotation.id)
assert updated_model is None
@pytest.mark.usefixtures("create_annotation_layers")
def test_delete_annotation_not_found(self):
"""
Annotation Api: Test delete annotation not found
"""
layer = self.get_layer_with_annotation()
max_id = db.session.query(func.max(Annotation.id)).scalar()
self.login(username="admin")
uri = f"api/v1/annotation_layer/{layer.id}/annotation{max_id + 1}"
rv = self.client.delete(uri)
assert rv.status_code == 404
@pytest.mark.usefixtures("create_annotation_layers")
def test_bulk_delete_annotation(self):
"""
Annotation Api: Test bulk delete annotation
"""
layer = self.get_layer_with_annotation()
query_annotations = db.session.query(Annotation).filter(
Annotation.layer == layer
)
annotations = query_annotations.all()
annotations_ids = [annotation.id for annotation in annotations]
self.login(username="admin")
uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(annotations_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 200
deleted_annotations = query_annotations.all()
assert deleted_annotations == []
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": f"Deleted {len(annotations_ids)} annotations"}
assert response == expected_response
@pytest.mark.usefixtures("create_annotation_layers")
def test_bulk_delete_annotation_not_found(self):
"""
Annotation Api: Test bulk delete annotation not found
"""
layer = self.get_layer_with_annotation()
query_annotations = db.session.query(Annotation).filter(
Annotation.layer == layer
)
annotations = query_annotations.all()
annotations_ids = [annotation.id for annotation in annotations]
max_id = db.session.query(func.max(Annotation.id)).scalar()
annotations_ids.append(max_id + 1)
self.login(username="admin")
uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(annotations_ids)}"
rv = self.client.delete(uri)
assert rv.status_code == 404
| {
"content_hash": "9deb0bf589fdcf603211d2b3338f09eb",
"timestamp": "",
"source": "github",
"line_count": 732,
"max_line_length": 97,
"avg_line_length": 36.78825136612022,
"alnum_prop": 0.5853912139329348,
"repo_name": "zhouyao1994/incubator-superset",
"id": "61f6b2ff678295c85dc0326a2306ef29bc4bb5c3",
"size": "27732",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/integration_tests/annotation_layers/api_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4776"
},
{
"name": "Dockerfile",
"bytes": "6940"
},
{
"name": "HTML",
"bytes": "1243911"
},
{
"name": "JavaScript",
"bytes": "2445349"
},
{
"name": "Jinja",
"bytes": "5542"
},
{
"name": "Jupyter Notebook",
"bytes": "1925627"
},
{
"name": "Less",
"bytes": "106438"
},
{
"name": "Makefile",
"bytes": "3946"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Pug",
"bytes": "2969"
},
{
"name": "Python",
"bytes": "6296253"
},
{
"name": "Shell",
"bytes": "56211"
},
{
"name": "Smarty",
"bytes": "4298"
},
{
"name": "TypeScript",
"bytes": "6909337"
}
],
"symlink_target": ""
} |
from Signal import Signal
import numpy as np
class LMSAdaptiveFilter:
"""
The LMS Adaptive Filter.
"""
def __init__(self, order, damping=0.5):
self.order = order
self.damping = damping
self.X = Signal(order)
self.Y = Signal(order)
self.weights = [0] * order
    def is_signal_outlier(self, sig):
        # Normalized LMS step: estimate the incoming sample from the current
        # window, then move the weights along the normalized error gradient.
        X = np.array(self.X.signal)
        weights = np.array(self.weights)
        yest = weights.dot(X)
        c = (1.0 * (sig - yest)) / (1. * X.dot(X))
        weights = weights + self.damping * c * X
        self.X.add(sig)
        self.weights = list(weights)
        return self._check_est(yest)
def _check_est(self, est):
if self.Y.can_use():
return est >= (2.0 * self.Y.sigma() + self.Y.mean())
        return False
| {
"content_hash": "1dfd6a3ef1f6c92bf8f6048af7b0744c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 64,
"avg_line_length": 26.933333333333334,
"alnum_prop": 0.5433168316831684,
"repo_name": "111t8e/h2o-2",
"id": "f38e98bc1a93082dd91db0c9a92240ea44e11e22",
"size": "808",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "h2o-perf/bench/py/h2oPerf/LMSAdaptiveFilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177967"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "92357"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
} |
from pyface.qt import QtGui
# Enthought library imports.
from traits.api import Bool, provides, Unicode
# Local imports.
from pyface.i_directory_dialog import IDirectoryDialog, MDirectoryDialog
from dialog import Dialog
@provides(IDirectoryDialog)
class DirectoryDialog(MDirectoryDialog, Dialog):
""" The toolkit specific implementation of a DirectoryDialog. See the
IDirectoryDialog interface for the API documentation.
"""
#### 'IDirectoryDialog' interface #########################################
default_path = Unicode
message = Unicode
new_directory = Bool(True)
path = Unicode
###########################################################################
# Protected 'IDialog' interface.
###########################################################################
def _create_contents(self, parent):
# In PyQt this is a canned dialog.
pass
###########################################################################
# 'IWindow' interface.
###########################################################################
def close(self):
# Get the path of the chosen directory.
files = self.control.selectedFiles()
if files:
self.path = unicode(files[0])
else:
self.path = ''
# Let the window close as normal.
super(DirectoryDialog, self).close()
###########################################################################
# Protected 'IWidget' interface.
###########################################################################
def _create_control(self, parent):
dlg = QtGui.QFileDialog(parent, self.title, self.default_path)
dlg.setViewMode(QtGui.QFileDialog.Detail)
dlg.setFileMode(QtGui.QFileDialog.DirectoryOnly)
if not self.new_directory:
dlg.setReadOnly(True)
if self.message:
dlg.setLabelText(QtGui.QFileDialog.LookIn, self.message)
return dlg
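# Usage sketch (illustrative; not part of the original module). The names below
# follow the IDirectoryDialog API exported via pyface.api -- treat the exact
# import path as an assumption:
#
#     from pyface.api import DirectoryDialog, OK
#
#     dialog = DirectoryDialog(default_path='/tmp', new_directory=False)
#     if dialog.open() == OK:
#         print(dialog.path)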
#### EOF ######################################################################
| {
"content_hash": "ff5b36eb0296eea8aa9c4fb3aea10bf4",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 29.97142857142857,
"alnum_prop": 0.47283126787416585,
"repo_name": "brett-patterson/pyface",
"id": "4c6336275ab7422ea0ab7333aaf62f858cd3b6dd",
"size": "2652",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyface/ui/qt4/directory_dialog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "648"
},
{
"name": "Python",
"bytes": "2371056"
}
],
"symlink_target": ""
} |
'''
Affine invariant feature-based image matching sample.
This sample is similar to find_obj.py, but uses the affine transformation
space sampling technique, called ASIFT [1]. While the original implementation
is based on SIFT, you can try to use SURF or ORB detectors instead. Homography RANSAC
is used to reject outliers. Threading is used for faster affine sampling.
[1] http://www.ipol.im/pub/algo/my_affine_sift/
USAGE
asift.py [--feature=<sift|surf|orb|brisk>[-flann]] [ <image1> <image2> ]
--feature - Feature to use. Can be sift, surf, orb or brisk. Append '-flann'
               to feature name to use Flann-based matcher instead of bruteforce.
Press left mouse button on a feature point to see its matching point.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
# built-in modules
import itertools as it
from multiprocessing.pool import ThreadPool
# local modules
from common import Timer
from find_obj import init_feature, filter_matches, explore_match
def affine_skew(tilt, phi, img, mask=None):
'''
affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
Ai - is an affine transform matrix from skew_img to img
'''
h, w = img.shape[:2]
if mask is None:
mask = np.zeros((h, w), np.uint8)
mask[:] = 255
A = np.float32([[1, 0, 0], [0, 1, 0]])
if phi != 0.0:
phi = np.deg2rad(phi)
s, c = np.sin(phi), np.cos(phi)
A = np.float32([[c,-s], [ s, c]])
corners = [[0, 0], [w, 0], [w, h], [0, h]]
tcorners = np.int32( np.dot(corners, A.T) )
x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
A = np.hstack([A, [[-x], [-y]]])
img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
if tilt != 1.0:
s = 0.8*np.sqrt(tilt*tilt-1)
img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
A[0] /= tilt
if phi != 0.0 or tilt != 1.0:
h, w = img.shape[:2]
mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
Ai = cv2.invertAffineTransform(A)
return img, mask, Ai
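# Quick sketch of how the returned Ai is meant to be used (it mirrors what
# affine_detect does below; the variable names here are illustrative only):
#
#     timg, tmask, Ai = affine_skew(2.0, 30.0, img)
#     # a keypoint found at (x, y) in timg maps back to the original frame as
#     x0, y0 = np.dot(Ai, (x, y, 1))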
def affine_detect(detector, img, mask=None, pool=None):
'''
affine_detect(detector, img, mask=None, pool=None) -> keypoints, descrs
    Apply a set of affine transformations to the image, detect keypoints and
reproject them into initial image coordinates.
See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
ThreadPool object may be passed to speedup the computation.
'''
    params = [(1.0, 0.0)]
    # Tilts are t = 2^(k/2) for k = 1..5 (roughly 1.41, 2, 2.83, 4, 5.66); for
    # each tilt, in-plane rotations phi are sampled every 72/t degrees in [0, 180).
    for t in 2**(0.5*np.arange(1,6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))
def f(p):
t, phi = p
timg, tmask, Ai = affine_skew(t, phi, img)
keypoints, descrs = detector.detectAndCompute(timg, tmask)
for kp in keypoints:
x, y = kp.pt
kp.pt = tuple( np.dot(Ai, (x, y, 1)) )
if descrs is None:
descrs = []
return keypoints, descrs
keypoints, descrs = [], []
if pool is None:
ires = it.imap(f, params)
else:
ires = pool.imap(f, params)
for i, (k, d) in enumerate(ires):
print('affine sampling: %d / %d\r' % (i+1, len(params)), end='')
keypoints.extend(k)
descrs.extend(d)
print()
return keypoints, np.array(descrs)
if __name__ == '__main__':
print(__doc__)
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
opts = dict(opts)
feature_name = opts.get('--feature', 'brisk-flann')
try:
fn1, fn2 = args
except:
fn1 = '../data/aero1.jpg'
fn2 = '../data/aero3.jpg'
img1 = cv2.imread(fn1, 0)
img2 = cv2.imread(fn2, 0)
detector, matcher = init_feature(feature_name)
if img1 is None:
print('Failed to load fn1:', fn1)
sys.exit(1)
if img2 is None:
print('Failed to load fn2:', fn2)
sys.exit(1)
if detector is None:
print('unknown feature:', feature_name)
sys.exit(1)
print('using', feature_name)
pool=ThreadPool(processes = cv2.getNumberOfCPUs())
kp1, desc1 = affine_detect(detector, img1, pool=pool)
kp2, desc2 = affine_detect(detector, img2, pool=pool)
print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))
def match_and_draw(win):
with Timer('matching'):
raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
if len(p1) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# do not draw outliers (there will be a lot of them)
kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
else:
H, status = None, None
print('%d matches found, not enough for homography estimation' % len(p1))
explore_match(win, img1, img2, kp_pairs, None, H)
match_and_draw('affine find_obj')
cv2.waitKey()
cv2.destroyAllWindows()
| {
"content_hash": "27f62bdd7e8ef5f51be44f4561efd3cb",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 101,
"avg_line_length": 32.838509316770185,
"alnum_prop": 0.5969358804615094,
"repo_name": "zzjkf2009/Midterm_Astar",
"id": "f2b0e7999535df29dd4e1c3c1221d3f04f13db61",
"size": "5310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencv/samples/python/asift.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7310"
},
{
"name": "C",
"bytes": "1157337"
},
{
"name": "C#",
"bytes": "42254"
},
{
"name": "C++",
"bytes": "28411379"
},
{
"name": "CMake",
"bytes": "810399"
},
{
"name": "CSS",
"bytes": "4784"
},
{
"name": "Clojure",
"bytes": "1487"
},
{
"name": "Cuda",
"bytes": "1699447"
},
{
"name": "HLSL",
"bytes": "3314"
},
{
"name": "HTML",
"bytes": "220169"
},
{
"name": "Java",
"bytes": "831255"
},
{
"name": "JavaScript",
"bytes": "113900"
},
{
"name": "Makefile",
"bytes": "2690"
},
{
"name": "Objective-C",
"bytes": "44625"
},
{
"name": "Objective-C++",
"bytes": "211774"
},
{
"name": "Perl",
"bytes": "15867"
},
{
"name": "PowerShell",
"bytes": "14589"
},
{
"name": "Python",
"bytes": "911972"
},
{
"name": "Scala",
"bytes": "5683"
},
{
"name": "Shell",
"bytes": "16088"
},
{
"name": "TeX",
"bytes": "34757"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from functools import wraps
import time
import celery
from celery.schedules import crontab
from elasticsearch.helpers import scan
from elasticsearch.exceptions import NotFoundError
from oclubs.access import done, database, elasticsearch, redis
from oclubs.access.redis import r_url_celery
from oclubs.app import app as flask_app
from oclubs.enums import UserType
from oclubs.objs import Activity, Club, User, Upload
from oclubs.objs.base import Property
app = celery.Celery(
'oclubsbackend',
backend=r_url_celery + '1',
broker=r_url_celery + '2'
)
app.conf.update(
CELERY_TASK_SERIALIZER='json',
CELERY_ACCEPT_CONTENT=['json'], # Ignore other content
CELERY_RESULT_SERIALIZER='json',
CELERY_TASK_RESULT_EXPIRES=30 * 24 * 3600,
CELERY_TIMEZONE='Asia/Shanghai',
CELERYBEAT_SCHEDULE={
'rebuild_elasticsearch_night': {
'task': 'oclubs.worker.rebuild_elasticsearch',
'schedule': crontab(minute=47, hour=3),
},
# DISABLED due to admin unable to fetch new account passwords
# 'refresh_user_holiday_weekend_night': {
# 'task': 'oclubs.worker.refresh_user',
# 'schedule': crontab(minute=23, hour=2, day_of_week='sunday',
# month_of_year='1,2,7,8'),
# },
}
)
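# Illustrative only: with the broker/backend configured above, the worker and
# the beat scheduler would typically be launched along these lines (exact flags
# depend on the deployment and Celery version; treat this as an assumption):
#
#     celery worker -A oclubs.worker --loglevel=INFO
#     celery beat -A oclubs.worker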
def handle_app_context(func):
@wraps(func)
def decorated_function(*args, **kwargs):
with flask_app.app_context():
try:
ret = func(*args, **kwargs)
done(True)
return ret
except:
done(False)
raise
return decorated_function
# This part does not work anymore, do not use this
# @app.task()
# @handle_app_context
# def refresh_user(authority):
# ours = database.fetch_multirow(
# 'user',
# {
# 'user_login_name': 'sid',
# 'user_id': 'uid',
# },
# [
# ('!=', 'user_password', None),
# ('=', 'user_type', UserType.STUDENT.value)
# ]
# )
# ours = {data['sid']: data['uid'] for data in ours}
#
# union = set(ours).union(authority)
#
# for sid in union:
# if sid in authority:
# if sid in ours:
# _update_account.delay(ours[sid], authority[sid])
# else:
# _create_account.delay(authority[sid])
# else:
# if sid in ours:
# _disable_account.delay(ours[sid])
# else:
# assert False # This is an impossibility
@app.task()
@handle_app_context
def _disable_account(uid):
u = User(uid)
u.password = None
u.grade = None
u.currentclass = None
print 'DISABLED USER ID %d' % u.id
def _user_refresh(u, authority):
u.studentid = authority['UNIONID']
u.passportname = authority['NAMEEN']
if 'GRADENAME' in authority:
u.grade = int(authority['GRADENAME'])
if 'STUCLASSNAME' in authority:
u.currentclass = int(authority['STUCLASSNAME'])
if 'EMAILADDRESS' in authority:
u.email = authority['EMAILADDRESS']
@app.task()
@handle_app_context
def _create_account(authority, _type='STUDENT', haspassword=True):
u = User.new()
u.studentid = ''
u.passportname = ''
u.email = ''
u.phone = None
u.grade = None
u.currentclass = None
u.initalized = False
_user_refresh(u, authority)
u.password = None
u.nickname = u.passportname
u.picture = Upload(-1)
u.type = UserType[_type]
u.create(True)
if haspassword:
        # NOTE: `password` is not defined in this function as written; the
        # surrounding project presumably generates a temporary password for
        # the new account before caching it here for admin pickup.
        redis.RedisCache('tempuserpw:' + str(u.id), 3600 * 48).set(password)
print 'CREATED USER ID %d' % u.id
@app.task()
@handle_app_context
def _update_account(uid, authority):
u = User(uid)
_user_refresh(u, authority)
print 'UPDATED USER ID %d' % u.id
@app.task()
@handle_app_context
def rebuild_elasticsearch():
types = {
Club: {
'conds': [('=', 'club_inactive', False)]
},
Activity: {}
}
for cls, params in types.items():
db_ids = database.fetch_onecol(
cls.table,
cls.identifier,
params.get('conds', [])
)
db_ids = set(int(x) for x in db_ids)
if db_ids:
db_max = max(db_ids)
else:
db_max = 0
try:
es_ids = scan(
elasticsearch.es,
index='oclubs',
doc_type=cls.table,
size=10000000,
query={
'query': {'match_all': {}},
'size': 10000,
'fields': ['_id']
})
es_ids = (d['_id'] for d in es_ids)
es_ids = set(int(x) for x in es_ids)
except NotFoundError:
es_ids = []
if es_ids:
es_max = max(es_ids)
else:
es_max = 0
max_id = max(db_max, es_max)
cls_searchprops = [
prop.name for prop in [
getattr(cls, propname) for propname in dir(cls)
] if isinstance(prop, Property) and prop.search
]
for i in xrange(1, max_id + 1):
time.sleep(0.01)
if i in db_ids:
obj = cls(i)
db_data = {}
for propname in cls_searchprops:
db_data[propname] = (
getattr(cls, propname).search(getattr(obj, propname)))
if i in es_ids:
es_data = elasticsearch.get(cls.table, i)
if db_data == es_data:
print 'TYPE %s ID %d MATCH' % (cls.table, i)
else:
print 'UPDATED ES TYPE %s ID %d' % (cls.table, i)
elasticsearch.update(cls.table, i, db_data)
else:
print 'CREATED ES TYPE %s ID %d' % (cls.table, i)
elasticsearch.create(cls.table, i, db_data)
else:
if i in es_ids:
print 'DELETED ES TYPE %s ID %d' % (cls.table, i)
elasticsearch.delete(cls.table, i)
else:
print 'TYPE %s ID %d DOES NOT EXIST' % (cls.table, i)
pass
| {
"content_hash": "9691816b40833af0feee559ad5dd4a95",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 78,
"avg_line_length": 28.725225225225227,
"alnum_prop": 0.5220323035910303,
"repo_name": "SHSIDers/oclubs",
"id": "e88045cbe6feba07da0ddf6f65d7a9d8342e8ad0",
"size": "6427",
"binary": false,
"copies": "1",
"ref": "refs/heads/centos7",
"path": "oclubs/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54317"
},
{
"name": "HTML",
"bytes": "15923"
},
{
"name": "JavaScript",
"bytes": "14697"
},
{
"name": "Puppet",
"bytes": "12155"
},
{
"name": "Python",
"bytes": "257513"
},
{
"name": "Shell",
"bytes": "8110"
}
],
"symlink_target": ""
} |
"""This module contains methods to produce a formatted string to be sent through the Telegram API.
"""
import math
import html
import itertools
import logging
import exceptions
import constants
from objects.answer import TelegramAnswer
from objects.answer import TelegramCallbackAnswer
from objects.answer import TelegramInlineAnswer
logger = logging.getLogger("output_formatter")
def _escapeHtml(text):
text = text.replace("&", "&")
text = text.replace("<","<")
text = text.replace(">", ">")
return text
def _bold(text):
return "<b>" + text + "</b>"
def _italic(text):
return "<i>" + text + "</i>"
def _link(link, label):
return "<a href=\"" + link + "\">" + label + "</a>"
def _appendList(originalString, listToAppend, separator, ending):
for elem in listToAppend:
originalString += _escapeHtml(elem) + separator
offset = len(separator)
originalString = originalString[:-offset]
originalString += ending
return originalString
def _formatGameTitle(game):
s = _bold(_escapeHtml(game.name.title()))
if game.year is not None:
s += " (" + game.year + ")"
s += "\n"
return s
def _formatGameInfo(game):
s = ""
if game.numDesigners() > 0:
if 1 == game.numDesigners():
s += _italic("Designer: ")
else:
s += _italic("Designers: ")
s = _appendList(s, game.getDesigners(), ", ", ".\n")
if game.numArtists() > 0:
if 1 == game.numArtists():
s += _italic("Artist: ")
else:
s += _italic("Artists: ")
s = _appendList(s, game.getArtists(), ", ", ".\n")
if game.average is not None:
try:
rating = str(round(float(game.average), 1))
if "." in rating:
rating = rating.rstrip("0").rstrip(".")
# remove decimal part if zero
s += _italic("Rating: ") + rating + "\n"
except ValueError:
# just skip the average, which is likely not available
logger.info("Game average is not a number: " + game.average)
if game.rank is not None:
s += _italic("Rank: ") + game.rank + "\n"
if game.playingTime is not None and "0" != game.playingTime:
s += _italic("Playing time: ") + game.playingTime + " minutes.\n"
if game.minPlayers is not None:
s += _italic("Players: ") + game.minPlayers
if game.maxPlayers is not None:
if game.minPlayers is None:
s += _italic("Players: ") + game.maxPlayers
elif game.maxPlayers > game.minPlayers:
s += " - " + game.maxPlayers
return s + "\n"
def _formatGameDescription(game):
if len(game.description) > 800:
return _escapeHtml(html.unescape(game.description[:800])) + "...\n"
else:
return _escapeHtml(html.unescape(game.description)) + "\n"
def _formatGameThumbnail(game):
if game.thumbnail is not None:
return _link(game.thumbnail, "Cover") + "\n"
return ""
def _formatGameLink(game):
return _link(game.link, "Read on BoardGameGeek.") + "\n"
def _formatGameBodyLess(game):
"""Formats the body of an answer containing a game, inserting only basic info.
Args:
game (game.Game): an object containing all the information on the game.
Returns:
str: a formatted string with the information to be sent.
"""
s = _formatGameTitle(game) + "\n"
s += _formatGameInfo(game)
s += _formatGameThumbnail(game)
s += _formatGameLink(game)
return s
def _formatGameBodyMore(game):
"""Formats the body of an answer containing a game, inserting additional info.
Args:
game (game.Game): an object containing all the information on the game.
Returns:
str: a formatted string with the information to be sent.
"""
s = _formatGameTitle(game) + "\n"
s += _formatGameDescription(game)
s += _formatGameLink(game)
return s
def _formatGameListBody(gameList):
"""Formats the body of an answer containing a game list.
Args:
gameList (game.GameList): an object containing all the information on the game list.
Returns:
str: a formatted string with the information to be sent.
"""
s = ""
offset = gameList.offset
limit = offset + constants.LIST_PAGE_SIZE
count = offset + 1
for game in itertools.islice(gameList.gameList, offset, limit):
s += u"►" # Unicode symbol to indicate element in list
s += " " + str(count) + "." # element number
s += " " + _bold(_escapeHtml(game.name.title()))
if game.year is not None:
s += " (" + game.year + ")"
s += " - ID: /" + game.id_ + "\n"
count += 1
return s
def formatGame(game, more=False):
"""Formats an answer containing a game, creating the body and attaching the markup.
Args:
game (game.Game): an object containing all the information on the game.
more (bool): True if the answer should show additional info.
Returns:
.answer.TelegramAnswer: an object containing all the information to be sent.
"""
if(more):
formattedGameBody = _formatGameBodyMore(game)
disableWebPagePreview = True
text = "Game Info"
callback_data = "gl" + str(game.id_)
else:
formattedGameBody = _formatGameBodyLess(game)
disableWebPagePreview = False
text = "Description"
callback_data = "gm" + str(game.id_)
keyboard = [[dict(text=text, callback_data=callback_data), dict(text="Share", switch_inline_query="i " + game.id_)]]
return TelegramAnswer(formattedGameBody, inlineKeyboardMarkup=keyboard, disableWebPagePreview=disableWebPagePreview)
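# For reference (illustrative): the inline keyboard built above serializes to
# Telegram's InlineKeyboardMarkup row structure, e.g. for game id 1234 in the
# "less" view:
#
#     [[{"text": "Description", "callback_data": "gm1234"},
#       {"text": "Share", "switch_inline_query": "i 1234"}]]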
def formatInlineGame(game):
"""Formats an answer containing a game to be sent inline.
Args:
game (game.Game): an object containing all the information on the game.
Returns:
.answer.TelegramInlineAnswer: an object containing all the information to be sent.
"""
formattedGameBody = _formatGameBodyLess(game)
return TelegramInlineAnswer(formattedGameBody, game.id_, game.name.title(), game.thumbnail)
def formatGameList(gameList):
"""Formats an answer containing a game list, creating the body and attaching the markup.
Args:
gameList (game.GameList): an object containing all the information on the game list.
Returns:
.answer.TelegramAnswer: an object containing all the information to be sent.
"""
formattedGameListBody = _formatGameListBody(gameList)
keyboard = []
offset = gameList.offset
totalSize = len(gameList.gameList)
callback_data = gameList.originalSearch + constants.CALLBACK_DATA_SEPARATOR + str(offset)
buttonList = []
if offset > 0:
entry = dict(text="Back", callback_data="lp" + callback_data)
buttonList.append(entry)
if offset + constants.LIST_PAGE_SIZE < totalSize:
entry = dict(text="Next", callback_data="ln" + callback_data)
buttonList.append(entry)
if buttonList:
keyboard.append(buttonList)
return TelegramAnswer(formattedGameListBody, inlineKeyboardMarkup=keyboard if keyboard else None)
"""Following methods format various error messages."""
def formatNoResultFound():
return TelegramAnswer("No result found!")
def formatBggUnreachable():
return TelegramAnswer("Sorry, it was not possible to contact Boardgamegeek servers. Try again later!")
def formatCommandNotSupported(command):
return TelegramAnswer("Sorry, " + _bold("/" + command) + " is not a valid command.")
def formatHistoryNotFound():
return TelegramAnswer("Sorry, last search not found in the history. Try to start a new search.")
def formatHistoryNotFoundCallback():
return TelegramCallbackAnswer("Sorry, last search not found in the history. Try to start a new search.")
def formatGameListIndexNotValid(index):
return TelegramAnswer("Error, " + index + " is not a valid search index (out of bound).")
def formatStaleList():
return TelegramCallbackAnswer("The inline keyboard only works with the most recent searches.")
def formatBadCallbackData():
return TelegramCallbackAnswer("This callback action is not supported, please try to start a new search.")
def formatHelp():
"""Formats a description of this bot usage.
Returns:
.answer.TelegramAnswer: The description of how to use this bot.
"""
s = "This bot brings the power of " + _link("https://boardgamegeek.com/", "BoardGameGeek") + " into Telegram. The sky's the limit now."
s += "\n\n" + _bold("Commands:") + "\n"
for c in constants.COMMAND_DESCRIPTIONS:
s += c + " - " + constants.COMMAND_DESCRIPTIONS[c] + "\n"
s += "\n" + _bold("Inline Commands:") + "\n"
for c in constants.INLINE_COMMAND_DESCRIPTIONS:
s += c + " - " + constants.INLINE_COMMAND_DESCRIPTIONS[c] + "\n"
s += "\nFor info about how inline mode works, see" + _link("https://telegram.org/blog/inline-bots", " the official guide") + "."
return TelegramAnswer(s)
| {
"content_hash": "ab90914d2145803ff8b9b304991a1fbc",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 139,
"avg_line_length": 36.53629032258065,
"alnum_prop": 0.6447411985432071,
"repo_name": "crash-g/BoardGameBot",
"id": "9909c766685c047fbfdbfa77371849e33e2cd34c",
"size": "9061",
"binary": false,
"copies": "1",
"ref": "refs/heads/github",
"path": "boardgamebot/tools/output_formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70996"
}
],
"symlink_target": ""
} |
from django.contrib.gis import forms
from django.contrib.gis.db.models.lookups import gis_lookups
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.core.exceptions import ImproperlyConfigured
from django.db.models.expressions import Expression
from django.db.models.fields import Field
from django.utils import six
from django.utils.translation import ugettext_lazy as _
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeoSelectFormatMixin(object):
def select_format(self, compiler, sql, params):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
connection = compiler.connection
srid = compiler.query.get_context('transformed_srid')
if srid:
sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid)
else:
sel_fmt = '%s'
if connection.ops.select:
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = connection.ops.select % sel_fmt
return sel_fmt % sql, params
class BaseSpatialField(Field):
"""
The Base GIS Field.
It's used as a base class for GeometryField and RasterField. Defines
properties that are common to all GIS fields such as the characteristics
of the spatial reference system of the field.
"""
description = _("The base GIS field.")
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
"""
The initialization function for base spatial fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
super(BaseSpatialField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(BaseSpatialField, self).deconstruct()
# Always include SRID for less fragility; include spatial index if it's
# not the default value.
kwargs['srid'] = self.srid
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
return name, path, args, kwargs
def db_type(self, connection):
return connection.ops.geo_db_type(self)
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the BaseSpatialField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
units_name = self.units_name(connection)
# Some backends like MySQL cannot determine units name. In that case,
# test if srid is 4326 (WGS84), even if this is over-simplification.
return units_name.lower() in self.geodetic_units if units_name else self.srid == 4326
def get_placeholder(self, value, compiler, connection):
"""
Returns the placeholder for the spatial column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value, compiler)
class GeometryField(GeoSelectFormatMixin, BaseSpatialField):
"""
The base Geometry field -- maps to the OpenGIS Specification Geometry type.
"""
description = _("The base Geometry field -- maps to the OpenGIS Specification Geometry type.")
form_class = forms.GeometryField
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
def __init__(self, verbose_name=None, dim=2, geography=False, **kwargs):
"""
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments:
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the dimension of the geometry field.
self.dim = dim
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(verbose_name=verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Include kwargs if they're not the default values.
if self.dim != 2:
kwargs['dim'] = self.dim
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# ### Routines specific to GeometryField ###
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
if isinstance(value, Expression):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def from_db_value(self, value, expression, connection, context):
if value:
if not isinstance(value, Geometry):
value = Geometry(value)
srid = value.srid
if not srid and self.srid != -1:
value.srid = self.srid
return value
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
# ### Routines overloaded from Field ###
def contribute_to_class(self, cls, name, **kwargs):
super(GeometryField, self).contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, SpatialProxy(Geometry, self))
def formfield(self, **kwargs):
defaults = {'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
}
defaults.update(kwargs)
if (self.dim > 2 and 'widget' not in kwargs and
not getattr(defaults['form_class'].widget, 'supports_3d', False)):
defaults['widget'] = forms.Textarea
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
# special case for isnull lookup
if lookup_type == 'isnull':
return []
elif lookup_type in self.class_lookups:
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if self.class_lookups[lookup_type].distance:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, Expression):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'contains':
# 'contains' name might conflict with the "normal" contains lookup,
# for which the value is not prepared, but left as-is.
return self.get_prep_value(value)
return super(GeometryField, self).get_prep_lookup(lookup_type, value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if not value:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
for klass in gis_lookups.values():
GeometryField.register_lookup(klass)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
class ExtentField(GeoSelectFormatMixin, Field):
"Used as a return value from an extent aggregate"
description = _("Extent Aggregate Field")
def get_internal_type(self):
return "ExtentField"
class RasterField(BaseSpatialField):
"""
Raster field for GeoDjango -- evaluates into GDALRaster objects.
"""
description = _("Raster Field")
geom_type = 'RASTER'
def __init__(self, *args, **kwargs):
if not HAS_GDAL:
raise ImproperlyConfigured('RasterField requires GDAL.')
super(RasterField, self).__init__(*args, **kwargs)
def _check_connection(self, connection):
# Make sure raster fields are used only on backends with raster support.
if not connection.features.gis_enabled or not connection.features.supports_raster:
raise ImproperlyConfigured('Raster fields require backends with raster support.')
def db_type(self, connection):
self._check_connection(connection)
return super(RasterField, self).db_type(connection)
def from_db_value(self, value, expression, connection, context):
return connection.ops.parse_raster(value)
def get_db_prep_value(self, value, connection, prepared=False):
self._check_connection(connection)
# Prepare raster for writing to database.
if not prepared:
value = connection.ops.deconstruct_raster(value)
return super(RasterField, self).get_db_prep_value(value, connection, prepared)
def contribute_to_class(self, cls, name, **kwargs):
super(RasterField, self).contribute_to_class(cls, name, **kwargs)
# Importing GDALRaster raises an exception on systems without gdal.
from django.contrib.gis.gdal import GDALRaster
# Setup for lazy-instantiated Raster object. For large querysets, the
# instantiation of all GDALRasters can potentially be expensive. This
# delays the instantiation of the objects to the moment of evaluation
# of the raster attribute.
setattr(cls, self.attname, SpatialProxy(GDALRaster, self))
| {
"content_hash": "769c26983b4b2d3ead0b9a924c553d15",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 117,
"avg_line_length": 39.86486486486486,
"alnum_prop": 0.6248587570621469,
"repo_name": "yephper/django",
"id": "20b6a50687e68059ccbc6ac8d0811d5dd49d6e54",
"size": "17700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/models/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
from flask import _app_ctx_stack
from flask import current_app as app
from flask import Flask, request, session, url_for, redirect, \
     render_template, abort, g, flash
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from datetime import datetime
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
top = _app_ctx_stack.top
if not hasattr(top, 'sqlite_db'):
top.sqlite_db = sqlite3.connect(app.config['DATABASE'])
top.sqlite_db.row_factory = sqlite3.Row
return top.sqlite_db
def close_database(exception):
"""Closes the database again at the end of the request."""
top = _app_ctx_stack.top
if hasattr(top, 'sqlite_db'):
top.sqlite_db.close()
def init_db():
"""Initializes the database."""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def initdb_command():
"""Creates the database tables."""
init_db()
print('Initialized the database.')
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = get_db().execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = query_db('select user_id from user where username = ?',
[username], one=True)
return rv[0] if rv else None
def format_datetime(timestamp):
"""Format a timestamp for display."""
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')
def gravatar_url(email, size=80):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
def url_for(string, data=None):
    # Note: this intentionally shadows the url_for imported from flask above;
    # it simply interpolates ``data`` into ``string`` when data is given.
    if data is not None:
        return string % data
    else:
        return string
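def _example_usage(flask_app):
    """A minimal sketch of how the helpers above fit together.

    ``flask_app`` is assumed to be a configured ``Flask`` instance whose
    ``DATABASE`` setting points at a SQLite file and which ships a
    ``schema.sql`` resource (assumptions; neither is created in this module).
    """
    with flask_app.app_context():
        init_db()
        uid = get_user_id('alice')  # None when the user does not exist
        return query_db('select * from user where user_id = ?', [uid])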
| {
"content_hash": "67eba6559bd22efa73f741226b1286a9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 32.25,
"alnum_prop": 0.6501937984496124,
"repo_name": "bridgetnoelleee/project2",
"id": "e2b3a92eba2fe4c95b358c2246d3622aa8572d97",
"size": "2064",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "helpers/functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3080"
},
{
"name": "HTML",
"bytes": "4141"
},
{
"name": "Python",
"bytes": "10284"
}
],
"symlink_target": ""
} |
import django_admin_blocks
from linkcheck.views import get_status_message
"""Legacy internal helper"""
def notification():
return get_status_message()
django_admin_blocks.register({
'errors': (notification,),
})
| {
"content_hash": "f856ba1cf70948710fa90a0af1e5adbe",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 46,
"avg_line_length": 16.142857142857142,
"alnum_prop": 0.7212389380530974,
"repo_name": "claudep/django-linkcheck",
"id": "e800fb31bbb144caf4c3e4aaacc2709d606dcf06",
"size": "226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linkcheck/admin_blocks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "11357"
},
{
"name": "Python",
"bytes": "81297"
}
],
"symlink_target": ""
} |
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import handle_data_util
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
# list_ops -> control_flow_ops -> tensor_array_ops -> list_ops
control_flow_ops = LazyLoader(
"control_flow_ops", globals(),
"tensorflow.python.ops.control_flow_ops")
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def empty_tensor_list(element_shape,
element_dtype,
max_num_elements=None,
name=None):
if max_num_elements is None:
max_num_elements = -1
return gen_list_ops.empty_tensor_list(
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
max_num_elements=max_num_elements,
name=name)
def _set_handle_data(list_handle, element_shape, element_dtype):
"""Sets type information on `list_handle` for consistency with graphs."""
# TODO(b/169968286): It would be better if we had a consistent story for
# creating handle data from eager operations (shared with VarHandleOp).
if isinstance(list_handle, ops.EagerTensor):
if tensor_util.is_tensor(element_shape):
element_shape = tensor_shape.TensorShape(None)
elif not isinstance(element_shape, tensor_shape.TensorShape):
element_shape = tensor_shape.TensorShape(element_shape)
handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()
handle_data.is_set = True
handle_data.shape_and_type.append(
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=element_shape.as_proto(),
dtype=element_dtype.as_datatype_enum,
specialized_type=types_pb2.ST_TENSOR_LIST))
list_handle._handle_data = handle_data # pylint: disable=protected-access
def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):
result = gen_list_ops.tensor_list_reserve(
element_shape=_build_element_shape(element_shape),
num_elements=num_elements,
element_dtype=element_dtype,
name=name)
# TODO(b/169968286): gen_ops needs to ensure the metadata is properly
# populated for eager operations.
_set_handle_data(result, element_shape, element_dtype)
return result
def tensor_list_from_tensor(tensor, element_shape, name=None):
tensor = ops.convert_to_tensor(tensor)
result = gen_list_ops.tensor_list_from_tensor(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
name=name)
_set_handle_data(result, tensor.shape, tensor.dtype)
return result
def tensor_list_get_item(input_handle, index, element_dtype, element_shape=None,
name=None):
return gen_list_ops.tensor_list_get_item(
input_handle=input_handle,
index=index,
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
name=name)
def tensor_list_pop_back(input_handle, element_dtype, name=None):
return gen_list_ops.tensor_list_pop_back(
input_handle=input_handle,
element_shape=-1,
element_dtype=element_dtype,
name=name)
def tensor_list_gather(input_handle,
indices,
element_dtype,
element_shape=None,
name=None):
return gen_list_ops.tensor_list_gather(
input_handle=input_handle,
indices=indices,
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
name=name)
def tensor_list_scatter(tensor,
indices,
element_shape=None,
input_handle=None,
name=None):
"""Returns a TensorList created or updated by scattering `tensor`."""
tensor = ops.convert_to_tensor(tensor)
if input_handle is not None:
output_handle = gen_list_ops.tensor_list_scatter_into_existing_list(
input_handle=input_handle, tensor=tensor, indices=indices, name=name)
handle_data_util.copy_handle_data(input_handle, output_handle)
return output_handle
else:
output_handle = gen_list_ops.tensor_list_scatter_v2(
tensor=tensor,
indices=indices,
element_shape=_build_element_shape(element_shape),
num_elements=-1,
name=name)
_set_handle_data(output_handle, element_shape, tensor.dtype)
return output_handle
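def _example_scatter_roundtrip():
  """A small sketch (assuming eager execution or use inside a tf.function):
  scatter rows of a dense tensor into a TensorList and gather them back.
  The indices and values are illustrative only.
  """
  values = ops.convert_to_tensor([[1.0], [2.0]])
  handle = tensor_list_scatter(values, indices=[3, 0], element_shape=[1])
  # Index 3 now holds values[0]; gathering it yields a [1, 1] tensor [[1.0]].
  return tensor_list_gather(handle, [3], element_dtype=dtypes.float32)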
def tensor_list_stack(input_handle,
element_dtype,
num_elements=-1,
element_shape=None,
name=None):
return gen_list_ops.tensor_list_stack(
input_handle=input_handle,
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
num_elements=num_elements,
name=name)
def tensor_list_concat(input_handle, element_dtype, element_shape=None,
name=None):
# Ignore the lengths output of TensorListConcat. It is only used during
# gradient computation.
return gen_list_ops.tensor_list_concat_v2(
input_handle=input_handle,
element_dtype=element_dtype,
element_shape=_build_element_shape(element_shape),
leading_dims=ops.convert_to_tensor([], dtype=dtypes.int64),
name=name)[0]
def tensor_list_split(tensor, element_shape, lengths, name=None):
return gen_list_ops.tensor_list_split(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
lengths=lengths,
name=name)
def tensor_list_set_item(input_handle,
index,
item,
resize_if_index_out_of_bounds=False,
name=None):
"""Sets `item` at `index` in input list."""
if resize_if_index_out_of_bounds:
input_list_size = gen_list_ops.tensor_list_length(input_handle)
# TODO(srbs): This could cause some slowdown. Consider fusing resize
# functionality in the SetItem op.
input_handle = control_flow_ops.cond(
index >= input_list_size,
lambda: gen_list_ops.tensor_list_resize( # pylint: disable=g-long-lambda
input_handle, index + 1),
lambda: input_handle)
output_handle = gen_list_ops.tensor_list_set_item(
input_handle=input_handle, index=index, item=item, name=name)
handle_data_util.copy_handle_data(input_handle, output_handle)
return output_handle
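def _example_push_and_stack():
  """A small sketch of the typical round trip (assuming eager execution or use
  inside a tf.function): create an empty list, push scalars, and stack the
  result back into a dense tensor.
  """
  handle = empty_tensor_list(element_shape=[], element_dtype=dtypes.int32)
  for value in (1, 2, 3):
    handle = gen_list_ops.tensor_list_push_back(handle, value)
  # Yields a rank-1 int32 tensor equal to [1, 2, 3].
  return tensor_list_stack(handle, element_dtype=dtypes.int32)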
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
return gen_list_ops.tensor_list_pop_back(
dresult,
element_shape=array_ops.shape(op.inputs[1]),
element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
if dlist is None:
dlist = empty_tensor_list(
element_dtype=delement.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
if delement is None:
delement = array_ops.zeros_like(op.outputs[1])
return gen_list_ops.tensor_list_push_back(dlist, delement), None
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:]), None
@ops.RegisterGradient("TensorListConcat")
@ops.RegisterGradient("TensorListConcatV2")
def _TensorListConcatGrad(op, dtensor, unused_dlengths):
"""Gradient function for TensorListConcat."""
dlist = tensor_list_split(
dtensor,
element_shape=gen_list_ops.tensor_list_element_shape(
op.inputs[0], shape_type=dtypes.int32),
lengths=op.outputs[1])
if op.type == "TensorListConcatV2":
return dlist, None, None
else:
return dlist
@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
tensor, _, lengths = op.inputs
element_shape = array_ops.slice(array_ops.shape(tensor), [1], [-1])
element_shape = array_ops.concat([[-1], element_shape], axis=0)
return gen_list_ops.tensor_list_concat_v2(
dlist,
element_shape=element_shape,
leading_dims=lengths,
element_dtype=op.inputs[0].dtype)[0], None, None
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
"""Gradient for TensorListFromTensor."""
t = op.inputs[0]
if t.shape.dims and t.shape.dims[0].value is not None:
num_elements = t.shape.dims[0].value
else:
num_elements = None
if dlist is None:
dlist = empty_tensor_list(
element_dtype=t.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
tensor_grad = gen_list_ops.tensor_list_stack(
dlist,
element_shape=array_ops.slice(array_ops.shape(t), [1], [-1]),
element_dtype=t.dtype,
num_elements=num_elements)
shape_grad = None
return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
"""Gradient for TensorListGetItem."""
list_size = gen_list_ops.tensor_list_length(op.inputs[0])
list_grad = gen_list_ops.tensor_list_set_item(
gen_list_ops.tensor_list_reserve(
gen_list_ops.tensor_list_element_shape(op.inputs[0],
shape_type=dtypes.int32),
list_size, element_dtype=ditem.dtype),
index=op.inputs[1],
item=ditem)
index_grad = None
element_shape_grad = None
return list_grad, index_grad, element_shape_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
"""Gradient function for TensorListSetItem."""
_, index, item = op.inputs
list_grad = gen_list_ops.tensor_list_set_item(
dlist, index=index, item=array_ops.zeros_like(item))
index_grad = None
element_grad = tensor_list_get_item(
dlist,
index,
element_shape=array_ops.shape(item),
element_dtype=item.dtype)
return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListResize")
def _TensorListResizeGrad(op, dlist):
input_list, _ = op.inputs
input_list_size = gen_list_ops.tensor_list_length(input_list)
return gen_list_ops.tensor_list_resize(dlist, input_list_size), None
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
"""Gradient function for TensorListGather."""
input_list, indices, _ = op.inputs
element_shape = gen_list_ops.tensor_list_element_shape(
input_list, shape_type=dtypes.int32)
num_elements = gen_list_ops.tensor_list_length(input_list)
dlist = tensor_list_reserve(element_shape, num_elements, dtensor.dtype)
dlist = tensor_list_scatter(
tensor=dtensor, indices=indices, input_handle=dlist)
return dlist, None, None
@ops.RegisterGradient("TensorListScatter")
@ops.RegisterGradient("TensorListScatterV2")
def _TensorListScatterGrad(op, dlist):
"""Gradient function for TensorListScatter."""
tensor = op.inputs[0]
indices = op.inputs[1]
dtensor = gen_list_ops.tensor_list_gather(
dlist,
indices,
element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]),
element_dtype=tensor.dtype)
if op.type == "TensorListScatterV2":
return dtensor, None, None, None
else:
return dtensor, None, None
@ops.RegisterGradient("TensorListScatterIntoExistingList")
def _TensorListScatterIntoExistingListGrad(op, dlist):
"""Gradient function for TensorListScatterIntoExistingList."""
_, tensor, indices = op.inputs
dtensor = gen_list_ops.tensor_list_gather(
dlist,
indices,
element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]),
element_dtype=tensor.dtype)
zeros = array_ops.zeros_like(tensor)
dlist = tensor_list_scatter(zeros, indices, indices, input_handle=dlist)
return dlist, dtensor, None
def _build_element_shape(shape):
"""Converts shape to a format understood by list_ops for element_shape.
If `shape` is already a `Tensor` it is returned as-is. We do not perform a
type check here.
If shape is None or a TensorShape with unknown rank, -1 is returned.
  If shape is a scalar, an int32 tensor with an empty list is returned. Note
  we do not directly return the empty list, since ops.convert_to_tensor would
  convert it to a float32, which is not a valid type for element_shape.
If shape is a sequence of dims, None's in the list are replaced with -1. We
do not check the dtype of the other dims.
Args:
shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
be a None, scalar or Tensor).
Returns:
A None-free shape that can be converted to a tensor.
"""
if isinstance(shape, ops.Tensor):
return shape
if isinstance(shape, tensor_shape.TensorShape):
# `TensorShape.as_list` requires rank to be known.
shape = shape.as_list() if shape else None
# Shape is unknown.
if shape is None:
return -1
# Shape is numpy array or a scalar.
if isinstance(shape, (np.ndarray, np.generic)) or not shape:
return ops.convert_to_tensor(shape, dtype=dtypes.int32)
# Shape is a sequence of dimensions. Convert None dims to -1.
def convert(val):
if val is None:
return -1
if isinstance(val, ops.Tensor):
return val
if isinstance(val, tensor_shape.Dimension):
return val.value if val.value is not None else -1
return val
return [convert(d) for d in shape]
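def _example_build_element_shape():
  """Sketch of what `_build_element_shape` produces for a few inputs (the
  inputs are illustrative; see the docstring above for the general rules):

    None                                -> -1 (unknown shape)
    []                                  -> int32 tensor with no entries (scalars)
    [None, 3]                           -> [-1, 3]
    tensor_shape.TensorShape([2, None]) -> [2, -1]
  """
  return [
      _build_element_shape(None),
      _build_element_shape([]),
      _build_element_shape([None, 3]),
      _build_element_shape(tensor_shape.TensorShape([2, None])),
  ]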
| {
"content_hash": "19466454f1249a77cde4a9c48d489efe",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 81,
"avg_line_length": 35.478696741854634,
"alnum_prop": 0.6876236224922294,
"repo_name": "freedomtan/tensorflow",
"id": "f4da26ccb949cd83a6630879140779d51c64bd4e",
"size": "14845",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/list_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import copy
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine import resource
from heat.engine.resources.openstack.heat import instance_group as instgrp
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.tests.autoscaling import inline_templates
from heat.tests import common
from heat.tests import utils
class TestInstanceGroup(common.HeatTestCase):
def setUp(self):
super(TestInstanceGroup, self).setUp()
t = template_format.parse(inline_templates.as_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::InstanceGroup',
{'Size': 2, 'AvailabilityZones': ['zoneb'],
'LaunchConfigurationName': 'config'})
self.instance_group = instgrp.InstanceGroup('asg',
self.defn, self.stack)
def test_child_template(self):
self.instance_group._create_template = mock.Mock(return_value='tpl')
self.assertEqual('tpl', self.instance_group.child_template())
self.instance_group._create_template.assert_called_once_with(2)
def test_child_params(self):
expected = {'parameters': {},
'resource_registry': {
'OS::Heat::ScaledResource': 'AWS::EC2::Instance'}}
self.assertEqual(expected, self.instance_group.child_params())
def test_tags_default(self):
expected = [{'Value': u'asg',
'Key': 'metering.groupname'}]
self.assertEqual(expected, self.instance_group._tags())
def test_tags_with_extra(self):
self.instance_group.properties.data['Tags'] = [
{'Key': 'fee', 'Value': 'foo'}]
expected = [{'Key': 'fee', 'Value': 'foo'},
{'Value': u'asg',
'Key': 'metering.groupname'}]
self.assertEqual(expected, self.instance_group._tags())
def test_tags_with_metering(self):
self.instance_group.properties.data['Tags'] = [
{'Key': 'metering.fee', 'Value': 'foo'}]
expected = [{'Key': 'metering.fee', 'Value': 'foo'}]
self.assertEqual(expected, self.instance_group._tags())
def test_validate_launch_conf(self):
props = self.instance_group.properties.data
props['LaunchConfigurationName'] = 'urg_i_cant_spell'
creator = scheduler.TaskRunner(self.instance_group.create)
error = self.assertRaises(exception.ResourceFailure, creator)
self.assertIn('(urg_i_cant_spell) reference can not be found.',
six.text_type(error))
def test_validate_launch_conf_no_ref(self):
props = self.instance_group.properties.data
props['LaunchConfigurationName'] = 'JobServerConfig'
creator = scheduler.TaskRunner(self.instance_group.create)
error = self.assertRaises(exception.ResourceFailure, creator)
self.assertIn('(JobServerConfig) reference can not be',
six.text_type(error))
def test_handle_create(self):
self.instance_group.create_with_template = mock.Mock(return_value=None)
self.instance_group.validate_launchconfig = mock.Mock(
return_value=None)
self.instance_group._create_template = mock.Mock(return_value='{}')
self.instance_group.handle_create()
self.instance_group.validate_launchconfig.assert_called_once_with()
self.instance_group._create_template.assert_called_once_with(2)
self.instance_group.create_with_template.assert_called_once_with('{}')
def test_update_in_failed(self):
self.instance_group.state_set('CREATE', 'FAILED')
# to update the failed instance_group
self.instance_group.resize = mock.Mock(return_value=None)
self.instance_group.handle_update(self.defn, None, None)
self.instance_group.resize.assert_called_once_with(2)
def test_handle_delete(self):
self.instance_group.delete_nested = mock.Mock(return_value=None)
self.instance_group.handle_delete()
self.instance_group.delete_nested.assert_called_once_with()
def test_handle_update_size(self):
self.instance_group._try_rolling_update = mock.Mock(return_value=None)
self.instance_group.resize = mock.Mock(return_value=None)
props = {'Size': 5}
defn = rsrc_defn.ResourceDefinition(
'nopayload',
'AWS::AutoScaling::AutoScalingGroup',
props)
self.instance_group.handle_update(defn, None, props)
self.instance_group.resize.assert_called_once_with(5)
def test_attributes(self):
mock_members = self.patchobject(grouputils, 'get_members')
instances = []
for ip_ex in six.moves.range(1, 4):
inst = mock.Mock()
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
instances.append(inst)
mock_members.return_value = instances
res = self.instance_group._resolve_attribute('InstanceList')
self.assertEqual('2.1.3.1,2.1.3.2,2.1.3.3', res)
class TestLaunchConfig(common.HeatTestCase):
def create_resource(self, t, stack, resource_name):
        # subsequent resources may need to reference previously created resources
# use the stack's resource objects instead of instantiating new ones
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_update_metadata_replace(self):
"""Updating the config's metadata causes a config replacement."""
lc_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Resources": {
"JobServerConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Metadata": {"foo": "bar"},
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"KeyName" : "test",
}
}
}
}
'''
self.stub_ImageConstraint_validate()
self.stub_FlavorConstraint_validate()
self.stub_KeypairConstraint_validate()
t = template_format.parse(lc_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'JobServerConfig')
props = copy.copy(rsrc.properties.data)
metadata = copy.copy(rsrc.metadata_get())
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
rsrc.type(),
props,
metadata)
# Change nothing in the first update
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertEqual('bar', metadata['foo'])
metadata['foo'] = 'wibble'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
rsrc.type(),
props,
metadata)
# Changing metadata in the second update triggers UpdateReplace
updater = scheduler.TaskRunner(rsrc.update, update_snippet)
self.assertRaises(resource.UpdateReplace, updater)
class LoadbalancerReloadTest(common.HeatTestCase):
def test_Instances(self):
t = template_format.parse(inline_templates.as_template)
stack = utils.parse_stack(t)
lb = stack['ElasticLoadBalancer']
lb.update = mock.Mock(return_value=None)
defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::InstanceGroup',
{'Size': 2,
'AvailabilityZones': ['zoneb'],
"LaunchConfigurationName": "LaunchConfig",
"LoadBalancerNames": ["ElasticLoadBalancer"]})
group = instgrp.InstanceGroup('asg', defn, stack)
mock_members = self.patchobject(grouputils, 'get_member_refids')
mock_members.return_value = ['aaaa', 'bbb']
expected = rsrc_defn.ResourceDefinition(
'ElasticLoadBalancer',
'AWS::ElasticLoadBalancing::LoadBalancer',
{'Instances': ['aaaa', 'bbb'],
'Listeners': [{'InstancePort': u'80',
'LoadBalancerPort': u'80',
'Protocol': 'HTTP'}],
'AvailabilityZones': ['nova']},
metadata={},
deletion_policy='Delete'
)
group._lb_reload()
mock_members.assert_called_once_with(group, exclude=[])
lb.update.assert_called_once_with(expected)
def test_members(self):
t = template_format.parse(inline_templates.as_template)
t['Resources']['ElasticLoadBalancer'] = {
'Type': 'OS::Neutron::LoadBalancer',
'Properties': {
'protocol_port': 8080,
}
}
stack = utils.parse_stack(t)
lb = stack['ElasticLoadBalancer']
lb.update = mock.Mock(return_value=None)
defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::InstanceGroup',
{'Size': 2,
'AvailabilityZones': ['zoneb'],
"LaunchConfigurationName": "LaunchConfig",
"LoadBalancerNames": ["ElasticLoadBalancer"]})
group = instgrp.InstanceGroup('asg', defn, stack)
mock_members = self.patchobject(grouputils, 'get_member_refids')
mock_members.return_value = ['aaaa', 'bbb']
expected = rsrc_defn.ResourceDefinition(
'ElasticLoadBalancer',
'OS::Neutron::LoadBalancer',
{'protocol_port': 8080,
'members': ['aaaa', 'bbb']},
metadata={},
deletion_policy='Delete')
group._lb_reload()
mock_members.assert_called_once_with(group, exclude=[])
lb.update.assert_called_once_with(expected)
def test_lb_reload_invalid_resource(self):
t = template_format.parse(inline_templates.as_template)
t['Resources']['ElasticLoadBalancer'] = {
'Type': 'AWS::EC2::Volume',
'Properties': {
'AvailabilityZone': 'nova'
}
}
stack = utils.parse_stack(t)
lb = stack['ElasticLoadBalancer']
lb.update = mock.Mock(return_value=None)
defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::InstanceGroup',
{'Size': 2,
'AvailabilityZones': ['zoneb'],
"LaunchConfigurationName": "LaunchConfig",
"LoadBalancerNames": ["ElasticLoadBalancer"]})
group = instgrp.InstanceGroup('asg', defn, stack)
mock_members = self.patchobject(grouputils, 'get_member_refids')
mock_members.return_value = ['aaaa', 'bbb']
error = self.assertRaises(exception.Error,
group._lb_reload)
self.assertEqual(
"Unsupported resource 'ElasticLoadBalancer' in "
"LoadBalancerNames",
six.text_type(error))
def test_lb_reload_static_resolve(self):
t = template_format.parse(inline_templates.as_template)
properties = t['Resources']['ElasticLoadBalancer']['Properties']
properties['AvailabilityZones'] = {'Fn::GetAZs': ''}
self.patchobject(parser.Stack, 'get_availability_zones',
return_value=['abc', 'xyz'])
mock_members = self.patchobject(grouputils, 'get_member_refids')
mock_members.return_value = ['aaaabbbbcccc']
# Check that the Fn::GetAZs is correctly resolved
expected = {u'Properties': {'Instances': ['aaaabbbbcccc'],
u'Listeners': [{u'InstancePort': u'80',
u'LoadBalancerPort': u'80',
u'Protocol': u'HTTP'}],
u'AvailabilityZones': ['abc', 'xyz']},
u'DeletionPolicy': 'Delete',
u'Metadata': {}}
stack = utils.parse_stack(t, params=inline_templates.as_params)
lb = stack['ElasticLoadBalancer']
lb.state_set(lb.CREATE, lb.COMPLETE)
lb.handle_update = mock.Mock(return_value=None)
group = stack['WebServerGroup']
group._lb_reload()
lb.handle_update.assert_called_once_with(
mock.ANY, expected,
{'Instances': ['aaaabbbbcccc']})
class ReplaceTest(common.HeatTestCase):
scenarios = [
('1', dict(min_in_service=0, batch_size=1, updates=2)),
('2', dict(min_in_service=0, batch_size=2, updates=1)),
('3', dict(min_in_service=3, batch_size=1, updates=3)),
('4', dict(min_in_service=3, batch_size=2, updates=2))]
def setUp(self):
super(ReplaceTest, self).setUp()
t = template_format.parse(inline_templates.as_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
lc = self.create_launch_config(t, self.stack)
lcid = lc.FnGetRefId()
self.defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::InstanceGroup',
{'Size': 2, 'AvailabilityZones': ['zoneb'],
'LaunchConfigurationName': lcid})
self.group = instgrp.InstanceGroup('asg', self.defn, self.stack)
self.group._lb_reload = mock.Mock()
self.group.update_with_template = mock.Mock()
self.group.check_update_complete = mock.Mock()
self.group._nested = self.get_fake_nested_stack()
def create_launch_config(self, t, stack):
self.stub_ImageConstraint_validate()
self.stub_FlavorConstraint_validate()
self.stub_SnapshotConstraint_validate()
rsrc = stack['LaunchConfig']
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def get_fake_nested_stack(self):
nested_t = '''
heat_template_version: 2013-05-23
description: AutoScaling Test
resources:
one:
type: ResourceWithPropsAndAttrs
properties:
Foo: hello
two:
type: ResourceWithPropsAndAttrs
properties:
Foo: fee
'''
return utils.parse_stack(template_format.parse(nested_t))
def test_rolling_updates(self):
self.group._replace(self.min_in_service, self.batch_size, 0)
self.assertEqual(self.updates,
len(self.group.update_with_template.call_args_list))
self.assertEqual(self.updates + 1,
len(self.group._lb_reload.call_args_list))
| {
"content_hash": "cbfc985384c417185a2ebbdf2c60bca6",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 79,
"avg_line_length": 40.76021798365122,
"alnum_prop": 0.5896116050538137,
"repo_name": "rh-s/heat",
"id": "81099ad931e79bc6d1fe2fc80cc7be1e39f6f0c0",
"size": "15534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_instance_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6288599"
},
{
"name": "Shell",
"bytes": "32845"
}
],
"symlink_target": ""
} |
"""EmPOWER cell pool and cell classes."""
from empower.core.serialize import serializable_dict
class CellPool(list):
"""Cell pool.
Extends the list in order to add a few filtering and sorting methods
"""
def sort_by_rsrp(self, ue_id):
"""Return list sorted by rsrp for the specified address."""
filtered = [x for x in self if ue_id in x.ue_measurements]
cells = sorted(filtered,
key=lambda x: x.ue_measurements[ue_id]['rsrp'],
reverse=True)
return CellPool(cells)
def sort_by_rsrq(self, ue_id):
"""Return list sorted by rsrq for the specified address."""
filtered = [x for x in self if ue_id in x.ue_measurements]
cells = sorted(filtered,
key=lambda x: x.ue_measurements[ue_id]['rsrq'],
reverse=True)
return CellPool(cells)
def first(self):
"""Return first entry in the list."""
if self:
cell = list.__getitem__(self, 0)
return cell
return None
def last(self):
"""Return last entry in the list."""
if self:
cell = list.__getitem__(self, -1)
return cell
return None
@serializable_dict
class Cell:
"""An LTE eNB cell.
Attributes:
vbs: The VBS at which this cell is available
pci: the physical cell id
        dl_earfcn: downlink center frequency (EARFCN)
        ul_earfcn: uplink center frequency (EARFCN)
        n_prbs: number of physical resource blocks
ue_measurements: UE measurements (RSRP/RSRQ)
cell_measurements: cell measurements
"""
def __init__(self, vbs, pci, dl_earfcn, ul_earfcn, n_prbs):
self.vbs = vbs
self.pci = pci
self.dl_earfcn = dl_earfcn
self.ul_earfcn = ul_earfcn
self.n_prbs = n_prbs
self.ue_measurements = {}
self.cell_measurements = {}
def to_str(self):
"""Return an ASCII representation of the object."""
return "vbs %s pci %u dl_earfcn %u dl_earfcn %u n_prbs %u" % \
(self.vbs.addr, self.pci, self.dl_earfcn, self.ul_earfcn,
self.n_prbs)
def __str__(self):
return self.to_str()
def __repr__(self):
return self.__class__.__name__ + "('" + self.to_str() + "')"
def __hash__(self):
return hash(self.vbs) + hash(self.pci)
def __eq__(self, other):
if isinstance(other, Cell):
return self.vbs == other.vbs and self.pci == other.pci
return False
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
"""Return JSON-serializable representation of the object."""
out = {}
out['addr'] = self.vbs.addr
out['pci'] = self.pci
out['dl_earfcn'] = self.dl_earfcn
out['ul_earfcn'] = self.ul_earfcn
out['n_prbs'] = self.n_prbs
out['cell_measurements'] = self.cell_measurements
out['ue_measurements'] = self.ue_measurements
return out
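def _example_best_cell(vbs_a, vbs_b, ue_id):
    """A minimal sketch: pick the cell with the strongest RSRP for a UE.

    ``vbs_a``, ``vbs_b`` and ``ue_id`` are assumed to come from the RAN
    manager; the EARFCN/PRB numbers and measurements are illustrative only.
    """
    pool = CellPool([Cell(vbs_a, 1, 3400, 21400, 25),
                     Cell(vbs_b, 2, 3400, 21400, 25)])
    pool[0].ue_measurements[ue_id] = {'rsrp': -95, 'rsrq': -11}
    pool[1].ue_measurements[ue_id] = {'rsrp': -80, 'rsrq': -12}
    return pool.sort_by_rsrp(ue_id).first()  # the vbs_b cell (stronger RSRP)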
| {
"content_hash": "5dd5c237046300ce254d308385d901a5",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 72,
"avg_line_length": 27.19298245614035,
"alnum_prop": 0.5590322580645162,
"repo_name": "rriggio/empower-runtime",
"id": "23f33be5d7d6fce6738ea6b8225e13cfed4cfe4e",
"size": "3709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empower/managers/ranmanager/vbsp/cellpool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33830"
},
{
"name": "HTML",
"bytes": "22883"
},
{
"name": "JavaScript",
"bytes": "2649924"
},
{
"name": "Python",
"bytes": "615969"
}
],
"symlink_target": ""
} |
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver
from tensorflow.python.training import training as train
__all__ = [
"load_checkpoint", "load_variable", "list_variables", "init_from_checkpoint"
]
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return train.NewCheckpointReader(filename)
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
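def _example_inspect_checkpoint(ckpt_dir):
  """A small sketch using the helpers above to inspect a checkpoint.

  `ckpt_dir` is assumed to contain a checkpoint written by a `tf.train.Saver`
  (the path and variable layout are hypothetical).
  """
  all_vars = list_variables(ckpt_dir)
  for name, shape in all_vars:
    logging.info("checkpoint tensor %s has shape %s", name, shape)
  # Load the first tensor's value as a numpy ndarray.
  first_name, _ = all_vars[0]
  return load_variable(ckpt_dir, first_name)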
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Initializes current variables with tensors loaded from given checkpoint.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.variable_scope('new_scope_1'):
var1 = tf.get_variable('var1', shape=[20, 2],
initializer=tf.zeros_initializer())
with tf.variable_scope('new_scope_2'):
var2 = tf.get_variable('var2', shape=[50, 4],
initializer=tf.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.get_variable(name='var3', shape=[100, 100],
initializer=tf.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/', 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in six.iteritems(assignment_map):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
is_var = lambda x: isinstance(x, variables.Variable)
if is_var(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(is_var(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if is_var(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, ckpt_dir_or_file, tensor_name_in_ckpt
))
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in scope_variables:
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.info("Initialize variable %s from checkpoint %s with %s" % (
var_name, ckpt_dir_or_file, full_tensor_name
))
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return saver.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer"):
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
variable._initializer_op = state_ops.assign(variable, restore_op) # pylint:disable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| {
"content_hash": "ed06d7f30cdd00d65a5c2f31c9c37a4a",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 102,
"avg_line_length": 39.554140127388536,
"alnum_prop": 0.6485507246376812,
"repo_name": "vrv/tensorflow",
"id": "ffa39b77930ca42c735caf4bc76c4bdcf783a8dd",
"size": "13109",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/checkpoint_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182510"
},
{
"name": "C++",
"bytes": "23619794"
},
{
"name": "CMake",
"bytes": "158007"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "824401"
},
{
"name": "HTML",
"bytes": "886772"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "14005"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "214419"
},
{
"name": "Python",
"bytes": "20580440"
},
{
"name": "Shell",
"bytes": "337420"
},
{
"name": "TypeScript",
"bytes": "1267602"
}
],
"symlink_target": ""
} |
"""Node classes for the AST for a Mojo IDL file."""
# Note: For convenience of testing, you probably want to define __eq__() methods
# for all node types; it's okay to be slightly lax (e.g., not compare filename
# and lineno). You may also define __repr__() to help with analyzing test
# failures, especially for more complex types.
class NodeBase(object):
"""Base class for nodes in the AST."""
def __init__(self, filename=None, lineno=None):
self.filename = filename
self.lineno = lineno
def __eq__(self, other):
# We want strict comparison of the two object's types. Disable pylint's
# insistence upon recommending isinstance().
# pylint: disable=unidiomatic-typecheck
return type(self) == type(other)
# Make != the inverse of ==. (Subclasses shouldn't have to override this.)
def __ne__(self, other):
return not self == other
# TODO(vtl): Some of this is complicated enough that it should be tested.
class NodeListBase(NodeBase):
"""Represents a list of other nodes, all having the same type. (This is meant
to be subclassed, with subclasses defining _list_item_type to be the class (or
classes, in a tuple) of the members of the list.)"""
def __init__(self, item_or_items=None, **kwargs):
super(NodeListBase, self).__init__(**kwargs)
self.items = []
if item_or_items is None:
pass
elif isinstance(item_or_items, list):
for item in item_or_items:
assert isinstance(item, self._list_item_type)
self.Append(item)
else:
assert isinstance(item_or_items, self._list_item_type)
self.Append(item_or_items)
# Support iteration. For everything else, users should just access |items|
# directly. (We intentionally do NOT supply |__len__()| or |__nonzero__()|, so
# |bool(NodeListBase())| is true.)
def __iter__(self):
return self.items.__iter__()
def __eq__(self, other):
return super(NodeListBase, self).__eq__(other) and \
self.items == other.items
# Implement this so that on failure, we get slightly more sensible output.
def __repr__(self):
return self.__class__.__name__ + "([" + \
", ".join([repr(elem) for elem in self.items]) + "])"
def Insert(self, item):
"""Inserts item at the front of the list."""
assert isinstance(item, self._list_item_type)
self.items.insert(0, item)
self._UpdateFilenameAndLineno()
def Append(self, item):
"""Appends item to the end of the list."""
assert isinstance(item, self._list_item_type)
self.items.append(item)
self._UpdateFilenameAndLineno()
def _UpdateFilenameAndLineno(self):
if self.items:
self.filename = self.items[0].filename
self.lineno = self.items[0].lineno
class Definition(NodeBase):
"""Represents a definition of anything that has a global name (e.g., enums,
enum values, consts, structs, struct fields, interfaces). (This does not
include parameter definitions.) This class is meant to be subclassed."""
def __init__(self, mojom_name, **kwargs):
assert isinstance(mojom_name, str)
NodeBase.__init__(self, **kwargs)
self.mojom_name = mojom_name
################################################################################
class Attribute(NodeBase):
"""Represents an attribute."""
def __init__(self, key, value, **kwargs):
assert isinstance(key, str)
super(Attribute, self).__init__(**kwargs)
self.key = key
self.value = value
def __eq__(self, other):
return super(Attribute, self).__eq__(other) and \
self.key == other.key and \
self.value == other.value
class AttributeList(NodeListBase):
"""Represents a list attributes."""
_list_item_type = Attribute
class Const(Definition):
"""Represents a const definition."""
def __init__(self, mojom_name, attribute_list, typename, value, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
# The typename is currently passed through as a string.
assert isinstance(typename, str)
# The value is either a literal (currently passed through as a string) or a
# "wrapped identifier".
assert isinstance(value, str) or isinstance(value, tuple)
super(Const, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.typename = typename
self.value = value
def __eq__(self, other):
return super(Const, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.typename == other.typename and \
self.value == other.value
class Enum(Definition):
"""Represents an enum definition."""
def __init__(self, mojom_name, attribute_list, enum_value_list, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert enum_value_list is None or isinstance(enum_value_list, EnumValueList)
super(Enum, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.enum_value_list = enum_value_list
def __eq__(self, other):
return super(Enum, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.enum_value_list == other.enum_value_list
class EnumValue(Definition):
"""Represents a definition of an enum value."""
def __init__(self, mojom_name, attribute_list, value, **kwargs):
    # The optional value is either an int (which is currently a string) or a
# "wrapped identifier".
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert value is None or isinstance(value, (str, tuple))
super(EnumValue, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.value = value
def __eq__(self, other):
return super(EnumValue, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.value == other.value
class EnumValueList(NodeListBase):
"""Represents a list of enum value definitions (i.e., the "body" of an enum
definition)."""
_list_item_type = EnumValue
class Import(NodeBase):
"""Represents an import statement."""
def __init__(self, attribute_list, import_filename, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert isinstance(import_filename, str)
super(Import, self).__init__(**kwargs)
self.attribute_list = attribute_list
self.import_filename = import_filename
def __eq__(self, other):
return super(Import, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.import_filename == other.import_filename
class ImportList(NodeListBase):
"""Represents a list (i.e., sequence) of import statements."""
_list_item_type = Import
class Interface(Definition):
"""Represents an interface definition."""
def __init__(self, mojom_name, attribute_list, body, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert isinstance(body, InterfaceBody)
super(Interface, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.body = body
def __eq__(self, other):
return super(Interface, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.body == other.body
class Method(Definition):
"""Represents a method definition."""
def __init__(self, mojom_name, attribute_list, ordinal, parameter_list,
response_parameter_list, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert ordinal is None or isinstance(ordinal, Ordinal)
assert isinstance(parameter_list, ParameterList)
assert response_parameter_list is None or \
isinstance(response_parameter_list, ParameterList)
super(Method, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.ordinal = ordinal
self.parameter_list = parameter_list
self.response_parameter_list = response_parameter_list
def __eq__(self, other):
return super(Method, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.ordinal == other.ordinal and \
self.parameter_list == other.parameter_list and \
self.response_parameter_list == other.response_parameter_list
# This needs to be declared after |Method|.
class InterfaceBody(NodeListBase):
"""Represents the body of (i.e., list of definitions inside) an interface."""
_list_item_type = (Const, Enum, Method)
class Module(NodeBase):
"""Represents a module statement."""
def __init__(self, mojom_namespace, attribute_list, **kwargs):
    # |mojom_namespace| is either None or a "wrapped identifier".
assert mojom_namespace is None or isinstance(mojom_namespace, tuple)
assert attribute_list is None or isinstance(attribute_list, AttributeList)
super(Module, self).__init__(**kwargs)
self.mojom_namespace = mojom_namespace
self.attribute_list = attribute_list
def __eq__(self, other):
return super(Module, self).__eq__(other) and \
self.mojom_namespace == other.mojom_namespace and \
self.attribute_list == other.attribute_list
class Mojom(NodeBase):
"""Represents an entire .mojom file. (This is the root node.)"""
def __init__(self, module, import_list, definition_list, **kwargs):
assert module is None or isinstance(module, Module)
assert isinstance(import_list, ImportList)
assert isinstance(definition_list, list)
super(Mojom, self).__init__(**kwargs)
self.module = module
self.import_list = import_list
self.definition_list = definition_list
def __eq__(self, other):
return super(Mojom, self).__eq__(other) and \
self.module == other.module and \
self.import_list == other.import_list and \
self.definition_list == other.definition_list
def __repr__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.module,
self.import_list, self.definition_list)
class Ordinal(NodeBase):
"""Represents an ordinal value labeling, e.g., a struct field."""
def __init__(self, value, **kwargs):
assert isinstance(value, int)
super(Ordinal, self).__init__(**kwargs)
self.value = value
def __eq__(self, other):
return super(Ordinal, self).__eq__(other) and \
self.value == other.value
class Parameter(NodeBase):
"""Represents a method request or response parameter."""
def __init__(self, mojom_name, attribute_list, ordinal, typename, **kwargs):
assert isinstance(mojom_name, str)
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert ordinal is None or isinstance(ordinal, Ordinal)
assert isinstance(typename, str)
super(Parameter, self).__init__(**kwargs)
self.mojom_name = mojom_name
self.attribute_list = attribute_list
self.ordinal = ordinal
self.typename = typename
def __eq__(self, other):
return super(Parameter, self).__eq__(other) and \
self.mojom_name == other.mojom_name and \
self.attribute_list == other.attribute_list and \
self.ordinal == other.ordinal and \
self.typename == other.typename
class ParameterList(NodeListBase):
"""Represents a list of (method request or response) parameters."""
_list_item_type = Parameter
class Struct(Definition):
"""Represents a struct definition."""
def __init__(self, mojom_name, attribute_list, body, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert isinstance(body, StructBody) or body is None
super(Struct, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.body = body
def __eq__(self, other):
return super(Struct, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.body == other.body
class StructField(Definition):
"""Represents a struct field definition."""
def __init__(self, mojom_name, attribute_list, ordinal, typename,
default_value, **kwargs):
assert isinstance(mojom_name, str)
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert ordinal is None or isinstance(ordinal, Ordinal)
assert isinstance(typename, str)
# The optional default value is currently either a value as a string or a
# "wrapped identifier".
assert default_value is None or isinstance(default_value, (str, tuple))
super(StructField, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.ordinal = ordinal
self.typename = typename
self.default_value = default_value
def __eq__(self, other):
return super(StructField, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.ordinal == other.ordinal and \
self.typename == other.typename and \
self.default_value == other.default_value
# This needs to be declared after |StructField|.
class StructBody(NodeListBase):
"""Represents the body of (i.e., list of definitions inside) a struct."""
_list_item_type = (Const, Enum, StructField)
class Union(Definition):
"""Represents a union definition."""
def __init__(self, mojom_name, attribute_list, body, **kwargs):
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert isinstance(body, UnionBody)
super(Union, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.body = body
def __eq__(self, other):
return super(Union, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.body == other.body
class UnionField(Definition):
def __init__(self, mojom_name, attribute_list, ordinal, typename, **kwargs):
assert isinstance(mojom_name, str)
assert attribute_list is None or isinstance(attribute_list, AttributeList)
assert ordinal is None or isinstance(ordinal, Ordinal)
assert isinstance(typename, str)
super(UnionField, self).__init__(mojom_name, **kwargs)
self.attribute_list = attribute_list
self.ordinal = ordinal
self.typename = typename
def __eq__(self, other):
return super(UnionField, self).__eq__(other) and \
self.attribute_list == other.attribute_list and \
self.ordinal == other.ordinal and \
self.typename == other.typename
class UnionBody(NodeListBase):
_list_item_type = UnionField
| {
"content_hash": "af48331556017e8e95aec9764bf77081",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 80,
"avg_line_length": 35.28502415458937,
"alnum_prop": 0.6645673603504929,
"repo_name": "endlessm/chromium-browser",
"id": "c9b6605cf43a4a1a25efefbbbb4ce52ac3477f8e",
"size": "14770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mojo/public/tools/mojom/mojom/parse/ast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
(c) Copyright 2016 Hewlett-Packard Enterprise Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oslo_log import log
from freezer_api import context
LOG = log.getLogger(__name__)
def inject_context(req, resp, params):
user_id = req.get_header('X-USER-ID')
request_id = req.get_header('X-Openstack-Request-ID')
auth_token = req.get_header('X-AUTH-TOKEN')
tenant = req.get_header('X-TENANT-ID')
roles = req.get_header('X-ROLES')
roles = roles and roles.split(',') or []
ctxt = context.FreezerContext(auth_token=auth_token,
user=user_id,
tenant=tenant,
request_id=request_id,
roles=roles)
req.env['freezer.context'] = ctxt
LOG.info('Request context set')
def before_hooks():
return [inject_context]
class FuncMiddleware(object):
"""
Injecting some function as middleware for freezer-api
"""
def __init__(self, func):
self.func = func
def process_resource(self, req, resp, resource, params=None):
return self.func(req, resp, params)
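# Illustrative wiring sketch (assumption, not part of freezer-api): one possible
# way to attach the hooks above to a falcon application; the function name and
# the use of falcon.API are placeholders for demonstration only.
def build_example_app():
    import falcon
    middleware = [FuncMiddleware(hook) for hook in before_hooks()]
    return falcon.API(middleware=middleware)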
| {
"content_hash": "b94c46d8d2ea610d4943804150dec01b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 29.785714285714285,
"alnum_prop": 0.6510791366906474,
"repo_name": "szaher/freezer-api",
"id": "ba40e97b0773421195b9390864b2e64c0500ea97",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freezer_api/api/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1942"
},
{
"name": "PHP",
"bytes": "20318"
},
{
"name": "Python",
"bytes": "424120"
},
{
"name": "Shell",
"bytes": "13072"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from builtins import str
import sys
import os
from vmrunner import vmrunner
import socket
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def UDP_test():
print("<Test.py> Performing UDP tests")
HOST, PORT = "10.0.0.55", 4242
# SOCK_DGRAM is the socket type to use for UDP sockets
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: This is necessary for the test to exit after the VM has
# been shut down due to a VM timeout
sock.settimeout(20)
data = "Lucky".encode()
sock.sendto(data, (HOST, PORT))
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "Luke".encode()
sock.sendto(data, (HOST, PORT))
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "x".encode() * 1472
sock.sendto(data, (HOST, PORT))
received = sock.recv(1500)
if received != data:
print("<Test.py> Did not receive long string: {}".format(received))
return False
data = "x".encode() * 9216 # 9216 is apparently default max for MacOS
sock.sendto(data, (HOST, PORT))
received = bytearray()
while (len(received) < len(data)):
received.extend(sock.recv(len(data)))
print("RECEIVED: ", len(received))
if received != data:
print("<Test.py> Did not receive mega string (64k)")
return False
vm.exit(0, "Test completed without errors")
def UDP6_test(trigger_line):
print("<Test.py> Performing UDP6 tests")
HOST, PORT = 'fe80::4242%bridge43', 4242
# SOCK_DGRAM is the socket type to use for UDP sockets
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
res = socket.getaddrinfo(HOST, PORT, socket.AF_INET6, socket.SOCK_DGRAM)
af, socktype, proto, canonname, addr = res[0]
# NOTE: This is necessary for the test to exit after the VM has
# been shut down due to a VM timeout
sock.settimeout(20)
data = "Lucky".encode()
sock.sendto(data, addr)
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "Luke".encode()
sock.sendto(data, addr)
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "x".encode() * 1448
sock.sendto(data, addr)
received = sock.recv(1500)
if received != data:
print("<Test.py> Did not receive long string: {}".format(received))
return False
UDP_test()
# Add custom event-handler
vm.on_output("UDP test service", UDP6_test)
if len(sys.argv) > 1:
vm.boot(image_name=str(sys.argv[1]))
else:
# Boot the VM, taking a timeout as parameter
vm.cmake().boot(30,image_name="net_udp").clean()
| {
"content_hash": "5f14a29b5e7985980153bd332ef47376",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 74,
"avg_line_length": 30.14423076923077,
"alnum_prop": 0.6669856459330143,
"repo_name": "AnnikaH/IncludeOS",
"id": "c54983496ec661a763e20c172925247bf3c68e69",
"size": "3159",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/net/integration/udp/test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "63075"
},
{
"name": "C",
"bytes": "49793"
},
{
"name": "C++",
"bytes": "3231859"
},
{
"name": "CMake",
"bytes": "139086"
},
{
"name": "Dockerfile",
"bytes": "3694"
},
{
"name": "GDB",
"bytes": "255"
},
{
"name": "JavaScript",
"bytes": "1956"
},
{
"name": "Makefile",
"bytes": "1719"
},
{
"name": "Python",
"bytes": "160249"
},
{
"name": "Shell",
"bytes": "87390"
}
],
"symlink_target": ""
} |
from .models import Place, Location, Activity
from django.core import serializers
from django.http import HttpResponse
from collections import Counter
from json import dumps
# Create your views here.
def get_locations(request):
    activity = request.GET.get('id')
    locations = Location.objects.filter(activities_id=activity)
return HttpResponse(serializers.serialize("json", locations))
def get_places(request):
t = {}
activity = request.GET.get('id')
places = Place.objects.filter(activities_id=activity)
for place in places:
loc_name = place.location.name
if loc_name in t:
t[loc_name]['places'].append(
serializers.serialize("json", [place, ]))
else:
t[loc_name] = {}
t[loc_name]['id'] = place.location.id
t[loc_name]['places'] = []
t[loc_name]['name'] = place.location.name.title()
a = t.values()
return HttpResponse(dumps(a))
def get_activities(request):
activities = Activity.objects.all()
return HttpResponse(serializers.serialize("json", activities))
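# Illustrative URL wiring (assumption, not part of the original file): a minimal
# set of routes that would expose the three views above; the patterns are made
# up for demonstration and would normally live in the app's urls.py.
from django.conf.urls import url
example_urlpatterns = [
    url(r'^activities/$', get_activities),
    url(r'^locations/$', get_locations),
    url(r'^places/$', get_places),
]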
| {
"content_hash": "c7027cd7bfaec80d2bb72a6f12ca3b2b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 31.542857142857144,
"alnum_prop": 0.6512681159420289,
"repo_name": "silentninja/trippal",
"id": "7af34513f79d318e03bf8ead3e1d575333604b48",
"size": "1104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/trippal/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1271896"
},
{
"name": "HTML",
"bytes": "37741"
},
{
"name": "JavaScript",
"bytes": "3269124"
},
{
"name": "Python",
"bytes": "15697"
}
],
"symlink_target": ""
} |
"""Treadmill Tenant REST api.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import flask
import flask_restplus as restplus
from flask_restplus import fields
# Disable E0611: No 'name' in module
from treadmill import webutils # pylint: disable=E0611
# Old style classes, no init method.
#
# pylint: disable=W0232
def init(api, cors, impl):
"""Configures REST handlers for tenant resource."""
namespace = webutils.namespace(
api, __name__, 'Tenant REST operations'
)
model = {
        # Tenant responses are inconsistent: the list endpoint uses "tenant"
        # while GET uses "_id", so both fields are needed here.
'_id': fields.String(description='Tenant name'),
'tenant': fields.String(description='Tenant name'),
'systems': fields.List(
fields.Integer(description='System ID', required=True),
min_items=1)
}
tenant_model = api.model(
'Tenant', model
)
@namespace.route('/',)
class _TenantList(restplus.Resource):
"""Treadmill Tenant resource"""
@webutils.get_api(api, cors,
marshal=api.marshal_list_with,
resp_model=tenant_model)
def get(self):
"""Returns list of configured tenants."""
return impl.list()
@namespace.route('/<tenant_id>')
@api.doc(params={'tenant_id': 'Tenant ID/name'})
class _TenantResource(restplus.Resource):
"""Treadmill Tenant resource."""
@webutils.get_api(api, cors,
marshal=api.marshal_with,
resp_model=tenant_model)
def get(self, tenant_id):
"""Return Treadmill tenant configuration."""
return impl.get(tenant_id)
@webutils.post_api(api, cors,
req_model=tenant_model,
resp_model=tenant_model)
def post(self, tenant_id):
"""Creates Treadmill tenant."""
return impl.create(tenant_id, flask.request.json)
@webutils.put_api(api, cors,
req_model=tenant_model,
resp_model=tenant_model)
def put(self, tenant_id):
"""Updates Treadmill tenant configuration."""
return impl.update(tenant_id, flask.request.json)
@webutils.delete_api(api, cors)
def delete(self, tenant_id):
"""Deletes Treadmill tenant."""
return impl.delete(tenant_id)
| {
"content_hash": "d5d87ed3a80f5c5418046f9594914e3d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 32.074074074074076,
"alnum_prop": 0.5800615858352579,
"repo_name": "captiosus/treadmill",
"id": "fae63938c6752ccf991e7912f04f19cd62f01fe1",
"size": "2598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treadmill/rest/api/tenant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "570"
},
{
"name": "Python",
"bytes": "2598791"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "58099"
}
],
"symlink_target": ""
} |
import time
def main(request, response):
response.add_required_headers = False # Don't implicitly add HTTP headers
response.writer.write_status(200)
response.writer.write_header("Content-Type", "text/html")
response.writer.end_headers()
    # "\xce\xb6" is the UTF-8 encoding of the Greek letter zeta used by the test
    response.writer.write(b'<!DOCTYPE html><script src="script.py?uuid=%s&character=\xce\xb6"></script>' % request.GET[b"uuid"])
time.sleep(0.2)
response.writer.write(b'<meta charset="windows-1251"><p>Test: \xE6</p>');
| {
"content_hash": "843461145b20685d89c347c9390b443a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 129,
"avg_line_length": 43.90909090909091,
"alnum_prop": 0.6977225672877847,
"repo_name": "nwjs/chromium.src",
"id": "c72c469ce3cd23580f88713cefcc2799fed8ad66",
"size": "483",
"binary": false,
"copies": "20",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/html/syntax/speculative-charset/support/speculative-script.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from tkinter import *
"""/*
Class: ArenaPanel
Abstract super class for all <Panel> objects to be used as children in
the <ArenaGUI>.
Provides an interface for panel specific methods, and some helper
methods
*/"""
class ArenaPanel(LabelFrame):
"""/*
Group: Constructors
*/"""
"""/*
Constructor: __init__
Create the <Panel> and initialise any instance variables it may have
Parameters:
obj master - The parent of this panel
string title - The title to be displayed on the panel
int width - The width of the panel in pixels
int height - The height of the panel in pixels
array *args - An array of extra arguments to be passed in
dict **kwargs - A dict of extra keyword arguments to be passed in
*/"""
def __init__(self, master, title, width, height, *args, **kwargs):
super(ArenaPanel, self).__init__(
master, text=title, width=width, height=height)
"""/*
Group: Variables
*/"""
"""/*
var: _master
The parent of the window
*/"""
self._master = master
self._initialiseVariables(*args, **kwargs)
self._initialiseChildren()
"""/*
Group: Public Methods
*/"""
"""/*
Function: getTitle
Return the title of this ArenaPanel instance
Returns:
str title - The title of this ArenaPanel instance
*/"""
def getTitle(self):
return self.cget("text")
"""/*
Function: close
    Handles the closing of the panel, including checking whether the panel can
    be closed and shutting down any service the panel manages
*/"""
def close(self):
        raise NotImplementedError("This method must be overridden")
# Group: Private Methods
"""/*
Group: Private Methods
*/"""
"""/*
Function: _popup
Allows any subclass to create a popup for displaying errors
Parameters:
string title - The title of the popup
string message - The error message to be displayed
*/"""
def _popup(self, title, message):
popup = Toplevel(self._master)
popup.title(title)
Label(popup, text=message).pack(fill=BOTH, expand=1)
Button(popup, command=popup.destroy, text="Close").pack(
fill=BOTH, expand=1)
"""/*
Group: Abstract Methods
*/"""
"""/*
Function: _initialiseVariables
Initialise all instance variables to be used in this panel
Parameters:
array *args - An array of extra arguments needed
dict **kwargs - A dict of extra keyword arguments needed
*/"""
def _initialiseVariables(self, *args, **kwargs):
        raise NotImplementedError("This method must be overridden")
"""/*
Function: _initialiseChildren
Create any children of this panel and add them into the panel
*/"""
def _initialiseChildren(self):
        raise NotImplementedError("This method must be overridden")
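# Minimal illustrative subclass (assumption, not part of the original file):
# shows the hooks a concrete panel is expected to override, e.g.
# ExamplePanel(root, "Example", 200, 100).
class ExamplePanel(ArenaPanel):
    def _initialiseVariables(self, *args, **kwargs):
        # No extra state is needed for this toy panel
        self._closed = False
    def _initialiseChildren(self):
        Label(self, text="Example child widget").pack(fill=BOTH, expand=1)
    def close(self):
        # Nothing external to shut down, so simply destroy the widget
        self._closed = True
        self.destroy()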
| {
"content_hash": "6f671cdefbad3d1d376c30ab67e06c91",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 77,
"avg_line_length": 28.72222222222222,
"alnum_prop": 0.5838168923275306,
"repo_name": "ExceptionalVoid/Arena",
"id": "c7416412b198cd6e34208a09aa9f09a49b94aede",
"size": "3102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "local/ArenaPanel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "399"
},
{
"name": "HTML",
"bytes": "8865"
},
{
"name": "JavaScript",
"bytes": "54859"
},
{
"name": "Python",
"bytes": "84596"
}
],
"symlink_target": ""
} |
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="a"/>
<xs:element name="b"/>
<xs:element name="c"/>
<xs:element name="d"/>
<xs:element name="e"/>
<xs:group name="Cabc">
<xs:choice>
<xs:element ref="a"/>
<xs:element ref="b"/>
<xs:element ref="c"/>
</xs:choice>
</xs:group>
<xs:group name="Cbcd">
<xs:choice>
<xs:element ref="b"/>
<xs:element ref="c"/>
<xs:element ref="d"/>
</xs:choice>
</xs:group>
<xs:group name="Cbe">
<xs:choice>
<xs:element ref="b"/>
<xs:element ref="e"/>
</xs:choice>
</xs:group>
<xs:group name="CabcPCbcdPCbe">
<xs:sequence>
<xs:group ref="Cabc"/>
<xs:group ref="Cbcd"/>
<xs:group ref="Cbe"/>
</xs:sequence>
</xs:group>
<xs:group name="CbcdPCbe">
<xs:sequence>
<xs:group ref="Cbcd"/>
<xs:group ref="Cbe"/>
</xs:sequence>
</xs:group>
<xs:complexType name="aBCde">
<xs:sequence>
<xs:group ref="CabcPCbcdPCbe"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="Bcde">
<xs:sequence>
<xs:group ref="CbcdPCbe"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="aBCDE">
<xs:sequence>
<xs:group ref="CabcPCbcdPCbe"/>
<xs:group ref="CbcdPCbe"/>
</xs:sequence>
</xs:complexType>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0034 (unittest.TestCase):
def test_aBCde (self):
instance = aBCde()
self.assertEqual(None, instance.a)
self.assertEqual([], instance.b)
self.assertEqual([], instance.c)
self.assertEqual(None, instance.d)
self.assertEqual(None, instance.e)
def test_Bcde (self):
instance = Bcde()
self.assertEqual([], instance.b)
self.assertEqual(None, instance.c)
self.assertEqual(None, instance.d)
self.assertEqual(None, instance.e)
def test_aBCDE (self):
instance = aBCDE()
self.assertEqual(None, instance.a)
self.assertEqual([], instance.b)
self.assertEqual([], instance.c)
self.assertEqual([], instance.d)
self.assertEqual([], instance.e)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "24ad1f6c6ae46d4917ab93f07d8e8123",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 60,
"avg_line_length": 25.07,
"alnum_prop": 0.5907459114479457,
"repo_name": "jonfoster/pyxb1",
"id": "1fa7177178ff60827682b1376ca2af7b1212daee",
"size": "2507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/trac/test-trac-0034b.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1564427"
},
{
"name": "Shell",
"bytes": "18946"
}
],
"symlink_target": ""
} |
__all__ = ('config', 'logger', )
from hotqueue import HotQueue
from ConfigParser import ConfigParser
from os.path import dirname, join, exists
from redis import Redis
import json
# configuration
config = ConfigParser()
config.add_section('www')
config.set('www', 'baseurl', 'http://android.kivy.org/')
config.set('www', 'secret_key', '')
config.add_section('database')
config.set('database', 'url', 'sqlite:////tmp/test.db')
config.add_section('redis')
config.set('redis', 'host', 'localhost')
config.set('redis', 'port', '6379')
config.set('redis', 'password', '')
# read existing file
config_fn = join(dirname(__file__), '..', 'config.cfg')
if exists(config_fn):
config.read(config_fn)
# write current config if possible
try:
fd = open(config_fn, 'w')
config.write(fd)
fd.close()
except Exception:
pass
# start the queue
qjob = HotQueue(
'jobsubmit',
host=config.get('redis', 'host'),
port=config.getint('redis', 'port'),
password=config.get('redis', 'password'),
db=0)
# Redis database connector
r = Redis(
host=config.get('redis', 'host'),
port=config.getint('redis', 'port'),
password=config.get('redis', 'password'),
db=1)
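# Illustrative usage (assumption, not part of this module): pushing a build job
# onto the queue and reading it back; the payload fields are made up.
if __name__ == '__main__':
    qjob.put(json.dumps({'job_id': 1, 'package': 'kivy'}))
    print(qjob.get())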
| {
"content_hash": "958c1d43d65b704e6e6ef8750e103e84",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 56,
"avg_line_length": 23.6,
"alnum_prop": 0.6661016949152543,
"repo_name": "kivy/p4a-cloud",
"id": "8af2cf12151a9f9eafb13f8d0f95f77ec7dded22",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "master/web/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5449"
},
{
"name": "HTML",
"bytes": "43702"
},
{
"name": "JavaScript",
"bytes": "1356"
},
{
"name": "Python",
"bytes": "33003"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
import logging
from framework.celery_tasks.handlers import enqueue_task
from website import settings
logger = logging.getLogger(__name__)
if settings.SEARCH_ENGINE == 'elastic':
    from website.search import elastic_search as search_engine
else:
search_engine = None
logger.warn('Elastic search is not set to load')
def requires_search(func):
def wrapped(*args, **kwargs):
if search_engine is not None and not settings.RUNNING_MIGRATION:
return func(*args, **kwargs)
return wrapped
@requires_search
def search(query, index=None, doc_type=None, raw=None):
index = index or settings.ELASTIC_INDEX
return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)
@requires_search
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
kwargs = {
'index': index,
'bulk': bulk
}
    if async_update:
node_id = node._id
# We need the transaction to be committed before trying to run celery tasks.
# For example, when updating a Node's privacy, is_public must be True in the
# database in order for method that updates the Node's elastic search document
# to run correctly.
if settings.USE_CELERY:
enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
else:
search_engine.update_node_async(node_id=node_id, **kwargs)
else:
index = index or settings.ELASTIC_INDEX
return search_engine.update_node(node, **kwargs)
@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.bulk_update_nodes(serialize, nodes, index=index)
@requires_search
def delete_node(node, index=None):
index = index or settings.ELASTIC_INDEX
doc_type = node.project_or_component
if node.is_registration:
doc_type = 'registration'
elif node.is_preprint:
doc_type = 'preprint'
search_engine.delete_doc(node._id, node, index=index, category=doc_type)
@requires_search
def update_contributors_async(user_id):
"""Async version of update_contributors above"""
if settings.USE_CELERY:
enqueue_task(search_engine.update_contributors_async.s(user_id))
else:
search_engine.update_contributors_async(user_id)
@requires_search
def update_user(user, index=None, async_update=True):
index = index or settings.ELASTIC_INDEX
    if async_update:
user_id = user.id
if settings.USE_CELERY:
enqueue_task(search_engine.update_user_async.s(user_id, index=index))
else:
search_engine.update_user_async(user_id, index=index)
else:
search_engine.update_user(user, index=index)
@requires_search
def update_file(file_, index=None, delete=False):
index = index or settings.ELASTIC_INDEX
search_engine.update_file(file_, index=index, delete=delete)
@requires_search
def update_institution(institution, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.update_institution(institution, index=index)
@requires_search
def update_collected_metadata(cgm_id, collection_id=None, index=None, op='update'):
index = index or settings.ELASTIC_INDEX
if settings.USE_CELERY:
enqueue_task(search_engine.update_cgm_async.s(cgm_id, collection_id=collection_id, op=op, index=index))
else:
search_engine.update_cgm_async(cgm_id, collection_id=collection_id, op=op, index=index)
@requires_search
def bulk_update_collected_metadata(cgms, op='update', index=None):
index = index or settings.ELASTIC_INDEX
search_engine.bulk_update_cgm(cgms, op=op, index=index)
@requires_search
def delete_all():
search_engine.delete_all()
@requires_search
def delete_index(index):
search_engine.delete_index(index)
@requires_search
def create_index(index=None):
index = index or settings.ELASTIC_INDEX
search_engine.create_index(index=index)
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
exclude = exclude or []
result = search_engine.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=current_user)
return result
| {
"content_hash": "f9457fdb6c2febe42ef1487aa847da68",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 111,
"avg_line_length": 33.968,
"alnum_prop": 0.6938294865756006,
"repo_name": "caseyrollins/osf.io",
"id": "b49d2ee4dc90e77956d7b4cc7029aa7a5fa37a48",
"size": "4246",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "website/search/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93007"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "296984"
},
{
"name": "JavaScript",
"bytes": "1813961"
},
{
"name": "Mako",
"bytes": "676476"
},
{
"name": "Python",
"bytes": "8712355"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
"""Package loc."""
| {
"content_hash": "24dd88bdae48be2a9db22633e67fec70",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.5263157894736842,
"repo_name": "Nachtfeuer/pipeline",
"id": "620e10ea52bb5b463e8fed39a45291c4340541c1",
"size": "19",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/tools/loc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "277"
},
{
"name": "CSS",
"bytes": "796"
},
{
"name": "Dockerfile",
"bytes": "527"
},
{
"name": "Jinja",
"bytes": "6473"
},
{
"name": "Python",
"bytes": "246314"
},
{
"name": "Shell",
"bytes": "16432"
}
],
"symlink_target": ""
} |
class Solution(object):
def maxProfit(self, prices, fee):
"""
:type prices: List[int]
:type fee: int
:rtype: int
"""
        hold, not_hold = float('-inf'), 0
        for p in prices:
            hold, not_hold = max(hold, not_hold - p - fee), max(not_hold, hold + p)
return max(hold, not_hold)
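# Quick sanity check (example values assumed, not from the original file):
# with prices [1, 3, 2, 8, 4, 9] and fee 2, buy at 1 and sell at 8 (profit 5),
# then buy at 4 and sell at 9 (profit 3), for a total of 8.
if __name__ == '__main__':
    assert Solution().maxProfit([1, 3, 2, 8, 4, 9], 2) == 8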
| {
"content_hash": "3951c59b33307cc0218adf30feead9c2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 109,
"avg_line_length": 33.09090909090909,
"alnum_prop": 0.5247252747252747,
"repo_name": "ckclark/leetcode",
"id": "be308ed80dab3a60b42aa4b10814f2d675614187",
"size": "364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/best-time-to-buy-and-sell-stock-with-transaction-fee.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1124"
},
{
"name": "C++",
"bytes": "1212"
},
{
"name": "Go",
"bytes": "99087"
},
{
"name": "Java",
"bytes": "308677"
},
{
"name": "Python",
"bytes": "162345"
},
{
"name": "Shell",
"bytes": "251"
},
{
"name": "TSQL",
"bytes": "326"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.cgroups \
import forms as vol_cgroup_forms
from openstack_dashboard.dashboards.project.cgroups \
import tables as vol_cgroup_tables
from openstack_dashboard.dashboards.project.cgroups \
import tabs as vol_cgroup_tabs
from openstack_dashboard.dashboards.project.cgroups \
import workflows as vol_cgroup_workflows
CGROUP_INFO_FIELDS = ("name",
"description")
INDEX_URL = "horizon:project:cgroups:index"
class CGroupsView(tables.DataTableView):
table_class = vol_cgroup_tables.VolumeCGroupsTable
page_title = _("Consistency Groups")
def get_data(self):
try:
cgroups = api.cinder.volume_cgroup_list_with_vol_type_names(
self.request)
except Exception:
cgroups = []
exceptions.handle(self.request, _("Unable to retrieve "
"volume consistency groups."))
return cgroups
class CreateView(workflows.WorkflowView):
workflow_class = vol_cgroup_workflows.CreateCGroupWorkflow
template_name = 'project/cgroups/create.html'
page_title = _("Create Volume Consistency Group")
class UpdateView(forms.ModalFormView):
template_name = 'project/cgroups/update.html'
page_title = _("Edit Consistency Group")
form_class = vol_cgroup_forms.UpdateForm
success_url = reverse_lazy('horizon:project:cgroups:index')
submit_url = "horizon:project:cgroups:update"
def get_initial(self):
cgroup = self.get_object()
return {'cgroup_id': self.kwargs["cgroup_id"],
'name': cgroup.name,
'description': cgroup.description}
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['cgroup_id'] = self.kwargs['cgroup_id']
args = (self.kwargs['cgroup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_object(self):
cgroup_id = self.kwargs['cgroup_id']
try:
self._object = cinder.volume_cgroup_get(self.request, cgroup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve consistency group '
'details.'),
redirect=reverse(INDEX_URL))
return self._object
class RemoveVolumesView(forms.ModalFormView):
template_name = 'project/cgroups/remove_vols.html'
page_title = _("Remove Volumes from Consistency Group")
form_class = vol_cgroup_forms.RemoveVolsForm
success_url = reverse_lazy('horizon:project:cgroups:index')
submit_url = "horizon:project:cgroups:remove_volumes"
def get_initial(self):
cgroup = self.get_object()
return {'cgroup_id': self.kwargs["cgroup_id"],
'name': cgroup.name}
def get_context_data(self, **kwargs):
context = super(RemoveVolumesView, self).get_context_data(**kwargs)
context['cgroup_id'] = self.kwargs['cgroup_id']
args = (self.kwargs['cgroup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_object(self):
cgroup_id = self.kwargs['cgroup_id']
try:
self._object = cinder.volume_cgroup_get(self.request, cgroup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve consistency group '
'details.'),
redirect=reverse(INDEX_URL))
return self._object
class DeleteView(forms.ModalFormView):
template_name = 'project/cgroups/delete.html'
page_title = _("Delete Consistency Group")
form_class = vol_cgroup_forms.DeleteForm
success_url = reverse_lazy('horizon:project:cgroups:index')
submit_url = "horizon:project:cgroups:delete"
submit_label = page_title
def get_initial(self):
cgroup = self.get_object()
return {'cgroup_id': self.kwargs["cgroup_id"],
'name': cgroup.name}
def get_context_data(self, **kwargs):
context = super(DeleteView, self).get_context_data(**kwargs)
context['cgroup_id'] = self.kwargs['cgroup_id']
args = (self.kwargs['cgroup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_object(self):
cgroup_id = self.kwargs['cgroup_id']
try:
self._object = cinder.volume_cgroup_get(self.request, cgroup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve consistency group '
'details.'),
redirect=reverse(INDEX_URL))
return self._object
class ManageView(workflows.WorkflowView):
workflow_class = vol_cgroup_workflows.UpdateCGroupWorkflow
def get_context_data(self, **kwargs):
context = super(ManageView, self).get_context_data(**kwargs)
context['cgroup_id'] = self.kwargs["cgroup_id"]
return context
def _get_object(self, *args, **kwargs):
cgroup_id = self.kwargs['cgroup_id']
try:
cgroup = cinder.volume_cgroup_get(self.request, cgroup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve consistency group '
'details.'),
redirect=reverse(INDEX_URL))
return cgroup
def get_initial(self):
cgroup = self._get_object()
return {'cgroup_id': cgroup.id,
'name': cgroup.name,
'description': cgroup.description,
'vtypes': getattr(cgroup, "volume_types")}
class CreateSnapshotView(forms.ModalFormView):
form_class = vol_cgroup_forms.CreateSnapshotForm
page_title = _("Create Consistency Group Snapshot")
template_name = 'project/cgroups/create_snapshot.html'
submit_label = _("Create Snapshot")
submit_url = "horizon:project:cgroups:create_snapshot"
success_url = reverse_lazy('horizon:project:cg_snapshots:index')
def get_context_data(self, **kwargs):
context = super(CreateSnapshotView, self).get_context_data(**kwargs)
context['cgroup_id'] = self.kwargs['cgroup_id']
args = (self.kwargs['cgroup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
try:
# get number of snapshots we will be creating
search_opts = {'consistencygroup_id': context['cgroup_id']}
volumes = api.cinder.volume_list(self.request,
search_opts=search_opts)
num_volumes = len(volumes)
usages = quotas.tenant_limit_usages(self.request)
if usages['snapshotsUsed'] + num_volumes > \
usages['maxTotalSnapshots']:
raise ValueError(_('Unable to create snapshots due to '
'exceeding snapshot quota limit.'))
else:
usages['numRequestedItems'] = num_volumes
context['usages'] = usages
except ValueError as e:
            exceptions.handle(self.request, str(e))
return None
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve consistency '
'group information.'))
return context
def get_initial(self):
return {'cgroup_id': self.kwargs["cgroup_id"]}
class CloneCGroupView(forms.ModalFormView):
form_class = vol_cgroup_forms.CloneCGroupForm
page_title = _("Clone Consistency Group")
template_name = 'project/cgroups/clone_cgroup.html'
submit_label = _("Clone Consistency Group")
submit_url = "horizon:project:cgroups:clone_cgroup"
success_url = reverse_lazy('horizon:project:cgroups:index')
def get_context_data(self, **kwargs):
context = super(CloneCGroupView, self).get_context_data(**kwargs)
context['cgroup_id'] = self.kwargs['cgroup_id']
args = (self.kwargs['cgroup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
try:
# get number of volumes we will be creating
cgroup_id = context['cgroup_id']
search_opts = {'consistencygroup_id': cgroup_id}
volumes = api.cinder.volume_list(self.request,
search_opts=search_opts)
num_volumes = len(volumes)
usages = quotas.tenant_limit_usages(self.request)
if usages['volumesUsed'] + num_volumes > \
usages['maxTotalVolumes']:
raise ValueError(_('Unable to create consistency group due to '
'exceeding volume quota limit.'))
else:
usages['numRequestedItems'] = num_volumes
context['usages'] = usages
except ValueError as e:
            exceptions.handle(self.request, str(e))
return None
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve consistency '
'group information.'))
return context
def get_initial(self):
return {'cgroup_id': self.kwargs["cgroup_id"]}
class DetailView(tabs.TabView):
tab_group_class = vol_cgroup_tabs.CGroupsDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ cgroup.name|default:cgroup.id }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
cgroup = self.get_data()
table = vol_cgroup_tables.VolumeCGroupsTable(self.request)
context["cgroup"] = cgroup
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(cgroup)
return context
@memoized.memoized_method
def get_data(self):
try:
cgroup_id = self.kwargs['cgroup_id']
cgroup = api.cinder.volume_cgroup_get(self.request,
cgroup_id)
cgroup.volume_type_names = []
for vol_type_id in cgroup.volume_types:
vol_type = api.cinder.volume_type_get(self.request,
vol_type_id)
cgroup.volume_type_names.append(vol_type.name)
cgroup.volume_names = []
search_opts = {'consistencygroup_id': cgroup_id}
volumes = api.cinder.volume_list(self.request,
search_opts=search_opts)
for volume in volumes:
cgroup.volume_names.append(volume.name)
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve consistency group '
'details.'),
redirect=redirect)
return cgroup
@staticmethod
def get_redirect_url():
return reverse('horizon:project:cgroups:index')
def get_tabs(self, request, *args, **kwargs):
cgroup = self.get_data()
return self.tab_group_class(request, cgroup=cgroup, **kwargs)
| {
"content_hash": "541ec4a3391fe4d8ccb68eebc9f93108",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 79,
"avg_line_length": 38.90584415584416,
"alnum_prop": 0.5940081782525244,
"repo_name": "kogotko/carburetor",
"id": "954b5e4a1bbfedf33154a3bf474005b5796684e6",
"size": "12556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/cgroups/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9097503"
},
{
"name": "HTML",
"bytes": "1650202"
},
{
"name": "JavaScript",
"bytes": "4712562"
},
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "5086985"
},
{
"name": "Shell",
"bytes": "18571"
}
],
"symlink_target": ""
} |
"""Replay buffer that performs relabeling."""
import gin
import numpy as np
import tensorflow as tf
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
@gin.configurable
class RelabellingReplayBuffer(tf_uniform_replay_buffer.TFUniformReplayBuffer):
"""A replay buffer that relabels experience."""
def __init__(self, *args, **kwargs):
"""Initialize the replay buffer.
Args:
*args: Arguments.
**kwargs: Keyword arguments.
Additional arguments:
task_distribution: an instance of multitask.TaskDistribution.
sample_batch_size: (int) the batch size.
num_parallel_calls: (int) number of parallel calls for sampling.
num_future_states: (int) number of future states to consider for
future state relabeling.
actor: the actor network.
critic: the critic network.
gamma: (float) the discount factor.
relabel_type: (str) indicator of the relabeling strategy.
      candidate_task_type: (str) within each batch, should we use the states,
next_states, or originally commanded tasks as possible tasks when
relabeling.
relabel_prob: (float) fraction of experience to relabel when sampling.
      keep_current_goal: (bool) for ``last'' and ``future'' relabeling,
should we add both the originally commanded task and the relabeled
task when inserting new experience into the replay buffer.
normalize_cols: (bool) Normalizing the columns has the effect of
including the partition function.
"""
self._task_distribution = kwargs.pop("task_distribution")
self._sample_batch_size = kwargs.pop("sample_batch_size")
self._num_parallel_calls = kwargs.pop("num_parallel_calls")
self._num_future_states = kwargs.pop("num_future_states", 4)
self._actor = kwargs.pop("actor")
self._critic = kwargs.pop("critic")
self._gamma = kwargs.pop("gamma")
self._relabel_type = kwargs.pop("relabel_type", None)
assert self._relabel_type in [None, "last", "future", "soft", "random"]
self._candidate_task_type = kwargs.pop("candidate_task_type", "states")
assert self._candidate_task_type in ["states", "next_states", "tasks"]
self._relabel_prob = kwargs.pop("relabel_prob", 1.0)
self._keep_current_goal = kwargs.pop("keep_current_goal", False)
self._normalize_cols = kwargs.pop("normalize_cols", True)
self._iterator = None
super(RelabellingReplayBuffer, self).__init__(*args, **kwargs)
def get_batch(self):
if self._iterator is None:
dataset = self.as_dataset(
sample_batch_size=self._sample_batch_size,
num_parallel_calls=self._num_parallel_calls,
num_steps=2,
).prefetch(3)
self._iterator = iter(dataset)
experience, unused_info = next(self._iterator)
if self._relabel_type in ["soft", "random"]:
experience = self._soft_relabel(experience)
elif self._relabel_type in ["last", "future"]:
# Reassign the next_states to have the same goal as the current states
_, tasks = self._task_distribution.split(experience.observation[:, 0])
next_states, _ = self._task_distribution.split(experience.observation[:,
1])
next_states_and_tasks = self._task_distribution.combine(
next_states, tasks)
new_observation = tf.concat(
[
experience.observation[:, 0][:, None], next_states_and_tasks[:,
None]
],
axis=1,
)
assert new_observation.shape == experience.observation.shape
experience = experience.replace(observation=new_observation)
if self._relabel_type is not None:
# Recompute rewards and done flags
states, tasks = self._task_distribution.split(experience.observation[:,
0])
next_states, next_tasks = self._task_distribution.split(
experience.observation[:, 1])
rewards, dones = self._task_distribution.evaluate(states,
experience.action[:, 0],
tasks)
# Strictly speaking, we don't need to relabel the next rewards and next
# dones because they end up being thrown away. Only the current rewards
# and dones end up being important.
next_rewards, next_dones = self._task_distribution.evaluate(
next_states, experience.action[:, 1], next_tasks)
new_rewards = tf.concat([rewards[:, None], next_rewards[:, None]], axis=1)
new_dones = tf.concat([dones[:, None], next_dones[:, None]], axis=1)
# 0 if episode is done, 1 if episode is continuing
new_discount = 1.0 - tf.cast(new_dones, tf.float32)
assert new_rewards.shape == experience.reward.shape
assert new_discount.shape == experience.discount.shape
experience = experience.replace(reward=new_rewards, discount=new_discount)
return experience
def _soft_relabel(self, experience):
"""Reassigns tasks to each state and next state.
Does not recompute the rewards or done flags.
Args:
experience: The experience that we want to relabel with inverse RL.
Returns:
relabeled_experience: The relabeled experience.
"""
raise NotImplementedError
def _add_batch(self, items):
"""Adds a trajectory to the replay buffer."""
assert items[0].is_first()
for item in items:
# The items are batched already, so we remove the first dimension.
assert item.observation.shape[1:] == self.data_spec.observation.shape
super(RelabellingReplayBuffer, self)._add_batch(item)
class GoalRelabellingReplayBuffer(RelabellingReplayBuffer):
"""Implements a replay buffer for relabeling goals."""
def _add_batch(self, items):
"""Adds a trajectory to the replay buffer."""
batch_size = len(items)
if self._relabel_type in ["future", "last"]:
relabelled_items = []
for i in range(batch_size):
if self._relabel_type == "future":
relabel_indices = np.random.randint(
i, batch_size, size=self._num_future_states)
else:
relabel_indices = [batch_size - 1]
if self._keep_current_goal:
relabelled_items.append(items[i])
for j in relabel_indices:
state, _ = self._task_distribution.split(items[i].observation)
next_state, _ = self._task_distribution.split(items[j].observation)
task = self._task_distribution.state_to_task(next_state)
state_and_task = self._task_distribution.combine(state, task)
new_item = items[i].replace(observation=state_and_task)
relabelled_items.append(new_item)
items = relabelled_items
super(GoalRelabellingReplayBuffer, self)._add_batch(items)
@tf.function
def _soft_relabel(self, experience):
# experience.observation.shape = [B x T=2 x obs_dim+state_dim]
states, orig_tasks = self._task_distribution.split(
experience.observation[:, 0])
if self._task_distribution.tasks is None:
tasks = orig_tasks
else:
tasks = tf.constant(self._task_distribution.tasks, dtype=tf.float32)
next_states, _ = self._task_distribution.split(experience.observation[:, 1])
if self._candidate_task_type == "states":
candidate_tasks = self._task_distribution.state_to_task(states)
elif self._candidate_task_type == "next_states":
candidate_tasks = self._task_distribution.state_to_task(next_states)
else:
assert self._candidate_task_type == "tasks"
candidate_tasks = tasks
actions = experience.action[:, 0]
num_tasks = tasks.shape[0]
batch_size = states.shape[0]
task_dim = tasks.shape[1]
obs_dim = states.shape[1]
action_dim = actions.shape[1]
action_spec = self._actor.output_tensor_spec
states_tiled = tf.tile(states[:, None], [1, num_tasks, 1]) # B x B x D
states_tiled = tf.reshape(states_tiled,
[batch_size * num_tasks, obs_dim]) # B*B x D
actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1]) # B x B x D
actions_tiled = tf.reshape(actions_tiled,
[batch_size * num_tasks, action_dim]) # B*B x D
tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1]) # B x B x D
tasks_tiled = tf.reshape(tasks_tiled,
[batch_size * num_tasks, task_dim]) # B*B x D
next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])
next_states_tiled = tf.reshape(next_states_tiled,
[batch_size * num_tasks, obs_dim]) # B*B x D
next_relabelled_obs = self._task_distribution.combine(
next_states_tiled, tasks_tiled)
sampled_actions_tiled = self._actor(
next_relabelled_obs, step_type=(), network_state=())[0].sample()
critic_input = (next_relabelled_obs, sampled_actions_tiled)
q_vals, _ = self._critic(critic_input, training=False)
q_vals_vec = tf.reshape(q_vals, (batch_size, num_tasks))
rewards, dones = self._task_distribution.evaluate(states_tiled,
actions_tiled,
tasks_tiled)
dones = tf.cast(dones, tf.float32)
rewards_vec = tf.reshape(rewards, (batch_size, num_tasks))
dones_vec = tf.reshape(dones, (batch_size, num_tasks))
relabelled_obs = self._task_distribution.combine(states_tiled, tasks_tiled)
action_distribution = self._actor(
relabelled_obs, step_type=(), network_state=())[0]
log_pi = common.log_probability(action_distribution, actions_tiled,
action_spec)
log_pi_vec = tf.reshape(log_pi, (batch_size, num_tasks))
logits_vec = (
rewards_vec - log_pi_vec + self._gamma * (1.0 - dones_vec) * q_vals_vec)
if self._relabel_type == "random":
logits_vec = tf.ones_like(logits_vec) # Hack to make sampling random
## End new version
if self._normalize_cols:
logits_vec = logits_vec - tf.math.reduce_logsumexp(
logits_vec, axis=0)[None]
relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1)
### Metrics
global_step = tf.compat.v1.train.get_or_create_global_step()
orig_indices = tf.range(
self._sample_batch_size, dtype=relabel_indices.dtype)
with tf.name_scope("relabelling"):
# How often are the originally commanded goals most optimal?
opt_indices = tf.argmax(logits_vec, axis=1)
orig_is_opt = opt_indices == orig_indices
orig_opt_frac = tf.reduce_mean(tf.cast(orig_is_opt, tf.float32))
tf.compat.v2.summary.scalar(
name="orig_task_optimal", data=orig_opt_frac, step=global_step)
# How often is the relabelled goal optimal?
# The relabel_indices are [B, 1], so we need to remove the extra dim.
relabel_is_opt = tf.squeeze(relabel_indices) == orig_indices
relabel_opt_frac = tf.reduce_mean(tf.cast(relabel_is_opt, tf.float32))
tf.compat.v2.summary.scalar(
name="relabel_task_optimal", data=relabel_opt_frac, step=global_step)
# What are the average Q values of the original tasks?
if batch_size == num_tasks:
indices = tf.transpose(tf.stack([orig_indices, orig_indices], axis=0))
orig_q_vals = tf.gather_nd(logits_vec, indices)
tf.compat.v2.summary.scalar(
name="orig_q_vals",
data=tf.reduce_mean(orig_q_vals),
step=global_step,
)
# What are the average Q values of the relabelled tasks?
indices = tf.transpose(
tf.stack([orig_indices, tf.squeeze(relabel_indices)], axis=0))
relabel_q_vals = tf.gather_nd(logits_vec, indices)
tf.compat.v2.summary.scalar(
name="relabel_q_vals",
data=tf.reduce_mean(relabel_q_vals),
step=global_step,
)
max_q = tf.reduce_max(logits_vec, axis=1)
tf.compat.v2.summary.scalar(
name="max_q", data=tf.reduce_mean(max_q), step=global_step)
### End metrics
# For both state-centric and goal-centric relabelling, the implementation of
# mixing is the same: we randomly replace some of the indices with the
# diagonal.
relabelled_tasks = tf.gather(candidate_tasks, tf.squeeze(relabel_indices))
if self._relabel_prob == 0:
relabelled_tasks = orig_tasks
elif 0 < self._relabel_prob < 1:
      logits = tf.math.log([1.0 - self._relabel_prob, self._relabel_prob])
mask = tf.squeeze(
tf.random.categorical(
logits[None], num_samples=self._sample_batch_size))
mask = tf.cast(mask, tf.float32)[:, None]
relabelled_tasks = mask * orig_tasks + (1 - mask) * relabelled_tasks
states_and_tasks = self._task_distribution.combine(states, relabelled_tasks)
next_states_and_tasks = self._task_distribution.combine(
next_states, relabelled_tasks)
new_observation = tf.concat(
[states_and_tasks[:, None], next_states_and_tasks[:, None]], axis=1)
assert new_observation.shape == experience.observation.shape
experience = experience.replace(observation=new_observation)
return experience
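# Small self-contained illustration (toy numbers, independent of the classes
# above) of the soft-relabelling weights computed in _soft_relabel:
# logits = r - log_pi + gamma * (1 - done) * Q, normalized over the task axis
# before categorical sampling.
def _toy_relabel_logits_example():
  rewards = tf.constant([[0.0, 1.0], [0.5, 0.0]])  # [batch, num_tasks]
  log_pi = tf.constant([[-1.0, -0.5], [-0.2, -2.0]])
  q_vals = tf.constant([[2.0, 0.5], [1.0, 3.0]])
  dones = tf.constant([[0.0, 0.0], [0.0, 1.0]])
  gamma = 0.99
  logits = rewards - log_pi + gamma * (1.0 - dones) * q_vals
  # Normalizing over the batch axis (axis=0) mirrors _normalize_cols above.
  logits = logits - tf.math.reduce_logsumexp(logits, axis=0)[None]
  return tf.random.categorical(logits=logits, num_samples=1)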
| {
"content_hash": "94d7166c28ba5335584f26ee557b5e2f",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 80,
"avg_line_length": 44.84228187919463,
"alnum_prop": 0.631519868293048,
"repo_name": "google-research/google-research",
"id": "54716e6008a390eb643f7a60244110c0f0e4131b",
"size": "13971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hipi/relabelling_replay_buffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""
kinto
Kinto is a minimalist JSON storage service with synchronisation and sharing abilities. It is meant to be easy to use and easy to self-host. **Limitations of this OpenAPI specification:** 1. Validation on OR clauses is not supported (e.g. provide `data` or `permissions` in patch operations). 2. [Filtering](http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html) is supported on any field by using `?{prefix}{field_name}={value}`. 3. [Backoff headers](http://kinto.readthedocs.io/en/stable/api/1.x/backoff.html) may occur with any response, but they are only present if the server is under in heavy load, so we cannot validate them on every request. They are listed only on the default error message. 4. [Collection schemas](http://kinto.readthedocs.io/en/stable/api/1.x/collections.html#collection-json-schema) can be provided when defining a collection, but they are not validated by this specification.
OpenAPI spec version: 1.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from setuptools import setup, find_packages
NAME = "swagger_client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="kinto",
author_email="",
url="",
keywords=["Swagger", "kinto"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Kinto is a minimalist JSON storage service with synchronisation and sharing abilities. It is meant to be easy to use and easy to self-host. **Limitations of this OpenAPI specification:** 1. Validation on OR clauses is not supported (e.g. provide `data` or `permissions` in patch operations). 2. [Filtering](http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html) is supported on any field by using `?{prefix}{field_name}={value}`. 3. [Backoff headers](http://kinto.readthedocs.io/en/stable/api/1.x/backoff.html) may occur with any response, but they are only present if the server is under in heavy load, so we cannot validate them on every request. They are listed only on the default error message. 4. [Collection schemas](http://kinto.readthedocs.io/en/stable/api/1.x/collections.html#collection-json-schema) can be provided when defining a collection, but they are not validated by this specification.
"""
)
| {
"content_hash": "67a23858cf93082e79a80373a4aba0f0",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 973,
"avg_line_length": 61.23076923076923,
"alnum_prop": 0.7283291457286433,
"repo_name": "gabisurita/kinto-codegen-tutorial",
"id": "a094f2ec051d01ba496489b2bddbc6472c2e6bcf",
"size": "3201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-client/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "95504"
},
{
"name": "Python",
"bytes": "662063"
},
{
"name": "Shell",
"bytes": "3259"
}
],
"symlink_target": ""
} |
import json
from dojo.models import Finding
class SemgrepParser(object):
def get_scan_types(self):
return ["Semgrep JSON Report"]
def get_label_for_scan_types(self, scan_type):
return scan_type # no custom label for now
def get_description_for_scan_types(self, scan_type):
return "Import Semgrep output (--json)"
def get_findings(self, filename, test):
data = json.load(filename)
dupes = dict()
for item in data["results"]:
finding = Finding(
test=test,
title=item["check_id"],
severity=self.convert_severity(item["extra"]["severity"]),
description=self.get_description(item),
file_path=item['path'],
line=item["start"]["line"],
static_finding=True,
dynamic_finding=False,
vuln_id_from_tool=item["check_id"],
nb_occurences=1,
)
# manage CWE
if 'cwe' in item["extra"]["metadata"]:
finding.cwe = int(item["extra"]["metadata"].get("cwe").partition(':')[0].partition('-')[2])
# manage references from metadata
if 'references' in item["extra"]["metadata"]:
finding.references = "\n".join(item["extra"]["metadata"]["references"])
# manage mitigation from metadata
if 'fix' in item["extra"]:
finding.mitigation = item["extra"]["fix"]
elif 'fix_regex' in item["extra"]:
finding.mitigation = "\n".join([
"**You can automaticaly apply this regex:**",
"\n```\n",
json.dumps(item["extra"]["fix_regex"]),
"\n```\n",
])
dupe_key = finding.title + finding.file_path + str(finding.line)
if dupe_key in dupes:
find = dupes[dupe_key]
find.nb_occurences += 1
else:
dupes[dupe_key] = finding
return list(dupes.values())
def convert_severity(self, val):
if "WARNING" == val.upper():
return "Low"
elif "ERROR" == val.upper():
return "High"
elif "INFO" == val.upper():
return "Info"
else:
raise ValueError(f"Unknown value for severity: {val}")
def get_description(self, item):
description = ''
message = item["extra"]["message"]
description += '**Result message:** {}\n'.format(message)
snippet = item["extra"].get("lines")
if snippet is not None:
description += '**Snippet:**\n```{}```\n'.format(snippet)
return description
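# Illustrative input (assumption): a trimmed `semgrep --json` result containing
# only the fields read by the parser above; the rule id, path and reference URL
# are made up for demonstration.
EXAMPLE_SEMGREP_REPORT = {
    "results": [
        {
            "check_id": "python.lang.security.audit.eval-detected",
            "path": "app/views.py",
            "start": {"line": 42},
            "extra": {
                "severity": "ERROR",
                "message": "eval() call on user-controlled input",
                "lines": "eval(user_input)",
                "fix": "use ast.literal_eval or an explicit whitelist",
                "metadata": {
                    "cwe": "CWE-95: Eval Injection",
                    "references": ["https://example.com/semgrep-rule"],
                },
            },
        }
    ]
}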
| {
"content_hash": "b83c70f58ac964667b35ce8d662044f3",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 107,
"avg_line_length": 32.411764705882355,
"alnum_prop": 0.5078039927404718,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "684000da70f2c4d83e245f32294aaf89f5b5ebe7",
"size": "2755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/tools/semgrep/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
"""
sentry.runner.commands.init
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
import click
@click.command()
@click.argument('directory', required=False)
@click.pass_context
def init(ctx, directory):
"Initialize new configuration directory."
from sentry.runner.settings import discover_configs, generate_settings
if directory is not None:
os.environ['SENTRY_CONF'] = directory
directory, py, yaml = discover_configs()
# In this case, the config is pointing directly to a file, so we
# must maintain old behavior, and just abort
if yaml is None and os.path.isfile(py):
# TODO: Link to docs explaining about new behavior of SENTRY_CONF?
raise click.ClickException("Found legacy '%s' file, so aborting." % click.format_filename(py))
if yaml is None:
raise click.ClickException("DIRECTORY must not be a file.")
if directory and not os.path.exists(directory):
os.makedirs(directory)
py_contents, yaml_contents = generate_settings()
if os.path.isfile(yaml):
click.confirm("File already exists at '%s', overwrite?" % click.format_filename(yaml), abort=True)
with click.open_file(yaml, 'w') as fp:
fp.write(yaml_contents)
if os.path.isfile(py):
click.confirm("File already exists at '%s', overwrite?" % click.format_filename(py), abort=True)
with click.open_file(py, 'w') as fp:
fp.write(py_contents)
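# Hedged usage sketch: drive the command with Click's test runner against a
# throw-away directory. It assumes a working Sentry install (so that
# generate_settings() can render the default files) and Python 3 for
# tempfile.TemporaryDirectory; the expected file names are indicative only.
if __name__ == '__main__':
    import tempfile
    from click.testing import CliRunner
    with tempfile.TemporaryDirectory() as tmpdir:
        result = CliRunner().invoke(init, [tmpdir])
        print(result.output)
        print(sorted(os.listdir(tmpdir)))  # e.g. ['config.yml', 'sentry.conf.py']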
| {
"content_hash": "1bde408776522c9b6863025b980f4cf5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 106,
"avg_line_length": 32.6530612244898,
"alnum_prop": 0.67625,
"repo_name": "nicholasserra/sentry",
"id": "98ece4d40b797013ab8d6958e0514cdb9e0437b0",
"size": "1600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/runner/commands/init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174940"
},
{
"name": "HTML",
"bytes": "199996"
},
{
"name": "JavaScript",
"bytes": "609445"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "4816"
},
{
"name": "Python",
"bytes": "8613631"
}
],
"symlink_target": ""
} |
"""Prefix DAG permissions.
Revision ID: 849da589634d
Revises: 45ba3f1493b9
Create Date: 2020-10-01 17:25:10.006322
"""
from flask_appbuilder import SQLA
from flask_appbuilder.security.sqla.models import Permission, PermissionView, ViewMenu
from airflow import settings
from airflow.security import permissions
# revision identifiers, used by Alembic.
revision = '849da589634d'
down_revision = '45ba3f1493b9'
branch_labels = None
depends_on = None
def prefix_individual_dag_permissions(session):
dag_perms = ['can_dag_read', 'can_dag_edit']
prefix = "DAG:"
perms = (
session.query(PermissionView)
.join(Permission)
.filter(Permission.name.in_(dag_perms))
.join(ViewMenu)
.filter(ViewMenu.name != 'all_dags')
.filter(ViewMenu.name.notlike(prefix + '%'))
.all()
)
resource_ids = {permission.view_menu.id for permission in perms}
vm_query = session.query(ViewMenu).filter(ViewMenu.id.in_(resource_ids))
vm_query.update({ViewMenu.name: prefix + ViewMenu.name}, synchronize_session=False)
session.commit()
def get_or_create_dag_resource(session):
dag_resource = get_resource_query(session, permissions.RESOURCE_DAG).first()
if dag_resource:
return dag_resource
dag_resource = ViewMenu()
dag_resource.name = permissions.RESOURCE_DAG
session.add(dag_resource)
session.commit()
return dag_resource
def get_or_create_action(session, action_name):
action = get_action_query(session, action_name).first()
if action:
return action
action = Permission()
action.name = action_name
session.add(action)
session.commit()
return action
def get_resource_query(session, resource_name):
return session.query(ViewMenu).filter(ViewMenu.name == resource_name)
def get_action_query(session, action_name):
return session.query(Permission).filter(Permission.name == action_name)
def get_permission_with_action_query(session, action):
return session.query(PermissionView).filter(PermissionView.permission == action)
def get_permission_with_resource_query(session, resource):
return session.query(PermissionView).filter(PermissionView.view_menu_id == resource.id)
def update_permission_action(session, permission_query, action):
permission_query.update({PermissionView.permission_id: action.id}, synchronize_session=False)
session.commit()
def get_permission(session, resource, action):
return (
session.query(PermissionView)
.filter(PermissionView.view_menu == resource)
.filter(PermissionView.permission == action)
.first()
)
def update_permission_resource(session, permission_query, resource):
for permission in permission_query.all():
if not get_permission(session, resource, permission.permission):
permission.view_menu = resource
else:
session.delete(permission)
session.commit()
def migrate_to_new_dag_permissions(db):
# Prefix individual dag perms with `DAG:`
prefix_individual_dag_permissions(db.session)
# Update existing permissions to use `can_read` instead of `can_dag_read`
can_dag_read_action = get_action_query(db.session, 'can_dag_read').first()
old_can_dag_read_permissions = get_permission_with_action_query(db.session, can_dag_read_action)
can_read_action = get_or_create_action(db.session, 'can_read')
update_permission_action(db.session, old_can_dag_read_permissions, can_read_action)
# Update existing permissions to use `can_edit` instead of `can_dag_edit`
can_dag_edit_action = get_action_query(db.session, 'can_dag_edit').first()
old_can_dag_edit_permissions = get_permission_with_action_query(db.session, can_dag_edit_action)
can_edit_action = get_or_create_action(db.session, 'can_edit')
update_permission_action(db.session, old_can_dag_edit_permissions, can_edit_action)
# Update existing permissions for `all_dags` resource to use `DAGs` resource.
all_dags_resource = get_resource_query(db.session, 'all_dags').first()
if all_dags_resource:
old_all_dags_permission = get_permission_with_resource_query(db.session, all_dags_resource)
dag_resource = get_or_create_dag_resource(db.session)
update_permission_resource(db.session, old_all_dags_permission, dag_resource)
# Delete the `all_dags` resource
db.session.delete(all_dags_resource)
# Delete `can_dag_read` action
if can_dag_read_action:
db.session.delete(can_dag_read_action)
# Delete `can_dag_edit` action
if can_dag_edit_action:
db.session.delete(can_dag_edit_action)
db.session.commit()
def upgrade():
db = SQLA()
db.session = settings.Session
migrate_to_new_dag_permissions(db)
db.session.commit()
db.session.close()
def downgrade():
pass
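# Worked example of what upgrade() does to Flask-AppBuilder (action, resource)
# pairs; 'example_dag' is a hypothetical DAG id and <RESOURCE_DAG> stands for
# whatever permissions.RESOURCE_DAG resolves to:
#
#   ('can_dag_read', 'example_dag')  ->  ('can_read', 'DAG:example_dag')
#   ('can_dag_edit', 'example_dag')  ->  ('can_edit', 'DAG:example_dag')
#   ('can_dag_read', 'all_dags')     ->  ('can_read', <RESOURCE_DAG>)
#   ('can_dag_edit', 'all_dags')     ->  ('can_edit', <RESOURCE_DAG>)
#
# Duplicate (action, resource) pairs produced by the merge are deleted rather
# than re-created, and the legacy 'all_dags' resource and 'can_dag_*' actions
# are removed afterwards.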
| {
"content_hash": "caed6ef651778b158e0703bd85362495",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 100,
"avg_line_length": 32.046052631578945,
"alnum_prop": 0.7039622254157257,
"repo_name": "dhuang/incubator-airflow",
"id": "fbbaa3adc3a3e809a20b8c2992463f1d35214fd1",
"size": "5659",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/migrations/versions/849da589634d_prefix_dag_permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
from .base import NewRelicBaseClient
from .exceptions import ItemAlreadyExistsError, ItemNotFoundError
from .synthetics import SyntheticsClient
class AlertClient(NewRelicBaseClient):
def __init__(
self, api_key,
base_url='https://api.newrelic.com',
timeout=10
):
super(AlertClient, self).__init__(api_key, base_url, timeout)
def get_alert_policies(self, name=None):
url = '{}/v2/alerts_policies.json'.format(self.base_url)
if name:
payload = 'filter[name]={}'.format(name)
r = self._get(
url,
headers=self.default_headers,
timeout=self.timeout,
params=payload
)
else:
r = self._get(
url,
headers=self.default_headers,
timeout=self.timeout
)
res = r.json()['policies']
return res
def create_alert_policy(
self,
policy_name,
incident_preference=None,
check_unique=True
):
        # The New Relic API allows creating multiple alert policies
        # with the same name, so give callers the option to disallow that here
if check_unique:
policies = self.get_alert_policies(policy_name)
if len(policies) != 0:
raise ItemAlreadyExistsError(
'Alert policy with name "{}" already exists'
.format(policy_name)
)
url = '{}/v2/alerts_policies.json'.format(self.base_url)
payload = {
'policy': {
'name': policy_name
}
}
if incident_preference is not None:
payload['policy']['incident_preference'] = incident_preference
res = self._post(
url,
headers=self.default_headers,
timeout=self.timeout,
json=payload
)
return res.json()['policy']
def delete_alert_policy(self, policy_name):
try:
policy = self.get_alert_policies(policy_name)[0]
except IndexError:
raise ItemNotFoundError(
'Alert Policy with name "{}" not found'.format(policy_name)
)
url = '{}/v2/alerts_policies/{}.json'.format(
self.base_url, policy['id']
)
self._delete(
url,
headers=self.default_headers,
timeout=self.timeout,
)
def get_alert_conditions(self, policy_name):
try:
policy = self.get_alert_policies(policy_name)[0]
except IndexError:
raise ItemNotFoundError(
'Alert policy with name "{}" not found'.format(policy_name)
)
url = '{}/v2/alerts_synthetics_conditions.json'.format(self.base_url)
payload = 'policy_id={}'.format(policy['id'])
r = self._get(
url,
headers=self.default_headers,
timeout=self.timeout,
params=payload
)
return r.json()
def create_synthetics_alert_condition(
self,
policy_name,
condition_name,
monitor_name,
runbook_url=None,
enabled=False,
check_unique=True
):
try:
policy = self.get_alert_policies(policy_name)[0]
except IndexError:
raise ItemNotFoundError(
'Alert policy with "{}" not found'.format(policy_name)
)
synthetics = SyntheticsClient(self.api_key)
monitor = synthetics.get_monitor_by_name(monitor_name)
if not monitor:
raise ItemNotFoundError(
'Monitor with name "{}" not found'.format(monitor_name)
)
        # The New Relic API allows creating multiple alert conditions
        # for the same monitor within the same alert policy;
        # to avoid piling up duplicate entries, disallow this by default
if check_unique:
alert_conditions = self.get_alert_conditions(policy_name)
# we are only interested in synthetics conditions
try:
synth_conditions = alert_conditions['synthetics_conditions']
except KeyError:
# we don't have any alert conditions for synthetics
# no duplicates then
pass
else:
for condition in synth_conditions:
if condition['monitor_id'] == monitor['id']:
raise ItemAlreadyExistsError(
'Synthetics Alert Condition for monitor "{}" '
'is already present in policy "{}" with name "{}"'
.format(
monitor_name,
policy_name,
condition['name']
)
)
url = (
'{}/v2/alerts_synthetics_conditions/policies/{}.json'
.format(self.base_url, policy['id'])
)
payload = {
'synthetics_condition': {
'name': condition_name,
'monitor_id': monitor['id'],
'enabled': enabled
}
}
if runbook_url:
payload['synthetics_condition']['runbook_url'] = runbook_url
self._post(
url,
headers=self.default_headers,
timeout=self.timeout,
json=payload
)
def delete_synthetics_alert_conditions(self, policy_name, monitor_name):
"""Deletes all synthetics alert conditions that match pair
policy_name:monitor_name
Returns count of conditions deleted
"""
synthetics = SyntheticsClient(self.api_key)
monitor = synthetics.get_monitor_by_name(monitor_name)
if not monitor:
raise ItemNotFoundError(
'Monitor with name "{}" not found'.format(monitor_name)
)
alert_conditions_deleted = 0
alert_conditions = self.get_alert_conditions(policy_name)
# we are only interested in synthetics conditions
try:
synthetics_conditions = alert_conditions['synthetics_conditions']
except KeyError:
# we don't have any alert conditions for synthetics
# no duplicates then
pass
else:
for condition in synthetics_conditions:
if condition['monitor_id'] == monitor['id']:
url = (
'{}/v2/alerts_synthetics_conditions/{}.json'
.format(self.base_url, condition['id'])
)
self._delete(
url,
headers=self.default_headers,
timeout=self.timeout
)
alert_conditions_deleted += 1
return alert_conditions_deleted
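# Hedged usage sketch (placeholders throughout): the API key, policy name and
# monitor name are made up, the calls go straight to the live New Relic v2
# REST API, and 'PER_POLICY' is just one of the documented incident
# preferences.
if __name__ == '__main__':
    client = AlertClient(api_key='NRAK-EXAMPLE')
    client.create_alert_policy('my-policy', incident_preference='PER_POLICY')
    client.create_synthetics_alert_condition(
        policy_name='my-policy',
        condition_name='my-monitor is failing',
        monitor_name='my-monitor',
        enabled=True,
    )
    print(client.get_alert_conditions('my-policy'))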
| {
"content_hash": "ec2ce80cc528f9ef379d0ba1cf618e0f",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 78,
"avg_line_length": 34.229268292682924,
"alnum_prop": 0.5178851360980476,
"repo_name": "NativeInstruments/newrelic-cli",
"id": "fa7c34dbeabcfcba1e467b470f0416747ce0a2a6",
"size": "7017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newrelic_cli/alerts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "30"
},
{
"name": "Python",
"bytes": "77591"
},
{
"name": "Smarty",
"bytes": "63"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=8,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
1000: 1e-2,
2000: 1e-3,
10000: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 1024
NUM_FILTERS = 50
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 10,
'stride': 2,
'nonlinearity': rectify,
'W': Normal(std=1/sqrt(source.seq_length))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N * NUM_FILTERS)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.output_shape()[1] * source.output_shape()[2],
'W': Normal(std=1/sqrt(N)),
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| {
"content_hash": "f8b03abcbc3004796ea02a2a0d2110de",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 86,
"avg_line_length": 31.00995024875622,
"alnum_prop": 0.5725974651050858,
"repo_name": "mmottahedi/neuralnilm_prototype",
"id": "8d27f232604f49dc75f6c2a0ac6c57117e57c4c8",
"size": "6233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e343.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
} |
"""This module exports the jscs plugin class."""
from SublimeLinter.lint import Linter
class Jscs(Linter):
"""Provides an interface to jscs."""
syntax = ('javascript', 'html')
cmd = 'jscs -r checkstyle'
version_args = '--version'
version_re = r'(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 1.0.10' # 1.0.10 introduced checkstyle reporter
regex = (
r'^\s+?<error line="(?P<line>\d+)" '
r'column="(?P<col>\d+)" '
# jscs always reports with error severity; show as warning
r'severity="(?P<warning>error)" '
r'message="(?P<message>.+?)"'
)
multiline = True
selectors = {'html': 'source.js.embedded.html'}
tempfile_suffix = 'js'
config_file = ('--config', '.jscsrc', '~')
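# Standalone sketch of what the regex above captures from one line of
# `jscs -r checkstyle` output; the pattern is repeated verbatim so the check
# does not rely on SublimeLinter's class machinery, and the sample line
# itself is made up.
if __name__ == '__main__':
    import re
    pattern = (
        r'^\s+?<error line="(?P<line>\d+)" '
        r'column="(?P<col>\d+)" '
        r'severity="(?P<warning>error)" '
        r'message="(?P<message>.+?)"'
    )
    sample = '    <error line="3" column="10" severity="error" message="Missing space after keyword" />'
    print(re.match(pattern, sample).groupdict())
    # {'line': '3', 'col': '10', 'warning': 'error', 'message': 'Missing space after keyword'}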
| {
"content_hash": "4bcadf0fb54d11d64874858ae11d6391",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 30.8,
"alnum_prop": 0.574025974025974,
"repo_name": "Raynos/SublimeLinter-jscs",
"id": "5499b3ae747142c092a884029755d320c6b5564d",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linter.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from blaze.interactive import Data, compute, concrete_head, expr_repr, to_html
import datetime
from odo import into, append
from odo.backends.csv import CSV
from blaze import discover, transform
from blaze.compute.core import compute
from blaze.compute.python import compute
from blaze.expr import symbol
from datashape import dshape
from blaze.utils import tmpfile, example
import pytest
import sys
from types import MethodType
import pandas as pd
import pandas.util.testing as tm
import numpy as np
data = (('Alice', 100),
('Bob', 200))
L = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
t = Data(data, fields=['name', 'amount'])
x = np.ones((2, 2))
def test_table_raises_on_inconsistent_inputs():
with pytest.raises(ValueError):
t = Data(data, schema='{name: string, amount: float32}',
dshape=dshape("{name: string, amount: float32}"))
def test_resources():
assert t._resources() == {t: t.data}
def test_resources_fail():
t = symbol('t', 'var * {x: int, y: int}')
d = t[t['x'] > 100]
with pytest.raises(ValueError):
compute(d)
def test_compute_on_Data_gives_back_data():
assert compute(Data([1, 2, 3])) == [1, 2, 3]
def test_len():
assert len(t) == 2
assert len(t.name) == 2
def test_compute():
assert list(compute(t['amount'] + 1)) == [101, 201]
def test_create_with_schema():
t = Data(data, schema='{name: string, amount: float32}')
assert t.schema == dshape('{name: string, amount: float32}')
def test_create_with_raw_data():
t = Data(data, fields=['name', 'amount'])
assert t.schema == dshape('{name: string, amount: int64}')
assert t.name
assert t.data == data
def test_repr():
result = expr_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = expr_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = Data(tuple((i, i**2) for i in range(100)), fields=['x', 'y'])
assert t2.dshape == dshape('100 * {x: int64, y: int64}')
result = expr_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
def test_str_does_not_repr():
# see GH issue #1240.
d = Data([('aa', 1), ('b', 2)], name="ZZZ",
dshape='2 * {a: string, b: int64}')
expr = transform(d, c=d.a.strlen() + d.b)
assert str(
expr) == "Merge(_child=ZZZ, children=(ZZZ, label(strlen(_child=ZZZ.a) + ZZZ.b, 'c')))"
def test_repr_of_scalar():
assert repr(t.amount.sum()) == '300'
def test_mutable_backed_repr():
mutable_backed_table = Data([[0]], fields=['col1'])
repr(mutable_backed_table)
def test_dataframe_backed_repr():
df = pd.DataFrame(data=[0], columns=['col1'])
dataframe_backed_table = Data(df)
repr(dataframe_backed_table)
def test_dataframe_backed_repr_complex():
df = pd.DataFrame([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
columns=['id', 'name', 'balance'])
t = Data(df)
repr(t[t['balance'] < 0])
def test_repr_html_on_no_resources_symbol():
t = symbol('t', '5 * {id: int, name: string, balance: int}')
assert to_html(t) == 't'
def test_expr_repr_empty():
s = repr(t[t.amount > 1e9])
assert isinstance(s, str)
assert 'amount' in s
def test_to_html():
s = to_html(t)
assert s
assert 'Alice' in s
assert '<table' in s
assert to_html(1) == '1'
assert to_html(t.count()) == '2'
def test_to_html_on_arrays():
s = to_html(Data(np.ones((2, 2))))
assert '1' in s
assert 'br>' in s
def test_repr_html():
assert '<table' in t._repr_html_()
assert '<table' in t.name._repr_html_()
def test_into():
assert into(list, t) == into(list, data)
def test_serialization():
import pickle
t2 = pickle.loads(pickle.dumps(t))
assert t.schema == t2.schema
assert t._name == t2._name
def test_table_resource():
with tmpfile('csv') as filename:
ds = dshape('var * {a: int, b: int}')
csv = CSV(filename)
append(csv, [[1, 2], [10, 20]], dshape=ds)
t = Data(filename)
assert isinstance(t.data, CSV)
assert into(list, compute(t)) == into(list, csv)
def test_concretehead_failure():
t = symbol('t', 'var * {x:int, y:int}')
d = t[t['x'] > 100]
with pytest.raises(ValueError):
concrete_head(d)
def test_into_np_ndarray_column():
t = Data(L, fields=['id', 'name', 'balance'])
expr = t[t.balance < 0].name
colarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(colarray)
def test_into_nd_array_selection():
t = Data(L, fields=['id', 'name', 'balance'])
expr = t[t['balance'] < 0]
selarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(selarray)
def test_into_nd_array_column_failure():
tble = Data(L, fields=['id', 'name', 'balance'])
expr = tble[tble['balance'] < 0]
colarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(colarray)
def test_Data_attribute_repr():
t = Data(CSV(example('accounts-datetimes.csv')))
result = t.when.day
expected = pd.DataFrame({'when_day': [1, 2, 3, 4, 5]})
assert repr(result) == repr(expected)
def test_can_trivially_create_csv_Data():
Data(example('iris.csv'))
# in context
with Data(example('iris.csv')) as d:
assert d is not None
def test_can_trivially_create_csv_Data_with_unicode():
if sys.version[0] == '2':
assert isinstance(Data(example(u'iris.csv')).data, CSV)
def test_can_trivially_create_sqlite_table():
pytest.importorskip('sqlalchemy')
Data('sqlite:///'+example('iris.db')+'::iris')
# in context
with Data('sqlite:///'+example('iris.db')+'::iris') as d:
assert d is not None
@pytest.mark.xfail(sys.platform != 'darwin', reason="h5py/pytables mismatch")
@pytest.mark.skipif(sys.version_info[:2] == (3, 4) and sys.platform == 'win32',
reason='PyTables + Windows + Python 3.4 crashes')
def test_can_trivially_create_pytables():
pytest.importorskip('tables')
with Data(example('accounts.h5')+'::/accounts') as d:
assert d is not None
def test_data_passes_kwargs_to_resource():
assert Data(example('iris.csv'), encoding='ascii').data.encoding == 'ascii'
def test_data_on_iterator_refies_data():
data = [1, 2, 3]
d = Data(iter(data))
assert into(list, d) == data
assert into(list, d) == data
# in context
with Data(iter(data)) as d:
assert d is not None
def test_Data_on_json_is_concrete():
d = Data(example('accounts-streaming.json'))
assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500
assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500
def test_repr_on_nd_array_doesnt_err():
d = Data(np.ones((2, 2, 2)))
repr(d + 1)
def test_generator_reprs_concretely():
x = [1, 2, 3, 4, 5, 6]
d = Data(x)
expr = d[d > 2] + 1
assert '4' in repr(expr)
def test_incompatible_types():
d = Data(pd.DataFrame(L, columns=['id', 'name', 'amount']))
with pytest.raises(ValueError):
d.id == 'foo'
result = compute(d.id == 3)
expected = pd.Series([False, False, True, False, False], name='id')
tm.assert_series_equal(result, expected)
def test___array__():
x = np.ones(4)
d = Data(x)
assert (np.array(d + 1) == x + 1).all()
d = Data(x[:2])
x[2:] = d + 1
assert x.tolist() == [1, 1, 2, 2]
def test_python_scalar_protocols():
d = Data(1)
assert int(d + 1) == 2
assert float(d + 1.0) == 2.0
assert bool(d > 0) is True
assert complex(d + 1.0j) == 1 + 1.0j
def test_iter():
x = np.ones(4)
d = Data(x)
assert list(d + 1) == [2, 2, 2, 2]
@pytest.mark.xfail(
reason="DataFrame constructor doesn't yet support __array__"
)
def test_DataFrame():
x = np.array([(1, 2), (1., 2.)], dtype=[('a', 'i4'), ('b', 'f4')])
d = Data(x)
assert isinstance(pd.DataFrame(d), pd.DataFrame)
def test_head_compute():
data = tm.makeMixedDataFrame()
t = symbol('t', discover(data))
db = into('sqlite:///:memory:::t', data, dshape=t.dshape)
n = 2
d = Data(db)
# skip the header and the ... at the end of the repr
expr = d.head(n)
s = repr(expr)
assert '...' not in s
result = s.split('\n')[1:]
assert len(result) == n
def test_scalar_sql_compute():
t = into('sqlite:///:memory:::t', data,
dshape=dshape('var * {name: string, amount: int}'))
d = Data(t)
assert repr(d.amount.sum()) == '300'
def test_no_name_for_simple_data():
d = Data([1, 2, 3])
assert repr(d) == ' \n0 1\n1 2\n2 3'
assert not d._name
d = Data(1)
assert not d._name
assert repr(d) == '1'
def test_coerce_date_and_datetime():
x = datetime.datetime.now().date()
d = Data(x)
assert repr(d) == repr(x)
x = datetime.datetime.now()
d = Data(x)
assert repr(d) == repr(x)
def test_highly_nested_repr():
data = [[0, [[1, 2], [3]], 'abc']]
d = Data(data)
assert 'abc' in repr(d.head())
def test_asarray_fails_on_different_column_names():
vs = {'first': [2., 5., 3.],
'second': [4., 1., 4.],
'third': [6., 4., 3.]}
df = pd.DataFrame(vs)
with pytest.raises(ValueError):
Data(df, fields=list('abc'))
def test_data_does_not_accept_columns_kwarg():
with pytest.raises(ValueError):
Data([(1, 2), (3, 4)], columns=list('ab'))
def test_functions_as_bound_methods():
"""
Test that all functions on an InteractiveSymbol are instance methods
of that object.
"""
# Filter out __class__ and friends that are special, these can be
# callables without being instance methods.
callable_attrs = filter(
callable,
(getattr(t, a, None) for a in dir(t) if not a.startswith('__')),
)
for attr in callable_attrs:
assert isinstance(attr, MethodType)
# Make sure this is bound to the correct object.
assert attr.__self__ is t
| {
"content_hash": "1beadb826e02c28f946a1afca05be7a6",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 94,
"avg_line_length": 25.278728606356967,
"alnum_prop": 0.581971177096431,
"repo_name": "caseyclements/blaze",
"id": "fb36543b58b6665f37ac252369c4f4902d30c254",
"size": "10339",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "blaze/tests/test_interactive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "753339"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.utils.translation import gettext_lazy as _
import horizon
LOG = logging.getLogger(__name__)
class Aggregates(horizon.Panel):
name = _("Host Aggregates")
slug = 'aggregates'
policy_rules = (("compute", "compute_extension:aggregates"),)
permissions = ('openstack.services.compute',)
def allowed(self, context):
if (('compute' in settings.SYSTEM_SCOPE_SERVICES) !=
bool(context['request'].user.system_scoped)):
return False
return super().allowed(context)
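# Worked example of allowed(): the panel is visible only when the token scope
# matches where the compute panels are configured to live (values below are
# illustrative).
#
#   'compute' in SYSTEM_SCOPE_SERVICES | system-scoped token | panel allowed
#   -----------------------------------+---------------------+--------------
#   True                               | True                | yes*
#   True                               | False               | no
#   False                              | True                | no
#   False                              | False               | yes*
#
# (*) subject to the usual policy_rules / permissions checks in the parent
#     Panel.allowed().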
| {
"content_hash": "b5e316e24f82b5830896c181c817a470",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 26.545454545454547,
"alnum_prop": 0.666095890410959,
"repo_name": "openstack/horizon",
"id": "315bef2e05ab8c784e54b1a0c016ec21d93bb342",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/aggregates/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "583449"
},
{
"name": "JavaScript",
"bytes": "2585531"
},
{
"name": "Python",
"bytes": "5370605"
},
{
"name": "SCSS",
"bytes": "133237"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
} |
'''
Test Cases for DocumentConverter Class for WordCloud Project
Daniel Klein
Computer-Based Honors Program
The University of Alabama
9.27.2013
'''
import unittest
import os, os.path
from src.core.python.SupremeCourtOpinionFileConverter import SupremeCourtOpinionFileConverter
##### Here are all the global variables used in these tests.
VALID_OPINION_FILE_LINES = ([
"""\
TITLE: UNITED STATES v. JOHNSON ET AL., DOING BUSINESS AS UNITED STATES\
DENTAL CO., ET AL.\
""",
"""CASE NUMBER: No. 43""",
"""US CITATION: 323 U.S. 273""",
"""SUPREME COURT CITATION: 65 S. Ct. 249""",
"""LAWYERS ED CITATION: 89 L. Ed. 236""",
"""LEXIS CITATION: 1944 U.S. LEXIS 1230""",
"""\
FULL CITATION: 323 U.S. 273; 65 S. Ct. 249; 89 L. Ed. 236; 1944 U.S. LEXIS 1230\
""",
"""DATES: November 8, 1944, Argued;December 18, 1944, Decided;""",
"""DISPOSITION: 53 F.Supp. 596, affirmed.""",
"""OPINION TYPE: concur""",
"""* * * * * * * *""",
"""MR. JUSTICE MURPHY, concurring.""",
"""I join in the opinion of the Court and believe that the judgment should be \
affirmed.""",
"""Congress has the constitutional power to fix venue at any place where a \
crime occurs. Our problem here is to determine, in the absence of a specific \
venue provision, where the crime outlawed by the Federal Denture Act occurred \
for purposes of venue.""",
"""The Act prohibits the use of the mails for the purpose of sending or \
bringing into any state certain prohibited articles. It is undisputed that \
when a defendant places a prohibited article in the mails in Illinois for \
the purpose of sending it into Delaware he has completed a statutory offense. \
Hence he is triable in Illinois. But to hold that the statutory crime also \
encompasses the receipt of the prohibited article in Delaware, justifying a \
trial at that point, requires an implication that I am unwilling to make in \
the absence of more explicit Congressional language.""",
"""Very often the difference between liberty and imprisonment in cases where \
the direct evidence offered by the government and the defendant is evenly \
balanced depends upon the presence of character witnesses. The defendant is \
more likely to obtain their presence in the district of his residence, which \
in this instance is usually the place where the prohibited article is mailed. \
The inconvenience, expense and loss of time involved in transplanting these \
witnesses to testify in trials far removed from their homes are often too \
great to warrant their use. Moreover, they are likely to lose much of their \
effectiveness before a distant jury that knows nothing of their reputations. \
Such factors make it difficult for me to conclude, where Congress has not \
said so specifically, that we should construe the Federal Denture Act as \
covering more than the first sufficient and punishable use of the mails \
insofar as the sender of a prohibited article is concerned. The principle of \
narrow construction of criminal statutes does not warrant interpreting the \
"use" of the mails to cover all possible uses in light of the foregoing \
considerations."""])
CASE_TITLE = """\
UNITED STATES v. JOHNSON ET AL., DOING BUSINESS AS UNITED STATES\
DENTAL CO., ET AL.\
"""
CASE_NUM = "No. 43"
CASE_US_CITE = "323 U.S. 273"
CASE_SUPREME_COURT_CITE = "65 S. Ct. 249"
CASE_LAWYERS_ED_CITE = "89 L. Ed. 236"
CASE_LEXIS_CITE = "1944 U.S. LEXIS 1230"
CASE_FULL_CITE = "323 U.S. 273; 65 S. Ct. 249; 89 L. Ed. 236; 1944 U.S. LEXIS 1230"
CASE_DATES = "November 8, 1944 (Argued) December 18, 1944 (Decided) " # THIS MIGHT CHANGE!!
CASE_DISPOSITION = "53 F.Supp. 596, affirmed."
OPINION_AUTHOR = "MURPHY"
OPINION_TYPE = "concur"
OPINION_TEXT = "\n".join(VALID_OPINION_FILE_LINES[11:])
TEST_FILE_PATH = os.path.join(os.path.abspath(os.curdir), "MURPHY_1944 U.S. LEXIS 1230.txt")
TEST_PICKLE_PATH = os.path.join(os.path.abspath(os.curdir), "pickled_test_doc")
#####
def create_test_file(file_lines):
with open(TEST_FILE_PATH, 'w') as test_file:
for line in file_lines:
test_file.write(line + "\n")
class DocumentConverterTest(unittest.TestCase):
def setUp(self):
'''
what do i need to run tests?
- a test file.
'''
self.test_path = TEST_FILE_PATH
self.test_converter = SupremeCourtOpinionFileConverter(self.test_path, TEST_PICKLE_PATH)
def tearDown(self):
if os.path.exists(self.test_path):
os.remove(self.test_path)
if os.path.exists(TEST_PICKLE_PATH):
os.chmod(TEST_PICKLE_PATH, 0777)
os.remove(TEST_PICKLE_PATH)
del self.test_converter
def testNormalCase(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() normal case...")
# create a normal test file
create_test_file(VALID_OPINION_FILE_LINES)
converted_doc = self.test_converter.convert_file()
print("Word count: {0}".format(converted_doc.word_count))
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, OPINION_TEXT)
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, CASE_TITLE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, OPINION_TYPE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, CASE_NUM)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, CASE_US_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, CASE_SUPREME_COURT_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, CASE_LAWYERS_ED_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, CASE_LEXIS_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, CASE_FULL_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, CASE_DATES)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, CASE_DISPOSITION)
def testNoMetadataInFile(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with no Metadata in the input file...")
# create a test file without any metadata fields in it
create_test_file(VALID_OPINION_FILE_LINES[10:])
converted_doc = self.test_converter.convert_file()
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, OPINION_TEXT)
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, '')
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, "")
#self.fail("DocumentConverterTest: I haven't written testNoMetadataInFile yet.")
def testNoBodyTextInFile(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with no body text in the input file...")
# create a test file with valid metadata but without any body text in it
create_test_file(VALID_OPINION_FILE_LINES[:11])
converted_doc = self.test_converter.convert_file()
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, "")
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, CASE_TITLE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, OPINION_TYPE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, CASE_NUM)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, CASE_US_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, CASE_SUPREME_COURT_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, CASE_LAWYERS_ED_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, CASE_LEXIS_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, CASE_FULL_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, CASE_DATES)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, CASE_DISPOSITION)
#self.fail("DocumentConverterTest: I haven't written testNoBodyTextInFile yet.")
def testOutputFileNotWritable(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"and save_converted_doc() with an unwritable output file...")
create_test_file(VALID_OPINION_FILE_LINES)
converted_doc = self.test_converter.convert_file()
# assert stuff about the created converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, OPINION_TEXT)
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, CASE_TITLE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, OPINION_TYPE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, CASE_NUM)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, CASE_US_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, CASE_SUPREME_COURT_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, CASE_LAWYERS_ED_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, CASE_LEXIS_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, CASE_FULL_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, CASE_DATES)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, CASE_DISPOSITION)
# I need to change the permisssions of the pickle_path (chmod 0444)
with open(converted_doc.output_filename, 'w') as dummy:
pass
os.chmod(converted_doc.output_filename, 0444)
self.assertRaises(IOError, self.test_converter.save_converted_doc)
#self.fail("DocumentConverterTest: I haven't written testOutputFileNotWritable yet.")
def testInputFileNonexistent(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with nonexistent input file...")
# skip the create_test_file call and just try to convert.
self.assertRaises(IOError, self.test_converter.convert_file)
#self.fail("DocumentConverterTest: I haven't written testInputFileNonexistent yet.")
def testEmptyInputFile(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with completely empty input file...")
# create a test file with nothing in it
create_test_file([])
converted_doc = self.test_converter.convert_file()
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, "")
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, '')
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, "")
#self.fail("DocumentConverterTest: I haven't written testEmptyInputFile yet.")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| {
"content_hash": "4372e8f01ac83998fff5895ca2a1c2ff",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 101,
"avg_line_length": 50.35309973045822,
"alnum_prop": 0.690594721909962,
"repo_name": "dmarklein/WordCloud",
"id": "87123ba6391e24dd49f979cc6f67422fac8c5782",
"size": "18681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/python/DocumentConverterTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5968"
},
{
"name": "CSS",
"bytes": "17383"
},
{
"name": "Groovy",
"bytes": "82610"
},
{
"name": "HTML",
"bytes": "1258"
},
{
"name": "Java",
"bytes": "118357"
},
{
"name": "JavaScript",
"bytes": "13989"
},
{
"name": "PowerShell",
"bytes": "507"
},
{
"name": "Python",
"bytes": "172532"
}
],
"symlink_target": ""
} |
"""wikisource.
Usage:
dasem.wikisource get <title>
dasem.wikisource list
Example:
$ python -m dasem.wikisource get Mogens
"""
from __future__ import print_function
import re
from bs4 import BeautifulSoup
import requests
from six import u
from .wikidata import query_to_dataframe
SPARQL_QUERY = """
SELECT distinct ?item ?itemLabel ?article WHERE {
?article schema:about ?item.
?article schema:isPartOf <https://da.wikisource.org/>.
values ?kind { wd:Q7725634 wd:Q1372064 wd:Q7366 wd:Q49848}
?item (wdt:P31/wdt:P279*) ?kind .
SERVICE wikibase:label { bd:serviceParam wikibase:language "da,en". }
}
"""
def extract_text(text):
"""Extract relevant part of text from page.
Attempts with various regular expressions to extract the relevant
text from the downloaded parsed wikipage.
Poems might have the '<poem>...</poem>' construct. Text between these two
tags are extracted and returned.
Public domain license information is ignored.
Parameters
----------
text : str
Downloaded text.
Returns
-------
extracted_text : str
Extracted text.
"""
# Match <poem> and just extract that.
in_poem = re.findall(r'<poem>(.*?)</poem>', text,
flags=re.UNICODE | re.DOTALL)
if in_poem:
return u"\n\n".join(in_poem)
# Ignore license information. This might be above or below the text.
text = re.sub((r'Public domainPublic domain(.*?), '
'da det blev udgivet.{15,25}\.$'), '\n',
text, flags=re.UNICODE | re.DOTALL | re.MULTILINE)
regex = r'Teksten\[redig' + u('\xe9') + r'r\](.*)'
after_teksten = re.findall(regex, text, flags=re.UNICODE | re.DOTALL)
if after_teksten:
return u"\n\n".join(after_teksten)
# Match bottom of infobox on some of the songs
rest = re.findall(r'.*Wikipedia-link\s*(.*)', text,
flags=re.UNICODE | re.DOTALL)
if rest:
return u"\n\n".join(rest)
return text
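# Small illustrative helper (not used elsewhere in the module): shows the
# <poem> branch of extract_text() on a made-up page.
def _extract_text_example():
    """Tiny, hypothetical illustration of the <poem> branch above.

    The page content is invented; everything between the <poem> tags is
    returned verbatim and the surrounding boilerplate is dropped.
    """
    page = u"intro\n<poem>F\u00f8rste linje\nAnden linje</poem>\nfooter"
    assert extract_text(page) == u"F\u00f8rste linje\nAnden linje"
    return extract_text(page)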
def get_list_from_wikidata():
"""Get list of works from Wikidata.
Returns
-------
df : pandas.DataFrame
DataFrame with information from Wikidata.
"""
df = query_to_dataframe(SPARQL_QUERY)
return df
def get_text_by_title(title):
"""Get text from Wikisource based on title.
If the text is split over several wikipages (which is the case with novels)
then the full text will not be returned, - only the index page.
Parameters
----------
title : str
Title of wikipage on Danish Wikisource.
Returns
-------
text : str or None
The text. Returns none if the page does not exist.
"""
url = 'https://da.wikisource.org/w/api.php'
params = {'page': title, 'action': 'parse', 'format': 'json'}
data = requests.get(url, params=params).json()
if 'parse' in data:
text = BeautifulSoup(data['parse']['text']['*'], "lxml").get_text()
else:
text = None
return text
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if arguments['get']:
text = get_text_by_title(arguments['<title>'])
if text:
extracted_text = extract_text(text)
print(extracted_text.encode('utf-8'))
elif arguments['list']:
df = get_list_from_wikidata()
print(df.to_csv(encoding='utf-8'))
if __name__ == '__main__':
main()
| {
"content_hash": "a6b449a5fd9b5a9dec362b2fa6ab980a",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 24.47887323943662,
"alnum_prop": 0.6101841196777905,
"repo_name": "fnielsen/dasem",
"id": "e33432e08fe82ad7aa2af92e5d86c25d4b605d9a",
"size": "3476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dasem/wikisource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2069"
},
{
"name": "Jupyter Notebook",
"bytes": "279950"
},
{
"name": "Python",
"bytes": "283237"
}
],
"symlink_target": ""
} |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from sklearn.metrics import confusion_matrix
from utils import create_emb_layer
def accuracy(pred, label):
correct = []
for i in range(len(pred)):
correct.append(pred[i]==label[i])
correct = torch.stack(correct)
return torch.sum(correct)
def create_sorted_batch(batch):
"""
    Sort the batch by descending sentence length and trim excess padding so a
    packed sequence can be built in the forward pass (mini-batched LSTM input).
"""
sent_lengths, perm_idx = batch['sent_len'].sort(0, descending=True)
batch['asp_len'] = batch['asp_len'][perm_idx]
asp_lengths = batch['asp_len']
batch['sentence'] = batch['sentence'][perm_idx][:, :sent_lengths.max()]
batch['aspect'] = batch['aspect'][perm_idx][:, :asp_lengths.max()]
batch['sentiment']= batch['sentiment'][perm_idx]
batch['sent_len'] = sent_lengths
return batch
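# Hedged illustration with dummy tensors (the token ids are arbitrary): the
# batch is re-ordered by descending sentence length and padding beyond the
# longest sequence in the batch is trimmed, which is what
# pack_padded_sequence in the forward pass expects.
def _create_sorted_batch_example():
    batch = {
        'sentence': torch.tensor([[4, 7, 0, 0, 0], [3, 9, 2, 5, 0]]),
        'aspect': torch.tensor([[1, 0], [6, 2]]),
        'sentiment': torch.tensor([0, 2]),
        'sent_len': torch.tensor([2, 4]),
        'asp_len': torch.tensor([1, 2]),
    }
    sorted_batch = create_sorted_batch(batch)
    assert sorted_batch['sent_len'].tolist() == [4, 2]   # longest sentence first
    assert sorted_batch['sentence'].shape == (2, 4)      # unused pad column trimmed
    return sorted_batch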
class ATAE_LSTM(nn.Module):
def __init__(self, weights_matrix, hidden_dim, output_dim, dropout, words, word2idx):
super().__init__()
self.embedding, embedding_dim= create_emb_layer(weights_matrix)
self.lstm = nn.LSTM(embedding_dim*2, hidden_dim, num_layers=2, bidirectional=True, dropout=0.5, batch_first=True)
self.attn = nn.Linear(hidden_dim*2+embedding_dim, hidden_dim, bias=False)
self.fc = nn.Linear(hidden_dim*2, output_dim)
self.dropout = nn.Dropout(dropout)
################### Projection Parameters ###################
self.v = nn.Parameter(torch.rand(hidden_dim))
self.Wp = nn.Parameter(torch.rand(hidden_dim*2, hidden_dim*2))
self.Wx = nn.Parameter(torch.rand(hidden_dim*2, hidden_dim*2))
def forward(self, vocab):
with torch.no_grad():
s_shape = vocab['sentence'].shape
a_shape = vocab['aspect'].shape
s_embedding = self.embedding(vocab['sentence'].cuda())
##########################################################
############### Average the aspect embedding #############
##########################################################
"""
Note:- simply averaging the word embeddings of a target phrase is not
sufficient to represent the semantics of the target phrase.
Reference - https://aclweb.org/anthology/D16-1058
Future work - Learning aspect embedding
"""
##########################################################
a_embedding = self.embedding(vocab['aspect'].cuda())
a_embedding = torch.unsqueeze(torch.mean(a_embedding, 1),1).repeat(1,s_shape[1],1)
# Concatenate each word in sentence with aspect vector
concated_input = self.dropout(torch.cat((s_embedding,a_embedding),-1))
packed_input = pack_padded_sequence(concated_input, vocab['sent_len'], batch_first=True)
out, (h, c) = self.lstm(packed_input)
##########################################################################
"""
Concatenate the aspect vector into the sentence hidden representations
for computing attention weights
"""
##########################################################################
with torch.no_grad():
unpacked_out, _ = pad_packed_sequence(out, batch_first=True)
concated_out = torch.cat((unpacked_out, a_embedding),-1)
attn_in = concated_out
##########################################################
################# Attention #############################
##########################################################
score = F.tanh(self.attn(attn_in.cuda())).transpose(2,1)
v = self.v.repeat(unpacked_out.shape[0],1).unsqueeze(1)
attn_score = torch.bmm(v,score).squeeze(1)
attn_weights = F.softmax(attn_score, dim=1).unsqueeze(1)
r = torch.bmm(attn_weights, unpacked_out).squeeze(1)
##########################################################
################# h' = tanh(Wp.r + Wx.Hn) ################
##########################################################
final_rep = F.tanh(r.matmul(self.Wp) + unpacked_out[:,-1,:].matmul(self.Wx))
pred = self.fc(final_rep)
return pred
##############################################################
######################## TRAINING ###########################
##############################################################
def train_(model, batches, optimizer, criterion):
model.train()
total_loss = 0
total_acc = 0
count = 0
cm = torch.zeros(3,3)
for batch in batches:
batch = create_sorted_batch(batch)
label = batch['sentiment']
optimizer.zero_grad()
pred = model(batch)
loss = criterion(pred, label.cuda())
acc = accuracy(torch.argmax(F.softmax(pred,dim=1),1).float(), label.float().cuda())
cm += torch.from_numpy(confusion_matrix(label, torch.argmax(pred,1), \
labels=[torch.tensor(0), torch.tensor(1), torch.tensor(2)])).float()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_acc += acc.item()
count += len(label)
return total_loss/len(batches), total_acc/count, cm
##############################################################
######################## Validation ###########################
##############################################################
def eval_(model, batches, criterion):
model.eval()
total_loss = 0
total_acc = 0
count = 0
cm = torch.zeros(3,3)
for batch in batches:
batch = create_sorted_batch(batch)
label = batch['sentiment']
pred = model(batch)
loss = criterion(pred, label.cuda())
acc = accuracy(torch.argmax(F.softmax(pred,dim=1),1).float(), label.float().cuda())
cm += torch.from_numpy(confusion_matrix(label, torch.argmax(pred,1), \
labels=[torch.tensor(0), torch.tensor(1), torch.tensor(2)])).float()
total_loss += loss.item()
total_acc += acc.item()
count += len(label)
return total_loss/len(batches), total_acc/count, cm
##############################################################
#################### Test/ Prediction #######################
##############################################################
def test(model, batches, weights_matrix):
model.embedding, _ = create_emb_layer(weights_matrix)
model.embedding = model.embedding.cuda()
model.eval()
total_acc = 0
count = 0
cm = torch.zeros(3,3)
for batch in batches:
batch = create_sorted_batch(batch)
label = batch['sentiment']
pred = model(batch)
acc = accuracy(torch.argmax(F.softmax(pred,dim=1),1).float(), label.float().cuda())
        cm += torch.from_numpy(confusion_matrix(label, torch.argmax(pred, 1).cpu(),
                                                 labels=[0, 1, 2])).float()
total_acc += acc.item()
count += len(label)
return total_acc/count, cm
def predict(model, batches, weights_matrix):
model.embedding, _ = create_emb_layer(weights_matrix)
model.embedding = model.embedding.cuda()
model.eval()
# Only 1 batch and 1 item in that batch
for batch in batches:
pred = model(batch)
return torch.argmax(F.softmax(pred,dim=1),1)
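# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): one way the helpers
# above could be wired into an epoch loop. `build_model`, `train_batches` and
# `val_batches` are hypothetical placeholders for objects built elsewhere in
# this project.
# ---------------------------------------------------------------------------
# model = build_model().cuda()                         # hypothetical factory
# optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# criterion = torch.nn.CrossEntropyLoss()
# for epoch in range(10):
#     tr_loss, tr_acc, _ = train_(model, train_batches, optimizer, criterion)
#     va_loss, va_acc, _ = eval_(model, val_batches, criterion)
#     print('epoch %d | train %.4f/%.4f | val %.4f/%.4f'
#           % (epoch, tr_loss, tr_acc, va_loss, va_acc))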
| {
"content_hash": "a2eed6f9cfcf94586bea18ce35cd87c7",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 121,
"avg_line_length": 41.84736842105263,
"alnum_prop": 0.48132310401207395,
"repo_name": "prakhar2b/Weekend-Projects",
"id": "134355699b05aacc960f2c1c81e861b80dc292d6",
"size": "8373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Machine-Learning/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "6818"
},
{
"name": "Jupyter Notebook",
"bytes": "217785"
},
{
"name": "Python",
"bytes": "21895"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'wagtail.wagtaildocs.views',
url(r'^(\d+)/(.*)$', 'serve.serve', name='wagtaildocs_serve'),
)
| {
"content_hash": "9283af09bc468161d3b1a67a58894ea9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6588235294117647,
"repo_name": "h2oloopan/easymerge",
"id": "5f8ddaf0398ec4dea20853470e3e8cfea80bc3fb",
"size": "170",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "EasyMerge/tests/wagtail/wagtail/wagtaildocs/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13487"
},
{
"name": "CSS",
"bytes": "416664"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "Java",
"bytes": "583078"
},
{
"name": "JavaScript",
"bytes": "285692"
},
{
"name": "Python",
"bytes": "4212549"
},
{
"name": "Ruby",
"bytes": "920"
},
{
"name": "Shell",
"bytes": "40508"
},
{
"name": "TeX",
"bytes": "114952"
}
],
"symlink_target": ""
} |
import statsmodels.api as sm
from scipy.stats import norm
import numpy as np
import pytest
import scipy
import sys
import os
from respy.python.solve.solve_auxiliary import get_predictions
from codes.auxiliary import write_interpolation_grid
from codes.random_init import generate_init
from respy.python.shared.shared_auxiliary import read_draws
from codes.auxiliary import write_draws
from respy.python.estimate.estimate_auxiliary import get_optim_paras
from respy.python.shared.shared_auxiliary import replace_missing_values
from respy.python.solve.solve_auxiliary import get_endogenous_variable
from respy.python.solve.solve_auxiliary import get_future_value
from respy.python.shared.shared_auxiliary import get_cholesky
from respy.python.shared.shared_constants import IS_FORTRAN
from respy.fortran.interface import resfort_interface
from respy.python.solve.solve_python import pyth_solve
from respy.python.simulate.simulate_python import pyth_simulate
from respy.python.evaluate.evaluate_python import pyth_evaluate
from respy.python.estimate.estimate_python import pyth_criterion
from respy.python.shared.shared_auxiliary import dist_class_attributes
from respy.python.shared.shared_auxiliary import dist_model_paras
from respy.python.shared.shared_auxiliary import create_draws
from respy.python.shared.shared_constants import TEST_RESOURCES_DIR
from respy.python.solve.solve_auxiliary import pyth_create_state_space
from respy.python.solve.solve_auxiliary import pyth_calculate_payoffs_systematic
from respy.python.solve.solve_auxiliary import pyth_backward_induction
from respy.python.solve.solve_auxiliary import get_simulated_indicator
from respy.python.solve.solve_auxiliary import get_exogenous_variables
from respy import RespyCls
from respy import simulate
# Edit of PYTHONPATH required for PYTHON 2 as no __init__.py in tests
# subdirectory. If __init__.py is added, the path resolution for PYTEST
# breaks down.
if IS_FORTRAN:
sys.path.insert(0, TEST_RESOURCES_DIR)
import f2py_interface as fort_debug
@pytest.mark.skipif(not IS_FORTRAN, reason='No FORTRAN available')
@pytest.mark.usefixtures('fresh_directory', 'set_seed')
class TestClass(object):
""" This class groups together some tests.
"""
def test_1(self):
""" Compare the evaluation of the criterion function for the ambiguity
optimization and the simulated expected future value between the FORTRAN
        and PYTHON implementations. These tests are set up as a separate test case
due to the large setup cost to construct the ingredients for the interface.
"""
        # Impose constraints
constraints = dict()
constraints['version'] = 'PYTHON'
# Generate random initialization file
generate_init(constraints)
# Perform toolbox actions
respy_obj = RespyCls('test.respy.ini')
respy_obj = simulate(respy_obj)
# Extract class attributes
periods_payoffs_systematic, states_number_period, mapping_state_idx, \
periods_emax, num_periods, states_all, num_draws_emax, edu_start, \
edu_max, delta = \
dist_class_attributes(respy_obj,
'periods_payoffs_systematic', 'states_number_period',
'mapping_state_idx', 'periods_emax', 'num_periods',
'states_all', 'num_draws_emax', 'edu_start', 'edu_max',
'delta')
# Sample draws
draws_standard = np.random.multivariate_normal(np.zeros(4),
np.identity(4), (num_draws_emax,))
# Sampling of random period and admissible state index
period = np.random.choice(range(num_periods))
k = np.random.choice(range(states_number_period[period]))
# Select systematic payoffs
payoffs_systematic = periods_payoffs_systematic[period, k, :]
# Evaluation of simulated expected future values
args = (num_periods, num_draws_emax, period, k, draws_standard,
payoffs_systematic, edu_max, edu_start, periods_emax, states_all,
mapping_state_idx, delta)
py = get_future_value(*args)
f90 = fort_debug.wrapper_get_future_value(*args)
np.testing.assert_allclose(py, f90, rtol=1e-05, atol=1e-06)
def test_2(self):
""" Compare results between FORTRAN and PYTHON of selected
hand-crafted functions. In test_97() we test FORTRAN implementations
against PYTHON intrinsic routines.
"""
for _ in range(25):
# Create grid of admissible state space values.
num_periods = np.random.randint(1, 15)
edu_start = np.random.randint(1, 5)
edu_max = edu_start + np.random.randint(1, 5)
# Prepare interface
min_idx = min(num_periods, (edu_max - edu_start + 1))
# FORTRAN
args = (num_periods, edu_start, edu_max, min_idx)
fort_a, fort_b, fort_c, fort_d = \
fort_debug.f2py_create_state_space(*args)
py_a, py_b, py_c, py_d = pyth_create_state_space(*args)
# Ensure equivalence
for obj in [[fort_a, py_a], [fort_b, py_b], [fort_c, py_c], [fort_d, py_d]]:
np.testing.assert_allclose(obj[0], obj[1])
for _ in range(100):
# Draw random request for testing purposes
num_covars = np.random.randint(2, 10)
num_agents = np.random.randint(100, 1000)
tiny = np.random.normal(size=num_agents)
beta = np.random.normal(size=num_covars)
# Generate sample
exog = np.random.sample((num_agents, num_covars))
exog[:, 0] = 1
endog = np.dot(exog, beta) + tiny
# Run statsmodels
results = sm.OLS(endog, exog).fit()
# Check parameters
py = results.params
f90 = fort_debug.wrapper_get_coefficients(endog, exog, num_covars,
num_agents)
np.testing.assert_almost_equal(py, f90)
# Check prediction
py = results.predict(exog)
f90 = fort_debug.wrapper_point_predictions(exog, f90, num_agents)
np.testing.assert_almost_equal(py, f90)
# Check coefficient of determination and the standard errors.
py = [results.rsquared, results.bse]
f90 = fort_debug.wrapper_get_pred_info(endog, f90, exog,
num_agents, num_covars)
for i in range(2):
np.testing.assert_almost_equal(py[i], f90[i])
def test_3(self):
""" Compare results between FORTRAN and PYTHON of selected functions.
"""
for _ in range(10):
# Draw random requests for testing purposes.
num_draws_emax = np.random.randint(2, 1000)
dim = np.random.randint(1, 6)
matrix = (np.random.multivariate_normal(np.zeros(dim),
np.identity(dim), dim))
cov = np.dot(matrix, matrix.T)
# PDF of normal distribution
args = np.random.normal(size=3)
args[-1] **= 2
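            # args correspond to (x, mean, sd) for scipy.stats.norm.pdf; squaring
            # the last entry keeps the standard deviation non-negative.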
f90 = fort_debug.wrapper_normal_pdf(*args)
py = norm.pdf(*args)
np.testing.assert_almost_equal(py, f90)
# Singular Value Decomposition
py = scipy.linalg.svd(matrix)
f90 = fort_debug.wrapper_svd(matrix, dim)
for i in range(3):
np.testing.assert_allclose(py[i], f90[i], rtol=1e-05, atol=1e-06)
# Pseudo-Inverse
py = np.linalg.pinv(matrix)
f90 = fort_debug.wrapper_pinv(matrix, dim)
np.testing.assert_allclose(py, f90, rtol=1e-05, atol=1e-06)
# Inverse
py = np.linalg.inv(cov)
f90 = fort_debug.wrapper_inverse(cov, dim)
np.testing.assert_allclose(py, f90, rtol=1e-05, atol=1e-06)
# Determinant
py = np.linalg.det(cov)
f90 = fort_debug.wrapper_determinant(cov)
np.testing.assert_allclose(py, f90, rtol=1e-05, atol=1e-06)
# Trace
py = np.trace(cov)
f90 = fort_debug.wrapper_trace(cov)
np.testing.assert_allclose(py, f90, rtol=1e-05, atol=1e-06)
# Random normal deviates. This only tests the interface, requires
# visual inspection in IPYTHON notebook as well.
fort_debug.wrapper_standard_normal(num_draws_emax)
# Clipping values below and above bounds.
num_values = np.random.randint(1, 10000)
lower_bound = np.random.randn()
upper_bound = lower_bound + np.random.ranf()
values = np.random.normal(size=num_values)
f90 = fort_debug.wrapper_clip_value(values, lower_bound,
upper_bound, num_values)
py = np.clip(values, lower_bound, upper_bound)
np.testing.assert_almost_equal(py, f90)
def test_4(self):
""" Testing the core functions of the solution step for the equality
of results between the PYTHON and FORTRAN implementations.
"""
# Generate random initialization file
generate_init()
# Perform toolbox actions
respy_obj = RespyCls('test.respy.ini')
# Ensure that backward induction routines use the same grid for the
# interpolation.
write_interpolation_grid('test.respy.ini')
# Extract class attributes
num_periods, edu_start, edu_max, min_idx, model_paras, num_draws_emax, \
seed_emax, is_debug, delta, is_interpolated, num_points_interp, = \
dist_class_attributes(respy_obj,
'num_periods', 'edu_start', 'edu_max', 'min_idx',
'model_paras', 'num_draws_emax', 'seed_emax', 'is_debug',
'delta', 'is_interpolated', 'num_points_interp')
# Auxiliary objects
coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = \
dist_model_paras(model_paras, is_debug)
# Check the state space creation.
args = (num_periods, edu_start, edu_max, min_idx)
pyth = pyth_create_state_space(*args)
f2py = fort_debug.f2py_create_state_space(*args)
for i in range(4):
np.testing.assert_allclose(pyth[i], f2py[i])
# Carry some results from the state space creation for future use.
states_all, states_number_period = pyth[:2]
mapping_state_idx, max_states_period = pyth[2:]
# Cutting to size
states_all = states_all[:, :max(states_number_period), :]
# Check calculation of systematic components of payoffs.
args = (num_periods, states_number_period, states_all, edu_start,
coeffs_a, coeffs_b, coeffs_edu, coeffs_home, max_states_period)
pyth = pyth_calculate_payoffs_systematic(*args)
f2py = fort_debug.f2py_calculate_payoffs_systematic(*args)
np.testing.assert_allclose(pyth, f2py)
# Carry some results from the systematic payoff calculation for
# future use and create the required set of disturbances.
periods_draws_emax = create_draws(num_periods, num_draws_emax,
seed_emax, is_debug)
periods_payoffs_systematic = pyth
# Check backward induction procedure.
args = (num_periods, max_states_period, periods_draws_emax,
num_draws_emax, states_number_period, periods_payoffs_systematic,
edu_max, edu_start, mapping_state_idx, states_all, delta,
is_debug, is_interpolated, num_points_interp, shocks_cholesky)
pyth = pyth_backward_induction(*args)
f2py = fort_debug.f2py_backward_induction(*args)
np.testing.assert_allclose(pyth, f2py)
def test_5(self):
""" This methods ensures that the core functions yield the same
results across implementations.
"""
# Generate random initialization file
generate_init()
# Perform toolbox actions
respy_obj = RespyCls('test.respy.ini')
# Ensure that backward induction routines use the same grid for the
# interpolation.
max_states_period = write_interpolation_grid('test.respy.ini')
# Extract class attributes
num_periods, edu_start, edu_max, min_idx, model_paras, num_draws_emax, \
is_debug, delta, is_interpolated, num_points_interp, is_myopic, num_agents_sim, \
num_draws_prob, tau, paras_fixed, seed_sim = \
dist_class_attributes(
respy_obj, 'num_periods', 'edu_start', 'edu_max', 'min_idx',
'model_paras', 'num_draws_emax', 'is_debug', 'delta',
'is_interpolated', 'num_points_interp', 'is_myopic', 'num_agents_sim',
'num_draws_prob', 'tau', 'paras_fixed', 'seed_sim')
# Write out random components and interpolation grid to align the
# three implementations.
max_draws = max(num_agents_sim, num_draws_emax, num_draws_prob)
write_draws(num_periods, max_draws)
periods_draws_emax = read_draws(num_periods, num_draws_emax)
periods_draws_prob = read_draws(num_periods, num_draws_prob)
periods_draws_sims = read_draws(num_periods, num_agents_sim)
# Extract coefficients
coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = dist_model_paras(
model_paras, True)
# Check the full solution procedure
base_args = (coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky,
is_interpolated, num_draws_emax, num_periods, num_points_interp, is_myopic,
edu_start, is_debug, edu_max, min_idx, delta)
fort, _ = resfort_interface(respy_obj, 'simulate')
pyth = pyth_solve(*base_args + (periods_draws_emax,))
f2py = fort_debug.f2py_solve(*base_args + (periods_draws_emax, max_states_period))
for alt in [f2py, fort]:
for i in range(5):
np.testing.assert_allclose(pyth[i], alt[i])
# Distribute solution arguments for further use in simulation test.
periods_payoffs_systematic, _, mapping_state_idx, periods_emax, states_all = pyth
args = (periods_payoffs_systematic, mapping_state_idx, \
periods_emax, states_all, shocks_cholesky, num_periods, edu_start,
edu_max, delta, num_agents_sim, periods_draws_sims, seed_sim)
pyth = pyth_simulate(*args)
f2py = fort_debug.f2py_simulate(*args)
np.testing.assert_allclose(pyth, f2py)
data_array = pyth
base_args = (coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky,
is_interpolated, num_draws_emax, num_periods, num_points_interp, is_myopic,
edu_start, is_debug, edu_max, min_idx, delta, data_array, num_agents_sim,
num_draws_prob, tau)
args = base_args + (periods_draws_emax, periods_draws_prob)
pyth = pyth_evaluate(*args)
args = base_args + (periods_draws_emax, periods_draws_prob)
f2py = fort_debug.f2py_evaluate(*args)
np.testing.assert_allclose(pyth, f2py)
# Evaluation of criterion function
x0 = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
shocks_cholesky, 'all', paras_fixed, is_debug)
args = (
is_interpolated, num_draws_emax, num_periods, num_points_interp, is_myopic,
edu_start, is_debug, edu_max, min_idx, delta, data_array, num_agents_sim,
num_draws_prob, tau, periods_draws_emax, periods_draws_prob)
pyth = pyth_criterion(x0, *args)
f2py = fort_debug.f2py_criterion(x0, *args)
np.testing.assert_allclose(pyth, f2py)
def test_6(self):
""" Further tests for the interpolation routines.
"""
# Generate random initialization file
generate_init()
# Perform toolbox actions
respy_obj = RespyCls('test.respy.ini')
respy_obj = simulate(respy_obj)
# Extract class attributes
periods_payoffs_systematic, states_number_period, mapping_state_idx, seed_prob, periods_emax, num_periods, states_all, num_points_interp, edu_start, num_draws_emax, is_debug, edu_max, delta = dist_class_attributes(
respy_obj, 'periods_payoffs_systematic', 'states_number_period',
'mapping_state_idx', 'seed_prob', 'periods_emax',
'num_periods', 'states_all', 'num_points_interp', 'edu_start',
'num_draws_emax', 'is_debug', 'edu_max', 'delta')
# Add some additional objects required for the interfaces to the
# functions.
period = np.random.choice(range(num_periods))
periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_prob,
is_debug)
draws_emax = periods_draws_emax[period, :, :]
num_states = states_number_period[period]
shifts = np.random.randn(4)
        # Slight modification of the request which ensures that the
# interpolation code is working.
num_points_interp = min(num_points_interp, num_states)
# Get the IS_SIMULATED indicator for the subset of points which are
        # used for the prediction model.
args = (num_points_interp, num_states, period, is_debug)
is_simulated = get_simulated_indicator(*args)
# Construct the exogenous variables for all points of the state
# space.
args = (
period, num_periods, num_states, delta, periods_payoffs_systematic, shifts,
edu_max, edu_start, mapping_state_idx, periods_emax, states_all)
py = get_exogenous_variables(*args)
f90 = fort_debug.wrapper_get_exogenous_variables(*args)
np.testing.assert_equal(py, f90)
# Distribute validated results for further functions.
exogenous, maxe = py
# Construct endogenous variable so that the prediction model can be
# fitted.
args = (period, num_periods, num_states, delta,
periods_payoffs_systematic, edu_max, edu_start,
mapping_state_idx, periods_emax, states_all, is_simulated,
num_draws_emax, maxe, draws_emax)
py = get_endogenous_variable(*args)
f90 = fort_debug.wrapper_get_endogenous_variable(*args)
np.testing.assert_equal(py, replace_missing_values(f90))
# Distribute validated results for further functions.
endogenous = py
args = (endogenous, exogenous, maxe, is_simulated, num_points_interp,
num_states, is_debug)
py = get_predictions(*args)
f90 = fort_debug.wrapper_get_predictions(*args[:-1])
np.testing.assert_array_almost_equal(py, f90)
def test_7(self):
""" This is a special test for auxiliary functions related to the
interpolation setup.
"""
# Impose constraints
constr = dict()
constr['periods'] = np.random.randint(2, 5)
# Construct a random initialization file
generate_init(constr)
# Extract required information
respy_obj = RespyCls('test.respy.ini')
# Extract class attributes
is_debug, num_periods = dist_class_attributes(respy_obj,
'is_debug', 'num_periods')
# Write out a grid for the interpolation
max_states_period = write_interpolation_grid('test.respy.ini')
# Draw random request for testing
num_states = np.random.randint(1, max_states_period)
candidates = list(range(num_states))
period = np.random.randint(1, num_periods)
num_points_interp = np.random.randint(1, num_states + 1)
# Check function for random choice and make sure that there are no
# duplicates.
f90 = fort_debug.wrapper_random_choice(candidates, num_states, num_points_interp)
np.testing.assert_equal(len(set(f90)), len(f90))
np.testing.assert_equal(len(f90), num_points_interp)
# Check the standard cases of the function.
args = (num_points_interp, num_states, period, is_debug, num_periods)
f90 = fort_debug.wrapper_get_simulated_indicator(*args)
np.testing.assert_equal(len(f90), num_states)
np.testing.assert_equal(np.all(f90) in [0, 1], True)
# Test the standardization across PYTHON, F2PY, and FORTRAN
# implementations. This is possible as we write out an interpolation
# grid to disk which is used for both functions.
base_args = (num_points_interp, num_states, period, is_debug)
args = base_args
py = get_simulated_indicator(*args)
args = base_args + (num_periods, )
f90 = fort_debug.wrapper_get_simulated_indicator(*args)
np.testing.assert_array_equal(f90, 1*py)
os.unlink('interpolation.txt')
# Special case where number of interpolation points are same as the
# number of candidates. In that case the returned indicator
# should be all TRUE.
args = (num_states, num_states, period, True, num_periods)
f90 = fort_debug.wrapper_get_simulated_indicator(*args)
np.testing.assert_equal(sum(f90), num_states)
def test_8(self):
""" We test the construction of the Cholesky decomposition against
each other.
"""
# Draw a random vector of parameters
x = np.random.uniform(size=26)
# Construct the Cholesky decompositions
py = get_cholesky(x, info=0)
fort = fort_debug.wrapper_get_cholesky(x)
# Compare the results based on the two methods
np.testing.assert_equal(fort, py)
| {
"content_hash": "b8d4a9dde1cd3de6590b9105732b15cb",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 222,
"avg_line_length": 40.0556586270872,
"alnum_prop": 0.6320055581287634,
"repo_name": "restudToolbox/package",
"id": "d50fce3896f401d2fcd35f2a8ed61e500e0ab82e",
"size": "21590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "respy/tests/test_f2py.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "571229"
},
{
"name": "HCL",
"bytes": "342"
},
{
"name": "Python",
"bytes": "417314"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
import os
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = 'this-really-needs-to-be-changed'
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
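# Hedged usage sketch (assumes this is a Flask-style config module consumed by
# an application object named `app` created elsewhere in this project):
#     app.config.from_object('config.DevelopmentConfig')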
| {
"content_hash": "0079d777bb6d7870bd2763fb33cd7872",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 23.083333333333332,
"alnum_prop": 0.6859205776173285,
"repo_name": "jwestgard/elk",
"id": "239459cc146bea6f1127f5bd85bfb9a398897be9",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "1802"
}
],
"symlink_target": ""
} |
"""Sort current config.json alphabetically. """
import json
import os
import re
import sys
ORIG_CWD = os.getcwd() # Checkout changes cwd
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def sort():
"""Sort config.json alphabetically."""
with open(test_infra('jobs/config.json'), 'r+') as fp:
configs = json.loads(fp.read())
regexp = re.compile(r'KUBEKINS_TIMEOUT=(\d+m)')
problems = []
for job, values in configs.items():
if values.get('scenario') != 'kubernetes_e2e':
continue
migrated = any('--timeout=' in a for a in values.get('args', []))
with open(test_infra('jobs/%s.env' % job)) as fp:
env = fp.read()
if migrated:
if 'KUBEKINS_TIMEOUT=' in env:
problems.append(job)
continue
timeout = None
lines = []
for line in env.split('\n'):
mat = regexp.search(line)
if not mat:
lines.append(line)
continue
if timeout:
print >>sys.stderr, 'Duplicate timeouts:', job
problems.append(job)
break
timeout = mat.group(1)
else:
if not timeout:
problems.append(job)
with open(test_infra('jobs/%s.env' % job), 'w') as fp:
fp.write('\n'.join(lines))
values['args'].append('--timeout=%s' % timeout)
with open(test_infra('jobs/config.json'), 'w') as fp:
fp.write(json.dumps(configs, sort_keys=True, indent=2))
fp.write('\n')
if not problems:
sys.exit(0)
print >>sys.stderr, '%d problems' % len(problems)
print '\n'.join(problems)
if __name__ == '__main__':
sort()
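# Hedged illustration of the migration performed above (values are examples
# only): a jobs/<job>.env line such as
#     KUBEKINS_TIMEOUT=50m
# is dropped from the env file, and the job's config.json entry gains
#     "--timeout=50m"
# in its "args" list before config.json is rewritten with sort_keys=True.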
| {
"content_hash": "439d8a1fa671fdfa74e9c3db8a367009",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 74,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.5338345864661654,
"repo_name": "dchen1107/test-infra",
"id": "7b482a0ba89cb570d751d5a753cc3e09ced3b34d",
"size": "2473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jobs/move_timeout.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10320"
},
{
"name": "Go",
"bytes": "1625626"
},
{
"name": "HTML",
"bytes": "57297"
},
{
"name": "JavaScript",
"bytes": "70846"
},
{
"name": "Makefile",
"bytes": "32985"
},
{
"name": "Nginx",
"bytes": "1532"
},
{
"name": "Protocol Buffer",
"bytes": "5614"
},
{
"name": "Python",
"bytes": "729862"
},
{
"name": "Roff",
"bytes": "13936"
},
{
"name": "Shell",
"bytes": "122854"
}
],
"symlink_target": ""
} |
"""
A few bits of helper functions for comment views.
"""
import textwrap
try:
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, resolve_url
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.utils.http import is_safe_url
import django_comments
def next_redirect(request, fallback, **get_kwargs):
"""
Handle the "where should I go next?" part of comment views.
The next value could be a
``?next=...`` GET arg or the URL of a given view (``fallback``). See
the view modules for examples.
Returns an ``HttpResponseRedirect``.
"""
next = request.POST.get('next')
if not is_safe_url(url=next, host=request.get_host()):
next = resolve_url(fallback)
if get_kwargs:
if '#' in next:
tmp = next.rsplit('#', 1)
next = tmp[0]
anchor = '#' + tmp[1]
else:
anchor = ''
joiner = ('?' in next) and '&' or '?'
next += joiner + urlencode(get_kwargs) + anchor
return HttpResponseRedirect(next)
def confirmation_view(template, doc="Display a confirmation view."):
"""
Confirmation view generator for the "comment was
posted/flagged/deleted/approved" views.
"""
def confirmed(request):
comment = None
if 'c' in request.GET:
try:
comment = django_comments.get_model().objects.get(pk=request.GET['c'])
except (ObjectDoesNotExist, ValueError):
pass
return render_to_response(
template,
{'comment': comment},
context_instance=RequestContext(request)
)
confirmed.__doc__ = textwrap.dedent("""\
%s
        Templates: :template:`%s`
Context:
comment
The posted comment
""" % (doc, template)
)
return confirmed
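# Hedged usage sketch (not part of this module): how a posting view might use
# the helpers above. The URL name and template path are illustrative only.
#
#     comment_done = confirmation_view(
#         template='comments/posted.html',
#         doc='Display a "comment was posted" success page.',
#     )
#
#     # ...inside the view that saved `comment`:
#     return next_redirect(request, fallback='comments-comment-done', c=comment.pk)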
| {
"content_hash": "8e1283a96b84654325ce65be32a1560a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 86,
"avg_line_length": 26.894736842105264,
"alnum_prop": 0.6056751467710372,
"repo_name": "Maplecroft/django-contrib-comments",
"id": "32e73def5e1efc7ed74e3106bdbe21972d66be55",
"size": "2044",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "django_comments/views/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7622"
},
{
"name": "Python",
"bytes": "142450"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: zfs_delegate_admin
short_description: Manage ZFS delegated administration (user admin privileges)
description:
- Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
operations normally restricted to the superuser.
- See the "zfs allow" section of C(zfs(1M)) for detailed explanations of options. This module attempts to adhere to
the behavior of the command line tool as much as possible.
requirements:
- "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
version_added: "2.5"
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs)
required: true
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission. When set to C(present), at least one
"entity" param of I(users), I(groups), or I(everyone) are required. When set to C(absent), removes permissions
from the specified entities, or removes all permissions if no entity params are specified.
required: true
choices: [present, absent]
users:
description:
- List of users to whom permission(s) should be granted
groups:
description:
- List of groups to whom permission(s) should be granted
everyone:
description:
- Apply permissions to everyone.
default: false
type: bool
permissions:
description:
- The list of permission(s) to delegate (required if C(state) is C(present))
choices: ['allow','clone','create','destroy',...]
local:
description:
- Apply permissions to C(name) locally (C(zfs allow -l))
default: null
type: bool
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d))
default: null
type: bool
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present))
default: false
type: bool
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
- zfs_delegate_admin: name=rpool/myfs users=adm permissions=allow,unallow
# Grant `zfs send` to everyone, plus the group `backup`
- zfs_delegate_admin: name=rpool/myvol groups=backup everyone=yes permissions=send
# Grant `zfs send,receive` to users `foo` and `bar` with local scope only
- zfs_delegate_admin: name=rpool/myfs users=foo,bar permissions=send,receive local=yes
# Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
- zfs_delegate_admin: name=rpool/myfs state=absent everyone=yes
'''
# This module does not return anything other than the standard
# changed/state/msg/stdout
RETURN = '''
'''
from itertools import product
from ansible.module_utils.basic import AnsibleModule
class ZfsDelegateAdmin(object):
def __init__(self, module):
self.module = module
self.name = module.params.get('name')
self.state = module.params.get('state')
self.users = module.params.get('users')
self.groups = module.params.get('groups')
self.everyone = module.params.get('everyone')
self.perms = module.params.get('permissions')
self.scope = None
self.changed = False
self.initial_perms = None
self.subcommand = 'allow'
self.recursive_opt = []
self.run_method = self.update
self.setup(module)
def setup(self, module):
""" Validate params and set up for run.
"""
if self.state == 'absent':
self.subcommand = 'unallow'
if module.params.get('recursive'):
self.recursive_opt = ['-r']
local = module.params.get('local')
descendents = module.params.get('descendents')
if (local and descendents) or (not local and not descendents):
self.scope = 'ld'
elif local:
self.scope = 'l'
elif descendents:
self.scope = 'd'
else:
self.module.fail_json(msg='Impossible value for local and descendents')
if not (self.users or self.groups or self.everyone):
if self.state == 'present':
self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
elif self.state == 'absent':
self.run_method = self.clear
# ansible ensures the else cannot happen here
self.zfs_path = module.get_bin_path('zfs', True)
@property
def current_perms(self):
""" Parse the output of `zfs allow <name>` to retrieve current permissions.
"""
out = self.run_zfs_raw(subcommand='allow')
perms = {
'l': {'u': {}, 'g': {}, 'e': []},
'd': {'u': {}, 'g': {}, 'e': []},
'ld': {'u': {}, 'g': {}, 'e': []},
}
linemap = {
'Local permissions:': 'l',
'Descendent permissions:': 'd',
'Local+Descendent permissions:': 'ld',
}
scope = None
for line in out.splitlines():
scope = linemap.get(line, scope)
if not scope:
continue
try:
if line.startswith('\tuser ') or line.startswith('\tgroup '):
ent_type, ent, cur_perms = line.split()
perms[scope][ent_type[0]][ent] = cur_perms.split(',')
elif line.startswith('\teveryone '):
perms[scope]['e'] = line.split()[1].split(',')
except ValueError:
self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
return perms
def run_zfs_raw(self, subcommand=None, args=None):
""" Run a raw zfs command, fail on error.
"""
cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
rc, out, err = self.module.run_command(cmd)
if rc:
self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
return out
def run_zfs(self, args):
""" Run zfs allow/unallow with appropriate options as per module arguments.
"""
args = self.recursive_opt + ['-' + self.scope] + args
if self.perms:
args.append(','.join(self.perms))
return self.run_zfs_raw(args=args)
def clear(self):
""" Called by run() to clear all permissions.
"""
changed = False
stdout = ''
for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
for ent in self.initial_perms[scope][ent_type].keys():
stdout += self.run_zfs(['-%s' % ent_type, ent])
changed = True
for scope in ('ld', 'l', 'd'):
if self.initial_perms[scope]['e']:
stdout += self.run_zfs(['-e'])
changed = True
return (changed, stdout)
def update(self):
""" Update permissions as per module arguments.
"""
stdout = ''
for ent_type, entities in (('u', self.users), ('g', self.groups)):
if entities:
stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
if self.everyone:
stdout += self.run_zfs(['-e'])
return (self.initial_perms != self.current_perms, stdout)
def run(self):
""" Run an operation, return results for Ansible.
"""
exit_args = {'state': self.state}
self.initial_perms = self.current_perms
exit_args['changed'], stdout = self.run_method()
if exit_args['changed']:
exit_args['msg'] = 'ZFS delegated admin permissions updated'
exit_args['stdout'] = stdout
self.module.exit_json(**exit_args)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
users=dict(default=[], type='list'),
groups=dict(default=[], type='list'),
everyone=dict(default=False, type='bool'),
permissions=dict(default=[], type='list'),
local=dict(default=None, type='bool'),
descendents=dict(default=None, type='bool'),
recursive=dict(default=False, type='bool')
),
supports_check_mode=False,
required_if=[('state', 'present', ['permissions'])]
)
zfs_delegate_admin = ZfsDelegateAdmin(module)
zfs_delegate_admin.run()
if __name__ == '__main__':
main()
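# Hedged illustration (not part of the module): for `zfs allow rpool/myfs`
# output along the lines of
#
#     ---- Permissions on rpool/myfs --------------------------------------
#     Local+Descendent permissions:
#             user adm allow,unallow
#             everyone send
#
# the current_perms property above would evaluate to (other scopes empty):
#
#     {'l':  {'u': {}, 'g': {}, 'e': []},
#      'd':  {'u': {}, 'g': {}, 'e': []},
#      'ld': {'u': {'adm': ['allow', 'unallow']}, 'g': {}, 'e': ['send']}}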
| {
"content_hash": "b726ed0997c45ec2208f52eaf493563d",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 118,
"avg_line_length": 36.641975308641975,
"alnum_prop": 0.5889487870619946,
"repo_name": "galaxyproject/ansible-common-roles",
"id": "749dba5b5a178e2d5df6d116a0cb0cfea666fe48",
"size": "9645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paths/library/zfs_delegate_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "2031"
},
{
"name": "Python",
"bytes": "14543"
},
{
"name": "Shell",
"bytes": "13492"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from api import views
urlpatterns = [
url(r'^search/$', views.package_search, name='package_search'),
url(r'^info/$', views.package_info, name='package_info'),
url(r'^register/$', views.package_register, name='package_register'),
url(r'^remove/$', views.package_remove, name='package_remove'),
url(r'^remove_build/$', views.remove_build, name='remove_build'),
url(r'^cleanup/$', views.cleanup, name='cleanup'),
url(r'^cleanup_all/$', views.cleanup_all, name='cleanup_all'),
url(r'^build/$', views.package_build, name='package_build'),
url(r'^build_all/$', views.package_build_all, name='package_build_all'),
url(r'^install/$', views.package_install, name='package_install'),
url(r'^toggle_ignore/$', views.toggle_ignore, name='toggle_ignore'),
]
| {
"content_hash": "1e1887fb0a854903621dca6b64870071",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 48.529411764705884,
"alnum_prop": 0.6654545454545454,
"repo_name": "colajam93/aurpackager",
"id": "741d45cb7412725b9f06bd0839f9ac31bb10a5ed",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "352"
},
{
"name": "HTML",
"bytes": "35879"
},
{
"name": "JavaScript",
"bytes": "1347"
},
{
"name": "Python",
"bytes": "53001"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
} |
'''
pb_statsfofn_ucd.py
Generate read stats from a PacBio fofn file.
The fofn input lists one subreads fasta file per line, e.g.:
/share/dnat/rs2/161028_448/D02_1/Analysis_Results/m161031_221844_42145_c101058752550000001823247401061784_s1_p0.2.subreads.fasta
/share/dnat/rs2/161028_448/D02_1/Analysis_Results/m161031_221844_42145_c101058752550000001823247401061784_s1_p0.3.subreads.fasta
The stats output file is written in JSON format.
'''
import sys
import os
import glob
import time
import re
import json
from collections import OrderedDict
from optparse import OptionParser # http://docs.python.org/library/optparse.html
version = "1.0"
usage = "usage: %prog [options] -o output_filename fofn_file"
epilog = "The fofn_file can be provided as stdin. \
Specifying -o stdout can be used to put the ou tput to stdout."
parser = OptionParser(usage=usage, version="%prog " + str(version), epilog=epilog)
parser.add_option('-o', '--output', help="output filename, stdout is acceptable [default: %default]",
action="store", type="str", dest="output", default="input.fofn.stats")
parser.add_option('-d', '--id', help="ID for the stats output [default: %default]",
action="store", type="str", dest="id", default=None)
(options, args) = parser.parse_args()
def remove_comment(str1):
loc = str1.find("#") # identify if and where a # occurs
if loc == -1:
return str1 # if # not found return str1 as is
str1 = str1[0:loc] # trim of comment
return str1.rstrip() # remove any trailing whitespace and return result
def parse_pacbio_readid_rs2(str1):
# >m160924_114322_42145_c101087062550000001823245703091787_s1_p0/8/0_35815 RQ=0.849
rid, rq = str1.strip().split(' RQ=') # strip out read quality
rid, zmw, rlen = rid.split("/")
rlen = rlen.split("_")
rstart = int(rlen[0])
rend = int(rlen[1])
rlen = rend-rstart
return (rid[1:], int(zmw), rstart, rend, rlen, float(rq))
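# Hedged worked example for the parser above, using the header format quoted
# in its leading comment:
#   parse_pacbio_readid_rs2(
#       '>m160924_114322_42145_c101087062550000001823245703091787_s1_p0/8/0_35815 RQ=0.849')
#   returns
#   ('m160924_114322_42145_c101087062550000001823245703091787_s1_p0',
#    8, 0, 35815, 35815, 0.849)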
if len(args) > 1:
sys.stderr.write("program requires at most 1 argument\n")
sys.stderr.write(usage + '\n')
sys.exit()
elif len(args) == 1:
infile = args[0]
# Start opening input/output files:
if not os.path.exists(infile):
sys.stderr.write("Error, can't find input file %s\n" % infile)
sys.exit()
infofn = open(infile, 'r')
else:
# reading from stdin
infofn = sys.stdin
output = options.output
if output == "stdout":
out = sys.stdout
else:
out = open(output, 'w')
sid = options.id
fasta_files = 0
all_data = []
for line in infofn:
# each line in the fofn file is one fasta file
line = remove_comment(line)
line = line.strip()
if len(line) == 0:
continue
if not os.path.exists(line):
sys.stderr.write("Error, can't find fasta file %s\n" % line)
sys.exit()
# process a fasta file
fasta = open(line, 'r')
sys.stderr.write("Processing file: %s\n" % line)
# parse out run and cell position and cell barcode from file name
pf = line.split("/Analysis_Results/")
if len(pf) == 2:
# agrees with UCD DNA Tech Core file path expectations
pr = pf[0].split('/')
run = pr[-2]
m = re.match("m\d+_\d+_\d+_(c\d+)_s1_p0.\d.subreads.fasta",pf[1])
if m:
cell_position = pr[-1]
cell_barcode = m.group(1)
else:
sys.stderr.write("Error, can't identify cell barcode in either filename, or first read\n")
sys.exit()
elif len(pf) == 1:
run = None
# try and extract cell_id from filename
m = re.match(".+/m\d+_\d+_\d+_(c\d+)_s1_p0.\d.subreads.fasta",pf[0])
if m:
cell_position = None
cell_barcode = m.group(1)
else:
# get cell id from first read id
l1 = fasta.readline()
m = re.match(">m\d+_\d+_\d+_(c\d+)_s1_p0/\d+/\d+_\d+ RQ=\d.\d+",l1)
if m:
cell_position = None
cell_barcode = m.group(1)
else:
sys.stderr.write("Error, can't identify cell barcode in either filename, or first read\n")
sys.exit()
fasta.seek(0) # seek back to beginning of file
else:
sys.stderr.write("Error, can't identify cell barcode in either filename, or first read\n")
sys.exit()
rcount = 0 # read count
zmw = []
rstart = []
rend = []
rlen = []
rq =[]
try:
for read in fasta:
if read[0] == '>':
rstats = parse_pacbio_readid_rs2(read)
zmw.append(rstats[1])
rstart.append(rstats[2])
rend.append(rstats[3])
rlen.append(rstats[4])
rq.append(rstats[5])
rcount += 1
else:
continue
finally:
fasta.close()
file_data = OrderedDict([("run_id", run),
("cell_position", cell_position),
("cell_barcode", cell_barcode),
("filename", line),
("read_count", rcount),
("zmw", zmw),
("read_starts", rstart),
("read_ends", rend),
("read_lengths", rlen),
("read_qualiies", rq)])
all_data.append(file_data)
file_data = None
fasta_files += 1
file_keys = ["run_id","cell_position","cell_barcode","filename","read_count","zmw","read_starts","read_ends","read_lengths","read_qualities"]
stats_json = OrderedDict([
("id", sid),
("format", "UCD Pac Bio Fasta stats %s" % version),
("format_url", "https://github.com/ucdavis-bioinformatics/PacBio_GenomeStats"),
("generated_by", "pb_statsfofn_ucd.py"),
("date", time.strftime("%Y-%m-%dT%H:%M:%S")),
("type", "ucd pb stats"),
("source", "fasta"),
("number_of_files", fasta_files),
("file_keys", file_keys),
("file_data", all_data)])
sys.stderr.write("Writing JSON output to: %s\n" % output )
json.dump(stats_json, out)
infofn.close()
out.close()
| {
"content_hash": "1c0064fd3e30507751b645f6f047a8c3",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 141,
"avg_line_length": 32.844919786096256,
"alnum_prop": 0.5695213285574732,
"repo_name": "ucdavis-bioinformatics-training/Bioinformatics-Genome-Assembly-and-Analysis-Workshop-Pac-Bio-and-10x-Genomics-",
"id": "077b954527868716222c6b636b5ef682b61238a8",
"size": "6165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PacBio-Experimental-Design/pb_statsfofn_ucd.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6165"
},
{
"name": "R",
"bytes": "4943"
},
{
"name": "Shell",
"bytes": "2029"
}
],
"symlink_target": ""
} |
"""Metrics to assess performance on regression task.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Karan Desai <karandesai281196@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# Christian Lorentzen <lorentzen.ch@googlemail.com>
# Ashutosh Hathidara <ashutoshhathidara98@gmail.com>
# Uttam kumar <bajiraouttamsinha@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from .._loss.glm_distribution import TweedieDistribution
from ..exceptions import UndefinedMetricWarning
from ..utils.validation import (
check_array,
check_consistent_length,
_num_samples,
column_or_1d,
_check_sample_weight,
_deprecate_positional_args,
)
from ..utils.stats import _weighted_percentile
__ALL__ = [
"max_error",
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"mean_absolute_percentage_error",
"mean_pinball_loss",
"r2_score",
"explained_variance_score",
"mean_tweedie_deviance",
"mean_poisson_deviance",
"mean_gamma_deviance",
]
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
"""Check that y_true and y_pred belong to the same regression task.
Parameters
----------
y_true : array-like
y_pred : array-like
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'.
y_true : array-like of shape (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
dtype : str or list, default="numeric"
the dtype argument passed to check_array.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError(
"y_true and y_pred have different number of output ({0}!={1})".format(
y_true.shape[1], y_pred.shape[1]
)
)
n_outputs = y_true.shape[1]
allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError(
"Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str, multioutput
)
)
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(
"There must be equally many custom weights (%d) as outputs (%d)."
% (len(multioutput), n_outputs)
)
y_type = "continuous" if n_outputs == 1 else "continuous-multioutput"
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(
y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
"""Mean absolute error regression loss.
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.85...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
elif multioutput == "uniform_average":
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_pinball_loss(
y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
"""Pinball loss for quantile regression.
Read more in the :ref:`User Guide <pinball_loss>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
alpha: float, slope of the pinball loss, default=0.5,
this loss is equivalent to :ref:`mean_absolute_error` when `alpha=0.5`,
`alpha=0.95` is minimized by estimators of the 95th percentile.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
The pinball loss output is a non-negative floating point. The best
value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_pinball_loss
>>> y_true = [1, 2, 3]
>>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
0.03...
>>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
0.3...
>>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
0.3...
>>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
0.03...
>>> mean_pinball_loss(y_true, y_true, alpha=0.1)
0.0
>>> mean_pinball_loss(y_true, y_true, alpha=0.9)
0.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
diff = y_true - y_pred
sign = (diff >= 0).astype(diff.dtype)
loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff
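    # Elementwise this is the classical pinball loss:
    # alpha * max(y_true - y_pred, 0) + (1 - alpha) * max(y_pred - y_true, 0).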
output_errors = np.average(loss, weights=sample_weight, axis=0)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
elif multioutput == "uniform_average":
# pass None as weights to np.average: uniform mean
multioutput = None
else:
raise ValueError(
"multioutput is expected to be 'raw_values' "
"or 'uniform_average' but we got %r"
" instead." % multioutput
)
return np.average(output_errors, weights=multioutput)
@_deprecate_positional_args(version="1.1")
def mean_absolute_percentage_error(
y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
"""Mean absolute percentage error (MAPE) regression loss.
Note here that the output is not a percentage in the range [0, 100]
and a value of 100 does not mean 100% but 1e2. Furthermore, the output
can be arbitrarily high when `y_true` is small (which is specific to the
metric) or when `abs(y_true - y_pred)` is large (which is common for most
regression metrics). Read more in the
:ref:`User Guide <mean_absolute_percentage_error>`.
.. versionadded:: 0.24
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
If input is list then the shape must be (n_outputs,).
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute percentage error
is returned for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAPE output is non-negative floating point. The best value is 0.0.
But note that bad predictions can lead to arbitrarily large
MAPE values, especially if some `y_true` values are very close to zero.
Note that we return a large value instead of `inf` when `y_true` is zero.
Examples
--------
>>> from sklearn.metrics import mean_absolute_percentage_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_percentage_error(y_true, y_pred)
0.3273...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_percentage_error(y_true, y_pred)
0.5515...
>>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.6198...
>>> # the value when some element of the y_true is zero is arbitrarily high because
>>> # of the division by epsilon
>>> y_true = [1., 0., 2.4, 7.]
>>> y_pred = [1.2, 0.1, 2.4, 8.]
>>> mean_absolute_percentage_error(y_true, y_pred)
112589990684262.48
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
epsilon = np.finfo(np.float64).eps
mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
output_errors = np.average(mape, weights=sample_weight, axis=0)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
elif multioutput == "uniform_average":
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(
y_true, y_pred, *, sample_weight=None, multioutput="uniform_average", squared=True
):
"""Mean squared error regression loss.
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE value.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred, squared=False)
0.612...
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred)
0.708...
>>> mean_squared_error(y_true, y_pred, squared=False)
0.822...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
array([0.41666667, 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.825...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
output_errors = np.average((y_true - y_pred) ** 2, axis=0, weights=sample_weight)
if not squared:
output_errors = np.sqrt(output_errors)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
elif multioutput == "uniform_average":
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_log_error(
y_true, y_pred, *, sample_weight=None, multioutput="uniform_average", squared=True
):
"""Mean squared logarithmic error regression loss.
Read more in the :ref:`User Guide <mean_squared_log_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSLE (mean squared log error) value.
If False returns RMSLE (root mean squared log error) value.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> mean_squared_log_error(y_true, y_pred)
0.039...
>>> mean_squared_log_error(y_true, y_pred, squared=False)
0.199...
>>> y_true = [[0.5, 1], [1, 2], [7, 6]]
>>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
>>> mean_squared_log_error(y_true, y_pred)
0.044...
>>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
array([0.00462428, 0.08377444])
>>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.060...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
if (y_true < 0).any() or (y_pred < 0).any():
raise ValueError(
"Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values."
)
return mean_squared_error(
np.log1p(y_true),
np.log1p(y_pred),
sample_weight=sample_weight,
multioutput=multioutput,
squared=squared,
)
def median_absolute_error(
y_true, y_pred, *, multioutput="uniform_average", sample_weight=None
):
"""Median absolute error regression loss.
Median absolute error output is non-negative floating point. The best value
is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values. Array-like value defines
weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.24
Returns
-------
loss : float or ndarray of floats
        If multioutput is 'raw_values', then median absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> median_absolute_error(y_true, y_pred)
0.75
>>> median_absolute_error(y_true, y_pred, multioutput='raw_values')
array([0.5, 1. ])
>>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.85
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
if sample_weight is None:
output_errors = np.median(np.abs(y_pred - y_true), axis=0)
else:
sample_weight = _check_sample_weight(sample_weight, y_pred)
output_errors = _weighted_percentile(
np.abs(y_pred - y_true), sample_weight=sample_weight
)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
elif multioutput == "uniform_average":
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def explained_variance_score(
y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
"""Explained variance regression score function.
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \
array-like of shape (n_outputs,), default='uniform_average'
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred)
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average(
(y_true - y_pred - y_diff_avg) ** 2, weights=sample_weight, axis=0
)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2, weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
if isinstance(multioutput, str):
if multioutput == "raw_values":
# return scores individually
return output_scores
elif multioutput == "uniform_average":
            # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == "variance_weighted":
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"):
""":math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a :math:`R^2` score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
array-like of shape (n_outputs,) or None, default='uniform_average'
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
Returns
-------
z : float or ndarray of floats
The :math:`R^2` score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, :math:`R^2` score may be negative (it need not
actually be the square of a quantity R).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred)
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred,
... multioutput='variance_weighted')
0.938...
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 2, 3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1, 2, 3]
>>> y_pred = [2, 2, 2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1, 2, 3]
>>> y_pred = [3, 2, 1]
>>> r2_score(y_true, y_pred)
-3.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput
)
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = "R^2 score is not well-defined with less than two samples."
warnings.warn(msg, UndefinedMetricWarning)
return float("nan")
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.0
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)
denominator = (
weight * (y_true - np.average(y_true, axis=0, weights=sample_weight)) ** 2
).sum(axis=0, dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
if isinstance(multioutput, str):
if multioutput == "raw_values":
# return scores individually
return output_scores
elif multioutput == "uniform_average":
            # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == "variance_weighted":
avg_weights = denominator
            # avoid failing on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def max_error(y_true, y_pred):
"""
The max_error metric calculates the maximum residual error.
Read more in the :ref:`User Guide <max_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
Returns
-------
max_error : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import max_error
>>> y_true = [3, 2, 7, 1]
>>> y_pred = [4, 2, 7, 1]
>>> max_error(y_true, y_pred)
1
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None)
if y_type == "continuous-multioutput":
raise ValueError("Multioutput not supported in max_error")
return np.max(np.abs(y_true - y_pred))
def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0):
"""Mean Tweedie deviance regression loss.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
power : float, default=0
Tweedie power parameter. Either power <= 0 or power >= 1.
        The higher the `power` the less weight is given to extreme
deviations between true and predicted targets.
- power < 0: Extreme stable distribution. Requires: y_pred > 0.
- power = 0 : Normal distribution, output corresponds to
mean_squared_error. y_true and y_pred can be any real numbers.
- power = 1 : Poisson distribution. Requires: y_true >= 0 and
y_pred > 0.
        - 1 < power < 2 : Compound Poisson distribution. Requires: y_true >= 0
and y_pred > 0.
- power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
- power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
and y_pred > 0.
- otherwise : Positive stable distribution. Requires: y_true > 0
and y_pred > 0.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_tweedie_deviance
>>> y_true = [2, 0, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_tweedie_deviance(y_true, y_pred, power=1)
1.4260...
"""
y_type, y_true, y_pred, _ = _check_reg_targets(
y_true, y_pred, None, dtype=[np.float64, np.float32]
)
if y_type == "continuous-multioutput":
raise ValueError("Multioutput not supported in mean_tweedie_deviance")
check_consistent_length(y_true, y_pred, sample_weight)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = sample_weight[:, np.newaxis]
dist = TweedieDistribution(power=power)
dev = dist.unit_deviance(y_true, y_pred, check_input=True)
return np.average(dev, weights=sample_weight)
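# Reference note (not part of the original module): for the special powers
# documented above, the Tweedie unit deviance averaged by this function takes
# these standard closed forms:
#
#   power = 0 :  d(y, mu) = (y - mu)**2
#   power = 1 :  d(y, mu) = 2 * (y * log(y / mu) - y + mu)        (0 * log(0) := 0)
#   power = 2 :  d(y, mu) = 2 * (log(mu / y) + y / mu - 1)
#   otherwise :  d(y, mu) = 2 * (max(y, 0)**(2 - p) / ((1 - p) * (2 - p))
#                                - y * mu**(1 - p) / (1 - p)
#                                + mu**(2 - p) / (2 - p))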
def mean_poisson_deviance(y_true, y_pred, *, sample_weight=None):
"""Mean Poisson deviance regression loss.
Poisson deviance is equivalent to the Tweedie deviance with
the power parameter `power=1`.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values. Requires y_true >= 0.
y_pred : array-like of shape (n_samples,)
Estimated target values. Requires y_pred > 0.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_poisson_deviance
>>> y_true = [2, 0, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_poisson_deviance(y_true, y_pred)
1.4260...
"""
return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=1)
def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
"""Mean Gamma deviance regression loss.
Gamma deviance is equivalent to the Tweedie deviance with
the power parameter `power=2`. It is invariant to scaling of
the target variable, and measures relative errors.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values. Requires y_true > 0.
y_pred : array-like of shape (n_samples,)
Estimated target values. Requires y_pred > 0.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_gamma_deviance
>>> y_true = [2, 0.5, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_gamma_deviance(y_true, y_pred)
1.0568...
"""
return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=2)
def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
"""D^2 regression score function, percentage of Tweedie deviance explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical mean of `y_true` as
constant prediction, disregarding the input features, gets a D^2 score of 0.0.
Read more in the :ref:`User Guide <d2_tweedie_score>`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
    sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
power : float, default=0
Tweedie power parameter. Either power <= 0 or power >= 1.
        The higher the `power` the less weight is given to extreme
deviations between true and predicted targets.
- power < 0: Extreme stable distribution. Requires: y_pred > 0.
- power = 0 : Normal distribution, output corresponds to r2_score.
y_true and y_pred can be any real numbers.
- power = 1 : Poisson distribution. Requires: y_true >= 0 and
y_pred > 0.
        - 1 < power < 2 : Compound Poisson distribution. Requires: y_true >= 0
and y_pred > 0.
- power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
- power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
and y_pred > 0.
- otherwise : Positive stable distribution. Requires: y_true > 0
and y_pred > 0.
Returns
-------
z : float or ndarray of floats
The D^2 score.
Notes
-----
This is not a symmetric function.
Like R^2, D^2 score may be negative (it need not actually be the square of
a quantity D).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://trevorhastie.github.io
Examples
--------
>>> from sklearn.metrics import d2_tweedie_score
>>> y_true = [0.5, 1, 2.5, 7]
>>> y_pred = [1, 1, 5, 3.5]
>>> d2_tweedie_score(y_true, y_pred)
0.285...
>>> d2_tweedie_score(y_true, y_pred, power=1)
0.487...
>>> d2_tweedie_score(y_true, y_pred, power=2)
0.630...
>>> d2_tweedie_score(y_true, y_true, power=2)
1.0
"""
y_type, y_true, y_pred, _ = _check_reg_targets(
y_true, y_pred, None, dtype=[np.float64, np.float32]
)
if y_type == "continuous-multioutput":
raise ValueError("Multioutput not supported in d2_tweedie_score")
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = "D^2 score is not well-defined with less than two samples."
warnings.warn(msg, UndefinedMetricWarning)
return float("nan")
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = sample_weight[:, np.newaxis]
dist = TweedieDistribution(power=power)
dev = dist.unit_deviance(y_true, y_pred, check_input=True)
numerator = np.average(dev, weights=sample_weight)
y_avg = np.average(y_true, weights=sample_weight)
dev = dist.unit_deviance(y_true, y_avg, check_input=True)
denominator = np.average(dev, weights=sample_weight)
return 1 - numerator / denominator
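# Sketch (not part of the original module): with the default power=0 the unit
# deviance is the squared error, so d2_tweedie_score reduces to r2_score.
# Using the example data from the docstring above:
#
#     >>> from sklearn.metrics import d2_tweedie_score, r2_score
#     >>> y_true = [0.5, 1, 2.5, 7]
#     >>> y_pred = [1, 1, 5, 3.5]
#     >>> d2_tweedie_score(y_true, y_pred)
#     0.285...
#     >>> r2_score(y_true, y_pred)
#     0.285...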
| {
"content_hash": "a055faad58d6e3bd381010e4bd1e6764",
"timestamp": "",
"source": "github",
"line_count": 1108,
"max_line_length": 88,
"avg_line_length": 34.66335740072202,
"alnum_prop": 0.6139766188455229,
"repo_name": "sergeyf/scikit-learn",
"id": "ffa2b0b8218aaa0821d9fe1979f180295abd4f69",
"size": "38407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/metrics/_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "718114"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9906683"
},
{
"name": "Shell",
"bytes": "49565"
}
],
"symlink_target": ""
} |
import six
from sahara.api import acl
from sahara.service.api import v10 as api
from sahara.service import validation as v
from sahara.service.validations import cluster_template_schema as ct_schema
from sahara.service.validations import cluster_templates as v_ct
from sahara.service.validations import clusters as v_c
from sahara.service.validations import clusters_scaling as v_c_s
from sahara.service.validations import clusters_schema as v_c_schema
from sahara.service.validations import images as v_images
from sahara.service.validations import node_group_template_schema as ngt_schema
from sahara.service.validations import node_group_templates as v_ngt
from sahara.service.validations import plugins as v_p
import sahara.utils.api as u
rest = u.Rest('v10', __name__)
# Cluster ops
@rest.get('/clusters')
@acl.enforce("data-processing:clusters:get_all")
@v.check_exists(api.get_cluster, 'marker')
@v.validate(None, v.validate_pagination_limit,
v.validate_sorting_clusters)
def clusters_list():
result = api.get_clusters(**u.get_request_args().to_dict())
return u.render(res=result, name='clusters')
@rest.post('/clusters')
@acl.enforce("data-processing:clusters:create")
@v.validate(v_c_schema.CLUSTER_SCHEMA, v_c.check_cluster_create)
def clusters_create(data):
return u.render(api.create_cluster(data).to_wrapped_dict())
@rest.post('/clusters/multiple')
@acl.enforce("data-processing:clusters:create")
@v.validate(
v_c_schema.MULTIPLE_CLUSTER_SCHEMA, v_c.check_multiple_clusters_create)
def clusters_create_multiple(data):
return u.render(api.create_multiple_clusters(data))
@rest.put('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:scale")
@v.check_exists(api.get_cluster, 'cluster_id')
@v.validate(v_c_schema.CLUSTER_SCALING_SCHEMA, v_c_s.check_cluster_scaling)
def clusters_scale(cluster_id, data):
return u.to_wrapped_dict(api.scale_cluster, cluster_id, data)
@rest.get('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:get")
@v.check_exists(api.get_cluster, 'cluster_id')
def clusters_get(cluster_id):
data = u.get_request_args()
show_events = six.text_type(
data.get('show_progress', 'false')).lower() == 'true'
return u.to_wrapped_dict(api.get_cluster, cluster_id, show_events)
@rest.patch('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:modify")
@v.check_exists(api.get_cluster, 'cluster_id')
@v.validate(v_c_schema.CLUSTER_UPDATE_SCHEMA, v_c.check_cluster_update)
def clusters_update(cluster_id, data):
return u.to_wrapped_dict(api.update_cluster, cluster_id, data)
@rest.delete('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:delete")
@v.check_exists(api.get_cluster, 'cluster_id')
@v.validate(None, v_c.check_cluster_delete)
def clusters_delete(cluster_id):
api.terminate_cluster(cluster_id)
return u.render()
# ClusterTemplate ops
@rest.get('/cluster-templates')
@acl.enforce("data-processing:cluster-templates:get_all")
@v.check_exists(api.get_cluster_template, 'marker')
@v.validate(None, v.validate_pagination_limit,
v.validate_sorting_cluster_templates)
def cluster_templates_list():
result = api.get_cluster_templates(
**u.get_request_args().to_dict())
return u.render(res=result, name='cluster_templates')
@rest.post('/cluster-templates')
@acl.enforce("data-processing:cluster-templates:create")
@v.validate(ct_schema.CLUSTER_TEMPLATE_SCHEMA,
v_ct.check_cluster_template_create)
def cluster_templates_create(data):
return u.render(api.create_cluster_template(data).to_wrapped_dict())
@rest.get('/cluster-templates/<cluster_template_id>')
@acl.enforce("data-processing:cluster-templates:get")
@v.check_exists(api.get_cluster_template, 'cluster_template_id')
def cluster_templates_get(cluster_template_id):
return u.to_wrapped_dict(api.get_cluster_template, cluster_template_id)
@rest.put('/cluster-templates/<cluster_template_id>')
@acl.enforce("data-processing:cluster-templates:modify")
@v.check_exists(api.get_cluster_template, 'cluster_template_id')
@v.validate(ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA,
v_ct.check_cluster_template_update)
def cluster_templates_update(cluster_template_id, data):
return u.to_wrapped_dict(
api.update_cluster_template, cluster_template_id, data)
@rest.delete('/cluster-templates/<cluster_template_id>')
@acl.enforce("data-processing:cluster-templates:delete")
@v.check_exists(api.get_cluster_template, 'cluster_template_id')
@v.validate(None, v_ct.check_cluster_template_usage)
def cluster_templates_delete(cluster_template_id):
api.terminate_cluster_template(cluster_template_id)
return u.render()
# NodeGroupTemplate ops
@rest.get('/node-group-templates')
@acl.enforce("data-processing:node-group-templates:get_all")
@v.check_exists(api.get_node_group_template, 'marker')
@v.validate(None, v.validate_pagination_limit,
v.validate_sorting_node_group_templates)
def node_group_templates_list():
result = api.get_node_group_templates(
**u.get_request_args().to_dict())
return u.render(res=result, name='node_group_templates')
@rest.post('/node-group-templates')
@acl.enforce("data-processing:node-group-templates:create")
@v.validate(ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA,
v_ngt.check_node_group_template_create)
def node_group_templates_create(data):
return u.render(api.create_node_group_template(data).to_wrapped_dict())
@rest.get('/node-group-templates/<node_group_template_id>')
@acl.enforce("data-processing:node-group-templates:get")
@v.check_exists(api.get_node_group_template, 'node_group_template_id')
def node_group_templates_get(node_group_template_id):
return u.to_wrapped_dict(
api.get_node_group_template, node_group_template_id)
@rest.put('/node-group-templates/<node_group_template_id>')
@acl.enforce("data-processing:node-group-templates:modify")
@v.check_exists(api.get_node_group_template, 'node_group_template_id')
@v.validate(ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA,
v_ngt.check_node_group_template_update)
def node_group_templates_update(node_group_template_id, data):
return u.to_wrapped_dict(
api.update_node_group_template, node_group_template_id, data)
@rest.delete('/node-group-templates/<node_group_template_id>')
@acl.enforce("data-processing:node-group-templates:delete")
@v.check_exists(api.get_node_group_template, 'node_group_template_id')
@v.validate(None, v_ngt.check_node_group_template_usage)
def node_group_templates_delete(node_group_template_id):
api.terminate_node_group_template(node_group_template_id)
return u.render()
# Plugins ops
@rest.get('/plugins')
@acl.enforce("data-processing:plugins:get_all")
def plugins_list():
return u.render(plugins=[p.dict for p in api.get_plugins()])
@rest.get('/plugins/<plugin_name>')
@acl.enforce("data-processing:plugins:get")
@v.check_exists(api.get_plugin, plugin_name='plugin_name')
def plugins_get(plugin_name):
return u.render(api.get_plugin(plugin_name).wrapped_dict)
@rest.get('/plugins/<plugin_name>/<version>')
@acl.enforce("data-processing:plugins:get_version")
@v.check_exists(api.get_plugin, plugin_name='plugin_name', version='version')
def plugins_get_version(plugin_name, version):
return u.render(api.get_plugin(plugin_name, version).wrapped_dict)
@rest.patch('/plugins/<plugin_name>')
@acl.enforce("data-processing:plugins:patch")
@v.check_exists(api.get_plugin, plugin_name='plugin_name')
@v.validate(v_p.plugin_update_validation_jsonschema(), v_p.check_plugin_update)
def plugins_update(plugin_name, data):
return u.render(api.update_plugin(plugin_name, data).wrapped_dict)
@rest.post_file('/plugins/<plugin_name>/<version>/convert-config/<name>')
@acl.enforce("data-processing:plugins:convert_config")
@v.check_exists(api.get_plugin, plugin_name='plugin_name', version='version')
@v.validate(None, v_p.check_convert_to_template)
def plugins_convert_to_cluster_template(plugin_name, version, name, data):
    # There are no plugins that support converting to a cluster template.
    # The last plugin that supported this is itself no longer supported.
pass
# Image Registry ops
@rest.get('/images')
@acl.enforce("data-processing:images:get_all")
def images_list():
tags = u.get_request_args().getlist('tags')
name = u.get_request_args().get('name', None)
return u.render(images=[i.dict for i in api.get_images(name, tags)])
@rest.get('/images/<image_id>')
@acl.enforce("data-processing:images:get")
@v.check_exists(api.get_image, id='image_id')
def images_get(image_id):
return u.render(api.get_registered_image(image_id=image_id).wrapped_dict)
@rest.post('/images/<image_id>')
@acl.enforce("data-processing:images:register")
@v.check_exists(api.get_image, id='image_id')
@v.validate(v_images.image_register_schema, v_images.check_image_register)
def images_set(image_id, data):
return u.render(api.register_image(image_id, **data).wrapped_dict)
@rest.delete('/images/<image_id>')
@acl.enforce("data-processing:images:unregister")
@v.check_exists(api.get_image, id='image_id')
def images_unset(image_id):
api.unregister_image(image_id)
return u.render()
@rest.post('/images/<image_id>/tag')
@acl.enforce("data-processing:images:add_tags")
@v.check_exists(api.get_image, id='image_id')
@v.validate(v_images.image_tags_schema, v_images.check_tags)
def image_tags_add(image_id, data):
return u.render(api.add_image_tags(image_id, **data).wrapped_dict)
@rest.post('/images/<image_id>/untag')
@acl.enforce("data-processing:images:remove_tags")
@v.check_exists(api.get_image, id='image_id')
@v.validate(v_images.image_tags_schema)
def image_tags_delete(image_id, data):
return u.render(api.remove_image_tags(image_id, **data).wrapped_dict)
| {
"content_hash": "d9a08d0eed5b240b71b99a020fb43ab6",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 79,
"avg_line_length": 37.033962264150944,
"alnum_prop": 0.7381292031791319,
"repo_name": "tellesnobrega/sahara",
"id": "e4e5730c45489bcfdd3059089b44ca7d0bec638b",
"size": "10397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara/api/v10.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "3354711"
},
{
"name": "Shell",
"bytes": "56856"
}
],
"symlink_target": ""
} |
import piglow
from multiprocessing import Process, Queue
from Queue import Full, Empty
from time import sleep
#
# Piglow UI utils for piglow-sys library.
#
def start(clear=True):
""" Start PiGlow UI updates """
if _enabled:
return
if clear:
_change_task("_clear_all")
_change_task("_enable")
_start_updater()
def stop(clear=True):
""" Stop any PiGlow UI updates """
if clear:
_change_task("_clear_all")
_change_task("_disable")
def clear_all():
""" Clear all LEDs """
_change_task("_clear_all")
def pulse_color(color, speed=10, low=64, high=255):
""" Pulse each LED of the defined color at the defined speed. """
_change_task("_pulse_color", [color, speed, low, high], True)
def set_color(color, value):
""" Set the value of the defined color """
_change_task("_set_color", [color, value])
def cycle(leds, speed=10, low=0, high=255):
""" Cycle each LED from low to high in order """
_change_task("_cycle", [leds, speed, low, high], True)
def dim(led, speed=2, high=255, low=0):
""" Dims the LED from high to low at the given speed """
_change_task("_dim", [led, speed, high, low], True)
def set(leds, value):
""" Sets the value of each led """
_change_task("_set", [leds, value])
def pulse(led, speed=2, low=0, high=255):
""" Pulse the LED from low to high at the given speed """
_change_task("_pulse", [led, speed, low, high], True)
#
# Private functions to drive the UI (ie, PiGlow updates)
#
_enabled = False
_task_queue = Queue()
_updater_process = None
_NOTASK_SLEEP_INTERVAL = 1
def _enable():
""" Enable the PiGlow UI updates """
global _enabled
_enabled = True
def _disable():
""" Disable the PiGlow UI updates """
global _enabled
_enabled = False
def _change_task(task, args=[], repeat=False, interval=0):
""" Change the current task """
try:
_task_queue.put([task, args, repeat, interval])
except Full:
print "Task ", task, " failed. Task queue full"
return
def _handle_tasks(tasks):
""" Perform the UI update for the current task """
global _enabled
task = None
_enabled = True
while _enabled:
try:
task = tasks.get(False)
except Empty:
# Do nothing, this is a valid state
pass
# If we have no task, just sleep for an interval and read again
if task is None:
sleep(_NOTASK_SLEEP_INTERVAL)
continue
        # Look up and exec the task method; use .get() so an unknown task
        # name yields None (and an interval sleep) instead of a KeyError
        task_method = globals().get(task[0])
if task_method is None:
sleep(task[3])
continue
else:
task_method(*task[1])
if not task[2]:
task = None
def _start_updater():
""" Start an updater process if there isn't already one """
global _updater_process
# If already enabled, just return
if _enabled:
return
_updater_process = Process(target=_handle_tasks, args=(_task_queue,))
_updater_process.start()
#
# API drawing task functions
#
def _clear_all():
""" Clear all LEDs """
for l in range(0, 18):
piglow.set(l, 0)
piglow.show()
def _set_color(color, value):
""" Set the value of the defined color """
color_setter = getattr(piglow, color)
color_setter(value)
piglow.show()
def _pulse_color(color, speed, low, high):
""" Pulse each LED of the defined color at the given speed """
color_setter = getattr(piglow, color)
pulse_range = range(low, high)
    wait_for = 1.0 / speed  # float division: integer speeds would give 0 in Python 2
for c in pulse_range:
color_setter(c)
piglow.show()
sleep(wait_for)
for c in reversed(pulse_range):
color_setter(c)
piglow.show()
sleep(wait_for)
def _pulse(led, speed, low, high):
""" Pulse the LED from low to high """
pulse_range = range(low, high)
    wait_for = 1.0 / speed
for c in pulse_range:
piglow.set(led, c)
piglow.show()
sleep(wait_for)
for c in reversed(pulse_range):
piglow.set(led, c)
piglow.show()
sleep(wait_for)
def _set(leds, value):
""" Sets the value of each led """
for led in leds:
piglow.set(led, value)
piglow.show()
def _dim(led, speed, high, low):
""" Dims the led from high to low at the given speed """
dim_range = range(low, high)
    wait_for = 1.0 / speed
for c in reversed(dim_range):
piglow.set(led, c)
piglow.show()
sleep(wait_for)
def _cycle(leds, speed, low, high):
""" Cycle each LED from low to high in order """
pulse_range = range(low, high)
    wait_for = 1.0 / speed
# Set each LED to the LOW state
_set(leds, low)
for i in range(0, len(leds)):
for c in pulse_range:
# Increase the LED to HIGH
piglow.set(leds[i], c)
piglow.show()
sleep(wait_for)
# Decrease the previous LED back to LOW at same rate
if i > 0:
piglow.set(leds[i-1], high-(c-low))
piglow.show()
sleep(wait_for)
# Decrease the final LED back to LOW state
_dim(leds[-1], speed, high, low)
# Set each LED to the LOW state
_set(leds, low)
| {
"content_hash": "6c0ac8c81eba9d786ea880b44d94dbfa",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 73,
"avg_line_length": 24.48611111111111,
"alnum_prop": 0.5802609188882587,
"repo_name": "deanydean/py-piglow-sys",
"id": "37a1f41b8202995a02a239b975ab1754c6252166",
"size": "5904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/piglowui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11839"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
} |
"""Integration with the Rachio Iro sprinkler system controller."""
from abc import abstractmethod
from contextlib import suppress
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import ATTR_ENTITY_ID, ATTR_ID
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import as_timestamp, now, parse_datetime, utc_from_timestamp
from .const import (
CONF_MANUAL_RUN_MINS,
DEFAULT_MANUAL_RUN_MINS,
DOMAIN as DOMAIN_RACHIO,
KEY_CUSTOM_CROP,
KEY_CUSTOM_SHADE,
KEY_CUSTOM_SLOPE,
KEY_DEVICE_ID,
KEY_DURATION,
KEY_ENABLED,
KEY_ID,
KEY_IMAGE_URL,
KEY_NAME,
KEY_ON,
KEY_RAIN_DELAY,
KEY_RAIN_DELAY_END,
KEY_SCHEDULE_ID,
KEY_SUBTYPE,
KEY_SUMMARY,
KEY_TYPE,
KEY_ZONE_ID,
KEY_ZONE_NUMBER,
SCHEDULE_TYPE_FIXED,
SCHEDULE_TYPE_FLEX,
SERVICE_SET_ZONE_MOISTURE,
SERVICE_START_MULTIPLE_ZONES,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
SIGNAL_RACHIO_SCHEDULE_UPDATE,
SIGNAL_RACHIO_ZONE_UPDATE,
SLOPE_FLAT,
SLOPE_MODERATE,
SLOPE_SLIGHT,
SLOPE_STEEP,
)
from .entity import RachioDevice
from .webhooks import (
SUBTYPE_RAIN_DELAY_OFF,
SUBTYPE_RAIN_DELAY_ON,
SUBTYPE_SCHEDULE_COMPLETED,
SUBTYPE_SCHEDULE_STARTED,
SUBTYPE_SCHEDULE_STOPPED,
SUBTYPE_SLEEP_MODE_OFF,
SUBTYPE_SLEEP_MODE_ON,
SUBTYPE_ZONE_COMPLETED,
SUBTYPE_ZONE_PAUSED,
SUBTYPE_ZONE_STARTED,
SUBTYPE_ZONE_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = "duration"
ATTR_PERCENT = "percent"
ATTR_SCHEDULE_SUMMARY = "Summary"
ATTR_SCHEDULE_ENABLED = "Enabled"
ATTR_SCHEDULE_DURATION = "Duration"
ATTR_SCHEDULE_TYPE = "Type"
ATTR_SORT_ORDER = "sortOrder"
ATTR_ZONE_NUMBER = "Zone number"
ATTR_ZONE_SHADE = "Shade"
ATTR_ZONE_SLOPE = "Slope"
ATTR_ZONE_SUMMARY = "Summary"
ATTR_ZONE_TYPE = "Type"
START_MULTIPLE_ZONES_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_DURATION): cv.ensure_list_csv,
}
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Rachio switches."""
zone_entities = []
has_flex_sched = False
entities = await hass.async_add_executor_job(_create_entities, hass, config_entry)
for entity in entities:
if isinstance(entity, RachioZone):
zone_entities.append(entity)
if isinstance(entity, RachioSchedule) and entity.type == SCHEDULE_TYPE_FLEX:
has_flex_sched = True
async_add_entities(entities)
_LOGGER.info("%d Rachio switch(es) added", len(entities))
def start_multiple(service):
"""Service to start multiple zones in sequence."""
zones_list = []
person = hass.data[DOMAIN_RACHIO][config_entry.entry_id]
entity_id = service.data[ATTR_ENTITY_ID]
duration = iter(service.data[ATTR_DURATION])
default_time = service.data[ATTR_DURATION][0]
entity_to_zone_id = {
entity.entity_id: entity.zone_id for entity in zone_entities
}
for (count, data) in enumerate(entity_id):
if data in entity_to_zone_id:
# Time can be passed as a list per zone,
# or one time for all zones
time = int(next(duration, default_time)) * 60
zones_list.append(
{
ATTR_ID: entity_to_zone_id.get(data),
ATTR_DURATION: time,
ATTR_SORT_ORDER: count,
}
)
if len(zones_list) != 0:
person.start_multiple_zones(zones_list)
_LOGGER.debug("Starting zone(s) %s", entity_id)
else:
raise HomeAssistantError("No matching zones found in given entity_ids")
hass.services.async_register(
DOMAIN_RACHIO,
SERVICE_START_MULTIPLE_ZONES,
start_multiple,
schema=START_MULTIPLE_ZONES_SCHEMA,
)
if has_flex_sched:
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SET_ZONE_MOISTURE,
{vol.Required(ATTR_PERCENT): cv.positive_int},
"set_moisture_percent",
)
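# Example payload for the start_multiple service above (a sketch, not part of
# the original integration; the entity ids are hypothetical). Durations are in
# minutes and may be given per zone or once for all zones:
#
#     {
#         "entity_id": ["switch.front_lawn", "switch.back_lawn"],
#         "duration": [5, 10],   # or [5] to run every listed zone for 5 minutes
#     }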
def _create_entities(hass, config_entry):
entities = []
person = hass.data[DOMAIN_RACHIO][config_entry.entry_id]
# Fetch the schedule once at startup
# in order to avoid every zone doing it
for controller in person.controllers:
entities.append(RachioStandbySwitch(controller))
entities.append(RachioRainDelay(controller))
zones = controller.list_zones()
schedules = controller.list_schedules()
flex_schedules = controller.list_flex_schedules()
current_schedule = controller.current_schedule
for zone in zones:
entities.append(RachioZone(person, controller, zone, current_schedule))
for sched in schedules + flex_schedules:
entities.append(RachioSchedule(person, controller, sched, current_schedule))
_LOGGER.debug("Added %s", entities)
return entities
class RachioSwitch(RachioDevice, SwitchEntity):
"""Represent a Rachio state that can be toggled."""
def __init__(self, controller):
"""Initialize a new Rachio switch."""
super().__init__(controller)
self._state = None
@property
def name(self) -> str:
"""Get a name for this switch."""
return f"Switch on {self._controller.name}"
@property
def is_on(self) -> bool:
"""Return whether the switch is currently on."""
return self._state
@callback
def _async_handle_any_update(self, *args, **kwargs) -> None:
"""Determine whether an update event applies to this device."""
if args[0][KEY_DEVICE_ID] != self._controller.controller_id:
# For another device
return
# For this device
self._async_handle_update(args, kwargs)
@abstractmethod
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook data."""
class RachioStandbySwitch(RachioSwitch):
"""Representation of a standby status/button."""
@property
def name(self) -> str:
"""Return the name of the standby switch."""
return f"{self._controller.name} in standby mode"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and purpose."""
return f"{self._controller.controller_id}-standby"
@property
def icon(self) -> str:
"""Return an icon for the standby switch."""
return "mdi:power"
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_ON:
self._state = True
elif args[0][0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_OFF:
self._state = False
self.async_write_ha_state()
def turn_on(self, **kwargs) -> None:
"""Put the controller in standby mode."""
self._controller.rachio.device.turn_off(self._controller.controller_id)
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.turn_on(self._controller.controller_id)
async def async_added_to_hass(self):
"""Subscribe to updates."""
if KEY_ON in self._controller.init_data:
self._state = not self._controller.init_data[KEY_ON]
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
self._async_handle_any_update,
)
)
class RachioRainDelay(RachioSwitch):
"""Representation of a rain delay status/switch."""
def __init__(self, controller):
"""Set up a Rachio rain delay switch."""
self._cancel_update = None
super().__init__(controller)
@property
def name(self) -> str:
"""Return the name of the switch."""
return f"{self._controller.name} rain delay"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and purpose."""
return f"{self._controller.controller_id}-delay"
@property
def icon(self) -> str:
"""Return an icon for rain delay."""
return "mdi:camera-timer"
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if self._cancel_update:
self._cancel_update()
self._cancel_update = None
if args[0][0][KEY_SUBTYPE] == SUBTYPE_RAIN_DELAY_ON:
endtime = parse_datetime(args[0][0][KEY_RAIN_DELAY_END])
_LOGGER.debug("Rain delay expires at %s", endtime)
self._state = True
self._cancel_update = async_track_point_in_utc_time(
self.hass, self._delay_expiration, endtime
)
elif args[0][0][KEY_SUBTYPE] == SUBTYPE_RAIN_DELAY_OFF:
self._state = False
self.async_write_ha_state()
@callback
def _delay_expiration(self, *args) -> None:
"""Trigger when a rain delay expires."""
self._state = False
self._cancel_update = None
self.async_write_ha_state()
def turn_on(self, **kwargs) -> None:
"""Activate a 24 hour rain delay on the controller."""
self._controller.rachio.device.rain_delay(self._controller.controller_id, 86400)
_LOGGER.debug("Starting rain delay for 24 hours")
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.rain_delay(self._controller.controller_id, 0)
_LOGGER.debug("Canceling rain delay")
async def async_added_to_hass(self):
"""Subscribe to updates."""
if KEY_RAIN_DELAY in self._controller.init_data:
self._state = self._controller.init_data[
KEY_RAIN_DELAY
] / 1000 > as_timestamp(now())
# If the controller was in a rain delay state during a reboot, this re-sets the timer
if self._state is True:
delay_end = utc_from_timestamp(
self._controller.init_data[KEY_RAIN_DELAY] / 1000
)
_LOGGER.debug("Re-setting rain delay timer for %s", delay_end)
self._cancel_update = async_track_point_in_utc_time(
self.hass, self._delay_expiration, delay_end
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_RACHIO_RAIN_DELAY_UPDATE,
self._async_handle_any_update,
)
)
class RachioZone(RachioSwitch):
"""Representation of one zone of sprinklers connected to the Rachio Iro."""
def __init__(self, person, controller, data, current_schedule):
"""Initialize a new Rachio Zone."""
self.id = data[KEY_ID]
self._zone_name = data[KEY_NAME]
self._zone_number = data[KEY_ZONE_NUMBER]
self._zone_enabled = data[KEY_ENABLED]
self._entity_picture = data.get(KEY_IMAGE_URL)
self._person = person
self._shade_type = data.get(KEY_CUSTOM_SHADE, {}).get(KEY_NAME)
self._zone_type = data.get(KEY_CUSTOM_CROP, {}).get(KEY_NAME)
self._slope_type = data.get(KEY_CUSTOM_SLOPE, {}).get(KEY_NAME)
self._summary = ""
self._current_schedule = current_schedule
super().__init__(controller)
def __str__(self):
"""Display the zone as a string."""
return 'Rachio Zone "{}" on {}'.format(self.name, str(self._controller))
@property
def zone_id(self) -> str:
"""How the Rachio API refers to the zone."""
return self.id
@property
def name(self) -> str:
"""Return the friendly name of the zone."""
return self._zone_name
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and zone number."""
return f"{self._controller.controller_id}-zone-{self.zone_id}"
@property
def icon(self) -> str:
"""Return the icon to display."""
return "mdi:water"
@property
def zone_is_enabled(self) -> bool:
"""Return whether the zone is allowed to run."""
return self._zone_enabled
@property
def entity_picture(self):
"""Return the entity picture to use in the frontend, if any."""
return self._entity_picture
@property
def extra_state_attributes(self) -> dict:
"""Return the optional state attributes."""
props = {ATTR_ZONE_NUMBER: self._zone_number, ATTR_ZONE_SUMMARY: self._summary}
if self._shade_type:
props[ATTR_ZONE_SHADE] = self._shade_type
if self._zone_type:
props[ATTR_ZONE_TYPE] = self._zone_type
if self._slope_type:
if self._slope_type == SLOPE_FLAT:
props[ATTR_ZONE_SLOPE] = "Flat"
elif self._slope_type == SLOPE_SLIGHT:
props[ATTR_ZONE_SLOPE] = "Slight"
elif self._slope_type == SLOPE_MODERATE:
props[ATTR_ZONE_SLOPE] = "Moderate"
elif self._slope_type == SLOPE_STEEP:
props[ATTR_ZONE_SLOPE] = "Steep"
return props
def turn_on(self, **kwargs) -> None:
"""Start watering this zone."""
# Stop other zones first
self.turn_off()
# Start this zone
manual_run_time = timedelta(
minutes=self._person.config_entry.options.get(
CONF_MANUAL_RUN_MINS, DEFAULT_MANUAL_RUN_MINS
)
)
self._controller.rachio.zone.start(self.zone_id, manual_run_time.seconds)
_LOGGER.debug(
"Watering %s on %s for %s",
self.name,
self._controller.name,
str(manual_run_time),
)
def turn_off(self, **kwargs) -> None:
"""Stop watering all zones."""
self._controller.stop_watering()
def set_moisture_percent(self, percent) -> None:
"""Set the zone moisture percent."""
_LOGGER.debug("Setting %s moisture to %s percent", self._zone_name, percent)
self._controller.rachio.zone.set_moisture_percent(self.id, percent / 100)
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook zone data."""
if args[0][KEY_ZONE_ID] != self.zone_id:
return
self._summary = args[0][KEY_SUMMARY]
if args[0][KEY_SUBTYPE] == SUBTYPE_ZONE_STARTED:
self._state = True
elif args[0][KEY_SUBTYPE] in [
SUBTYPE_ZONE_STOPPED,
SUBTYPE_ZONE_COMPLETED,
SUBTYPE_ZONE_PAUSED,
]:
self._state = False
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._state = self.zone_id == self._current_schedule.get(KEY_ZONE_ID)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_RACHIO_ZONE_UPDATE, self._async_handle_update
)
)
class RachioSchedule(RachioSwitch):
"""Representation of one fixed schedule on the Rachio Iro."""
def __init__(self, person, controller, data, current_schedule):
"""Initialize a new Rachio Schedule."""
self._schedule_id = data[KEY_ID]
self._schedule_name = data[KEY_NAME]
self._duration = data[KEY_DURATION]
self._schedule_enabled = data[KEY_ENABLED]
self._summary = data[KEY_SUMMARY]
self.type = data.get(KEY_TYPE, SCHEDULE_TYPE_FIXED)
self._current_schedule = current_schedule
super().__init__(controller)
@property
def name(self) -> str:
"""Return the friendly name of the schedule."""
return f"{self._schedule_name} Schedule"
@property
def unique_id(self) -> str:
"""Return a unique id by combining controller id and schedule."""
return f"{self._controller.controller_id}-schedule-{self._schedule_id}"
@property
def icon(self) -> str:
"""Return the icon to display."""
return "mdi:water" if self.schedule_is_enabled else "mdi:water-off"
@property
def extra_state_attributes(self) -> dict:
"""Return the optional state attributes."""
return {
ATTR_SCHEDULE_SUMMARY: self._summary,
ATTR_SCHEDULE_ENABLED: self.schedule_is_enabled,
ATTR_SCHEDULE_DURATION: f"{round(self._duration / 60)} minutes",
ATTR_SCHEDULE_TYPE: self.type,
}
@property
def schedule_is_enabled(self) -> bool:
"""Return whether the schedule is allowed to run."""
return self._schedule_enabled
def turn_on(self, **kwargs) -> None:
"""Start this schedule."""
self._controller.rachio.schedulerule.start(self._schedule_id)
_LOGGER.debug(
"Schedule %s started on %s",
self.name,
self._controller.name,
)
def turn_off(self, **kwargs) -> None:
"""Stop watering all zones."""
self._controller.stop_watering()
@callback
def _async_handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook schedule data."""
# Schedule ID not passed when running individual zones, so we catch that error
with suppress(KeyError):
if args[0][KEY_SCHEDULE_ID] == self._schedule_id:
if args[0][KEY_SUBTYPE] in [SUBTYPE_SCHEDULE_STARTED]:
self._state = True
elif args[0][KEY_SUBTYPE] in [
SUBTYPE_SCHEDULE_STOPPED,
SUBTYPE_SCHEDULE_COMPLETED,
]:
self._state = False
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._state = self._schedule_id == self._current_schedule.get(KEY_SCHEDULE_ID)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_RACHIO_SCHEDULE_UPDATE, self._async_handle_update
)
)
| {
"content_hash": "155cdeb952408d66c151ce294d0b1879",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 93,
"avg_line_length": 34.05464480874317,
"alnum_prop": 0.6043538724860933,
"repo_name": "adrienbrault/home-assistant",
"id": "8d87b688aa47e701fc40d90b639d91ed42628d4e",
"size": "18696",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rachio/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from siuba.siu import CallTreeLocal, FunctionLookupError, ExecutionValidatorVisitor
from .groupby import SeriesGroupBy
from .translate import (
forward_method,
not_implemented,
method_agg_op,
method_win_op_agg_result
)
from siuba.experimental.pd_groups.groupby import SeriesGroupBy, GroupByAgg, broadcast_agg, is_compatible
# THE REAL DIALECT FILE LET'S DO THIS
# ====================================
from siuba.ops import ALL_OPS
from siuba import ops
# register concrete implementations for all ops -------------------------------
# note that this may include versions that would error (e.g. tries to look
# up a Series method that doesn't exist). Custom implementations to fix
# are registered over these further down
for dispatcher in ALL_OPS.values():#vars(ops).values():
#try:
forward_method(dispatcher)
#except KeyError:
# pass
# custom implementations ------------------------------------------------------
def register_method(ns, op_name, f, is_property = False, accessor = None):
generic = ns[op_name]
return generic.register(SeriesGroupBy, f(op_name, is_property, accessor))
# add to new op spec
# create new call tree
# aggregate ----
NOT_IMPLEMENTED_AGG = [
"bool", "dot", "empty", "equals", "hasnans", "is_unique", "kurt",
"kurtosis", "memory_usage", "nbytes", "product"
]
for f_name in NOT_IMPLEMENTED_AGG:
ALL_OPS[f_name].register(SeriesGroupBy, not_implemented(f_name))
# size is a property on ungrouped, but not grouped pandas data.
# since siuba follows the ungrouped API, it's used as _.x.size, and
# just needs its implementation registered as a *non*-property.
ops.size.register(SeriesGroupBy, method_agg_op("size", is_property = False, accessor = None))
# window ----
NOT_IMPLEMENTED_WIN = [
"asof", "at", "autocorr", "cat.remove_unused_categories",
"convert_dtypes", "drop_duplicates", "duplicated", "get",
"iat", "iloc", "infer_objects", "is_monotonic",
]
for f_name in NOT_IMPLEMENTED_WIN:
ALL_OPS[f_name].register(SeriesGroupBy, not_implemented(f_name))
# a few functions apply window operations, but return an agg-like result
forward_method(ops.is_monotonic_decreasing, method_win_op_agg_result)
forward_method(ops.is_monotonic_increasing, method_win_op_agg_result)
# NOTE TODO: these methods could be implemented, but depend on the type of
# time index they're operating on
NOT_IMPLEMENTED_DT = [
"dt.qyear", "dt.to_pytimedelta", "dt.to_timestamp", "dt.total_seconds", "dt.tz_convert",
"to_period","dt.to_pydatetime"
]
for f_name in NOT_IMPLEMENTED_DT:
ALL_OPS[f_name].register(SeriesGroupBy, not_implemented(f_name))
# ====================================
from .translate import GroupByAgg, SeriesGroupBy
# TODO: use pandas call tree creator
from siuba.ops.generics import ALL_PROPERTIES, ALL_ACCESSORS
call_listener = CallTreeLocal(
ALL_OPS,
call_sub_attr = ALL_ACCESSORS,
chain_sub_attr = True,
dispatch_cls = GroupByAgg,
result_cls = SeriesGroupBy,
call_props = ALL_PROPERTIES
)
call_validator = ExecutionValidatorVisitor(GroupByAgg, SeriesGroupBy)
# Fast group by verbs =========================================================
from siuba.siu import Call, singledispatch2
from siuba.dply.verbs import mutate, filter, summarize, DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
import warnings
def fallback_warning(expr, reason):
warnings.warn(
"The expression below cannot be executed quickly. "
"Using the slow (but general) pandas apply method."
"\n\nExpression: {}\nReason: {}"
.format(expr, reason)
)
def grouped_eval(__data, expr, require_agg = False):
if is_scalar(expr):
return expr
if isinstance(expr, Call):
try:
call = call_listener.enter(expr)
call_validator.visit(call)
except FunctionLookupError as e:
fallback_warning(expr, str(e))
call = expr
#
grouped_res = call(__data)
if isinstance(grouped_res, SeriesGroupBy):
if not is_compatible(grouped_res, __data):
raise ValueError("Incompatible groupers")
# TODO: may want to validate result is correct length / index?
# e.g. a SeriesGroupBy could be compatible and not an agg
if require_agg:
return grouped_res.obj
else:
# broadcast from aggregate to original length (like transform)
return broadcast_agg(grouped_res)
else:
# can happen right now if user selects, e.g., a property of the
# groupby object, like .dtype, which returns a single value
# in the future, could restrict set of operations user could perform
raise ValueError("Result must be subclass of SeriesGroupBy")
raise ValueError("Grouped expressions must be a siu expression or scalar")
# Fast mutate ----
def _transform_args(args):
out = []
for expr in args:
if is_scalar(expr):
out.append(expr)
elif isinstance(expr, Call):
try:
call = call_listener.enter(expr)
call_validator.visit(call)
out.append(call)
except FunctionLookupError as e:
fallback_warning(expr, str(e))
return None
elif callable(expr):
return None
return out
def _copy_dispatch(dispatcher, cls, func = None):
if func is None:
return lambda f: _copy_dispatch(dispatcher, cls, f)
# Note stripping symbolics may occur twice. Once in the original, and once
# in this dispatcher.
new_dispatch = singledispatch2(cls, func)
new_dispatch.register(object, dispatcher)
return new_dispatch
@_copy_dispatch(mutate, DataFrameGroupBy)
def fast_mutate(__data, **kwargs):
"""Warning: this function is experimental"""
# transform call trees, potentially bail out to slow method --------
new_vals = _transform_args(kwargs.values())
if new_vals is None:
return mutate(__data, **kwargs)
# perform fast method ----
out = __data.obj.copy()
groupings = __data.grouper.groupings
for name, expr in zip(kwargs, new_vals):
res = grouped_eval(__data, expr)
out[name] = res
return out.groupby(groupings)
# Fast filter ----
@_copy_dispatch(filter, DataFrameGroupBy)
def fast_filter(__data, *args):
"""Warning: this function is experimental"""
# transform call trees, potentially bail out to slow method --------
new_vals = _transform_args(args)
if new_vals is None:
return filter(__data, *args)
# perform fast method ----
out = []
groupings = __data.grouper.groupings
for expr in args:
res = grouped_eval(__data, expr)
out.append(res)
filter_df = filter.dispatch(__data.obj.__class__)
df_result = filter_df(__data.obj, *out)
# TODO: research how to efficiently & robustly subset groupings
group_names = [ping.name for ping in groupings]
return df_result.groupby(group_names)
# Fast summarize ----
@_copy_dispatch(summarize, DataFrameGroupBy)
def fast_summarize(__data, **kwargs):
"""Warning: this function is experimental"""
# transform call trees, potentially bail out to slow method --------
new_vals = _transform_args(kwargs.values())
if new_vals is None:
return summarize(__data, **kwargs)
# perform fast method ----
groupings = __data.grouper.groupings
# TODO: better way of getting this frame?
out = __data.grouper.result_index.to_frame()
for name, expr in kwargs.items():
# special case: set scalars directly
res = grouped_eval(__data, expr, require_agg = True)
out[name] = res
return out.reset_index(drop = True)
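# Usage sketch (not part of the original module; assumes the siuba symbolic
# `_` and a plain pandas DataFrame):
#
#     import pandas as pd
#     from siuba import _
#
#     df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 2, 3]})
#     gdf = df.groupby("g")
#
#     fast_mutate(gdf, demeaned = _.x - _.x.mean())   # grouped transform
#     fast_summarize(gdf, avg = _.x.mean())           # one row per group
#
# Expressions that cannot be translated to grouped operations fall back to the
# slower pandas apply path with a warning (see fallback_warning above).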
| {
"content_hash": "3cea890606aef8858671334137be6fcc",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 104,
"avg_line_length": 29.74721189591078,
"alnum_prop": 0.6295926018495376,
"repo_name": "machow/siuba",
"id": "1697e5ac54fa98c0695a552af0fdfb19fc99802e",
"size": "8002",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "siuba/experimental/pd_groups/dialect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1007"
},
{
"name": "Python",
"bytes": "573788"
}
],
"symlink_target": ""
} |
from os.path import dirname as _dn, join as _j
class MainFunc:
def __init__(self):
pass
def run(self, filename):
code = None
with open(_j(_dn(__file__), "main_func_template.py"), 'r') as f:
code = f.read()
out_file = filename
with open(out_file, 'w') as f:
f.write(code)
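# Usage sketch (not part of the original module): copy the bundled
# main_func_template.py (shipped alongside this module) into a new script.
#
#     MainFunc().run("main.py")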
| {
"content_hash": "9cdf60b57eb33219693d331448dd8b39",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.5028248587570622,
"repo_name": "amol9/redcmd",
"id": "f448ced5b6860bd1a097fb8df6c58e1b1d86e380",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redcmd/init/main_func.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106122"
},
{
"name": "Shell",
"bytes": "470"
}
],
"symlink_target": ""
} |
from .catalog_item import CatalogItem
class USqlPackage(CatalogItem):
"""A Data Lake Analytics catalog U-SQL package item.
:param compute_account_name: the name of the Data Lake Analytics account.
:type compute_account_name: str
:param version: the version of the catalog item.
:type version: str
:param database_name: the name of the database containing the package.
:type database_name: str
:param schema_name: the name of the schema associated with this package
and database.
:type schema_name: str
:param name: the name of the package.
:type name: str
:param definition: the definition of the package.
:type definition: str
"""
_attribute_map = {
'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
'name': {'key': 'packageName', 'type': 'str'},
'definition': {'key': 'definition', 'type': 'str'},
}
def __init__(self, compute_account_name=None, version=None, database_name=None, schema_name=None, name=None, definition=None):
super(USqlPackage, self).__init__(compute_account_name=compute_account_name, version=version)
self.database_name = database_name
self.schema_name = schema_name
self.name = name
self.definition = definition
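# --- Hedged usage sketch (not part of the original SDK file) ---
# Illustrates how this model maps Python attribute names to the wire-format
# keys declared in _attribute_map; all values below are made-up placeholders.
#
#     package = USqlPackage(
#         compute_account_name='myadlaaccount',
#         version='1.0',
#         database_name='master',
#         schema_name='dbo',
#         name='SamplePackage',
#         definition='CREATE PACKAGE ...')
#     # when serialized, 'name' is emitted under the 'packageName' key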
| {
"content_hash": "dbbb079bfe037b95ffd186b618c1588d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 130,
"avg_line_length": 41.02777777777778,
"alnum_prop": 0.6445497630331753,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "bcb62ae92f1352e91f06e5fed8dd64edef0ce2e1",
"size": "1951",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import random
import pprint as pp
import logging as log
import ctpy
log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
def build_even_dimension(simconfig, num_modes):
mode_boundaries = []
mode_width = float(simconfig.MAXALLELES) / float(num_modes)
lower_val = 0.0
upper_val = 0.0
for mode in range(0, num_modes):
upper_val += mode_width
mode_boundaries.append(dict(lower=lower_val, upper=upper_val))
lower_val = upper_val
log.debug("boundaries: %s", mode_boundaries)
return mode_boundaries
def build_random_dimension(simconfig, num_modes):
iboundaries = []
mode_boundaries = []
num_internal_boundaries = num_modes - 1
for i in range(0, num_internal_boundaries):
random_variate = random.random()
iboundaries.append(random_variate)
# add the final upper boundary
iboundaries.append(1.0)
lower_val = 0.0
upper_val = 0.0
iboundaries.sort()
for mode in range(0, num_modes):
lower_val = lower_val * simconfig.MAXALLELES
upper_val = iboundaries[mode] * simconfig.MAXALLELES
mode_boundaries.append(dict(lower=lower_val, upper=upper_val))
lower_val = iboundaries[mode]
log.debug("boundaries: %s", mode_boundaries)
return mode_boundaries
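# --- Hedged usage sketch (not part of the original module) ---
# `SimpleNamespace` stands in for the real simconfig object, which only needs
# a MAXALLELES attribute for these two helpers; the numbers are illustrative.
if __name__ == "__main__":
    from types import SimpleNamespace

    simconfig = SimpleNamespace(MAXALLELES=100)
    # even boundaries: [{'lower': 0.0, 'upper': 25.0}, {'lower': 25.0, 'upper': 50.0},
    #                   {'lower': 50.0, 'upper': 75.0}, {'lower': 75.0, 'upper': 100.0}]
    pp.pprint(build_even_dimension(simconfig, 4))
    # random boundaries: four lower/upper pairs whose upper bounds are sorted
    # random draws scaled to MAXALLELES
    pp.pprint(build_random_dimension(simconfig, 4))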
# # TODO: needs to deal with cases where maxalleles % num_modes leaves a remainder...
#
#
# def build_even_partitions_all_dimensions(num_modes, sim_param):
# """
#
#
# :param num_modes:
# :param sim_param:
# :return:
# """
# dimensions = {}
#
# # to start, we partition into quarters for each locus (dimension)
# mode_width = sim_param["maxalleles"] / num_modes
#
# for dimension in range(0, sim_param["numloci"]):
# mode_boundaries_dict = {}
# lower_val = 0.0
# upper_val = 0.0
# for mode in range(0,num_modes):
# upper_val += mode_width
# mode_boundaries_dict[mode] = dict(lower=lower_val, upper=upper_val)
# lower_val = upper_val
# dimensions[dimension] = mode_boundaries_dict
# return dimensions
#
#
# def build_random_partitions_all_dimensions(num_modes, sim_param):
# """
# For k desired modes, generate random mode boundaries within maxalleles.
# Algorithm generates k-1 "internal" boundaries on the unit interval [0,1]
# and then scales maxalleles by the unit interval partitions. Upper
# and lower internal boundaries are equivalent, since they will be
# interpreted with open/closed interval semantics.
#
#
# :param num_modes:
# :param sim_param:
# :return: dict of dimension-specific dicts, within each of which a mode maps to a dict of upper and lower boundaries
# """
# dimensions = {}
# num_internal_boundaries = num_modes - 1
#
# for dimension in range(0, sim_param["numloci"]):
# tempboundary = list()
# mode_boundaries_dict = {}
# maxalleles = sim_param["maxalleles"]
#
# for i in range(0, num_internal_boundaries):
# random_variate = random.random()
# tempboundary.append(random_variate)
#
# # add the final upper boundary
# tempboundary.append(1.0)
#
# lower_val = 0.0
# upper_val = 0.0
# tempboundary.sort()
#
# for mode in range(0, num_modes):
# lower_val = int(lower_val * maxalleles)
# upper_val = int(tempboundary[mode] * maxalleles)
# mode_boundaries_dict[mode] = dict(lower=lower_val, upper=upper_val)
# lower_val = tempboundary[mode]
#
# dimensions[dimension] = mode_boundaries_dict
# # TODO: missing logic for scaling to maxalleles, need to debug this first...
# return dimensions
#
#
# if __name__ == "__main__":
# sim_param = {}
# sim_param["numloci"] = 3
# sim_param["maxalleles"] = 100000000
#
#
# print "Testing random partitions for 3 dimensions, 4 modes"
# result_dict = build_random_partitions_all_dimensions(4, sim_param)
# pp.pprint(result_dict)
#
# print "Testing even partitions for 3 dimensions, 4 modes"
# result_dict = build_even_partitions_all_dimensions(4, sim_param)
# pp.pprint(result_dict)
| {
"content_hash": "b5cc145e0ccd748b4dcdc956cd578caa",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 121,
"avg_line_length": 32.3587786259542,
"alnum_prop": 0.6301014390186365,
"repo_name": "mmadsen/CTPy",
"id": "62849a69842ce7dbee2c5ee40a4976ff010dba01",
"size": "4963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctpy/coarsegraining/dimension_mode_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29478"
},
{
"name": "JavaScript",
"bytes": "37933"
},
{
"name": "Python",
"bytes": "197849"
},
{
"name": "Shell",
"bytes": "1406"
},
{
"name": "TeX",
"bytes": "252609"
}
],
"symlink_target": ""
} |
from vitrage.common.constants import VertexProperties as VProps
class PropsConverter(object):
PROPS_CONVERSION = {
'category': VProps.VITRAGE_CATEGORY,
'type': VProps.VITRAGE_TYPE,
'resource_id': VProps.VITRAGE_RESOURCE_ID,
'sample_timestamp': VProps.VITRAGE_SAMPLE_TIMESTAMP,
'is_deleted': VProps.VITRAGE_IS_DELETED,
'is_placeholder': VProps.VITRAGE_IS_PLACEHOLDER,
'aggregated_state': VProps.VITRAGE_AGGREGATED_STATE,
'operational_state': VProps.VITRAGE_OPERATIONAL_STATE,
'aggregated_severity': VProps.VITRAGE_AGGREGATED_SEVERITY,
'operational_severity': VProps.VITRAGE_OPERATIONAL_SEVERITY
}
@classmethod
def convert_props_with_set(cls, properties):
converted_properties = set()
for key, value in properties:
new_key = cls.PROPS_CONVERSION[key] if key in \
cls.PROPS_CONVERSION else key
converted_properties.add((new_key, value))
return converted_properties
@classmethod
def convert_props_with_dictionary(cls, properties):
converted_properties = {}
for key, value in properties.items():
new_key = cls.PROPS_CONVERSION[key] if key in \
cls.PROPS_CONVERSION else key
converted_properties[new_key] = value
return converted_properties
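# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the renaming performed by the classmethods above; the input
# properties are illustrative only. Keys with no entry in PROPS_CONVERSION
# pass through unchanged.
#
#     PropsConverter.convert_props_with_dictionary(
#         {'category': 'ALARM', 'custom_key': 1})
#     # -> {VProps.VITRAGE_CATEGORY: 'ALARM', 'custom_key': 1}
#
#     PropsConverter.convert_props_with_set(
#         [('type', 'nova.instance'), ('custom_key', 1)])
#     # -> {(VProps.VITRAGE_TYPE, 'nova.instance'), ('custom_key', 1)}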
| {
"content_hash": "c7d5ac9034726f43cfb9bd5ad9aee206",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 67,
"avg_line_length": 39.2,
"alnum_prop": 0.6574344023323615,
"repo_name": "openstack/vitrage",
"id": "c18dda51d93a0335f3a5b23dd1d454c01729cfe2",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/evaluator/template_loading/props_converter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
} |
from django.views.generic.base import TemplateView
class HomePageView(TemplateView):
template_name = "home.html"
| {
"content_hash": "2730c53abd4a94764912fee03e62403b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 50,
"avg_line_length": 29.5,
"alnum_prop": 0.788135593220339,
"repo_name": "tomp/food_pantry",
"id": "b4f1ee8eebccd76afe08d366469127d8a4b2e7a9",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pantry/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19728"
},
{
"name": "HTML",
"bytes": "10272"
},
{
"name": "JavaScript",
"bytes": "380502"
},
{
"name": "Python",
"bytes": "30598"
}
],
"symlink_target": ""
} |
from django.utils import timezone
from django.shortcuts import render, get_object_or_404, redirect
from .models import Post, Comment, Notice
from .forms import PostForm, CommentForm, NoticeForm
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
# Create your views here.
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
return render(request,'blog/post_list.html',{'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.post = post
comment.save()
return redirect('blog:post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'blog/post_detail.html', {'post': post, 'form': form})
def notice_list(request):
notices = Notice.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
public = Notice.objects.filter(privacy="Public").filter(published_date__lte=timezone.now()).order_by('-published_date')
return render(request,'blog/notice_list.html',{'notices': notices, 'public': public})
def notice_detail(request, pk):
notice = get_object_or_404(Notice, pk=pk)
return render(request, 'blog/notice_detail.html', {'notice': notice})
@login_required
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('blog:post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('blog:post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
@staff_member_required
def notice_new(request):
if request.method == "POST":
form = NoticeForm(request.POST, request.FILES)
if form.is_valid():
notice = form.save(commit=False)
notice.author = request.user
notice.published_date = timezone.now()
notice.save()
return redirect('blog:notice_detail', pk=notice.pk)
else:
form = NoticeForm()
return render(request, 'blog/notice_edit.html', {'form': form})
@staff_member_required
def notice_edit(request, pk):
notice = get_object_or_404(Notice, pk=pk)
if request.method == "POST":
form = NoticeForm(request.POST, request.FILES, instance=notice)
if form.is_valid():
notice = form.save(commit=False)
notice.author = request.user
notice.save()
            return redirect('blog:notice_detail', pk=notice.pk)
else:
form = NoticeForm(instance=notice)
return render(request, 'blog/notice_edit.html', {'form': form})
# @login_required
# def post_draft_list(request):
# posts = Post.objects.filter(published_date__isnull=True).order_by('created_date')
# return render(request, 'blog/post_draft_list.html', {'posts': posts})
# @login_required
# def post_publish(request, pk):
# post = get_object_or_404(Post, pk=pk)
# post.publish()
# return redirect('blog.views.post_detail', pk=pk)
# @login_required
# def publish(self):
# self.published_date = timezone.now()
# self.save()
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('blog:post_list')
@staff_member_required
def notice_remove(request, pk):
notice = get_object_or_404(Notice, pk=pk)
notice.delete()
    return redirect('blog:notice_list')
# def add_comment_to_post(request, pk):
# post = get_object_or_404(Post, pk=pk)
# if request.method == "POST":
# form = CommentForm(request.POST)
# if form.is_valid():
# comment = form.save(commit=False)
# comment.author = request.user
# comment.post = post
# comment.save()
# return redirect('blog.views.post_detail', pk=post.pk)
# else:
# form = CommentForm()
# return render(request, 'blog/post_detail.html', {'form': form, 'post': post})
@login_required
def comment_approve(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.approve()
return redirect('blog:post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk=pk)
post_pk = comment.post.pk
comment.delete()
return redirect('blog:post_detail', pk=post_pk)
| {
"content_hash": "2a4d9f6a7f6c1ecb3ed70ec24069208a",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 123,
"avg_line_length": 35.37162162162162,
"alnum_prop": 0.6389684813753582,
"repo_name": "RachellCalhoun/cathotel",
"id": "e44c690f21d3715e3cfd8ea9b6a890312066a8b0",
"size": "5236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4016"
},
{
"name": "HTML",
"bytes": "30678"
},
{
"name": "Python",
"bytes": "33344"
}
],
"symlink_target": ""
} |
import os
import sys
import getpass
from functools import wraps
import ldap
import ldap.modlist as modlist
import cog.directory as dir
import cog.util.passwd as passwd
from cog.util.misc import loop_on
from cog.objects.group import Group
from cog.config.settings import Profiles
from cog.config.templates import Templates
accounts = Templates().get('accounts')
settings = Profiles()
class User(object):
def __init__(self, name, account_data=None, groups=None, bind=False):
"""
User object, unsurprisingly.
"""
self.tree = dir.Tree()
self.name = name
self.exists = True
self.base_dn = settings.user_dn
self.ldap_query = settings.user_query % (settings.user_rdn, self.name)
user_data = self.tree.search(self.base_dn, search_filter=self.ldap_query, bind=bind)
if len(user_data) > 1:
raise dir.MultipleObjectsFound("The user ID is not unique.")
if len(user_data) == 1:
self.data = user_data[0]
self.uid = self.data.get('uid')
else:
self.exists = False
self.uid = [name]
self.data = account_data
self.groups = groups
def user_exists(method):
"""
Make sure that you're operating on an existing object.
"""
@wraps(method)
def _user_exists(self, *args, **kwargs):
if not self.exists:
raise dir.ObjectNotFound("User ‘%s’ cannot be found." % self.name)
return method(self, *args, **kwargs)
return _user_exists
def add(self):
self.tree.add(self.data)
self.exists = True
if self.groups:
for group in self.groups:
try:
self.addgroup(group)
except:
print "There was a problem with adding user %s to the group %s." % (self.name, group)
@user_exists
def replace_item(self, item, value):
self.data.replace(item, value)
@user_exists
def append_to_item(self, item, value):
self.data.append(item, value)
@user_exists
def remove_from_item(self, item, value):
self.data.remove(item, value)
@user_exists
def commit_changes(self):
self.tree.modify(self.data)
@user_exists
def find_groups(self):
for uid in loop_on(self.uid):
group_filter = '(&(objectClass=posixGroup)(|(memberUid=%s)(%s=%s)))' % (uid, settings.rfc2307bis_group_member_attribute, self.data.dn)
groups = [x['cn'][0] for x in self.tree.search(search_filter=group_filter, attributes=['cn'])]
yield groups
@user_exists
def strip_groups(self):
for uid in self.uid:
groups = [x['cn'][0] for x in self.tree.search(search_filter='(&(objectClass=posixGroup)(memberUid=%s))' % uid, attributes=['cn'])]
for group in groups:
self.delgroup(group)
@user_exists
def addgroup(self, user_group):
group_obj = Group(user_group)
for uid in self.uid:
group_obj.add_uid(uid)
group_obj.commit_changes()
@user_exists
def delgroup(self, user_group):
group_obj = Group(user_group)
for uid in self.uid:
group_obj.del_uid(uid)
group_obj.commit_changes()
@user_exists
def set_password(self, password=None):
if not password:
password = getpass.getpass('enter new LDAP password for %s: ' % self.name)
self.data.replace('userPassword', passwd.make_sha512(password))
self.tree.modify(self.data)
@user_exists
def rename(self, new_name):
self.tree.rename(self.data.dn, new_rdn='%s=%s'
% (settings.user_rdn, new_name))
@user_exists
def remove(self):
self.strip_groups()
self.tree.remove(self.data.dn)
@user_exists
def retire(self):
self.set_password(passwd.random_string(32))
self.data.replace('gidNumber', accounts.get('retired').get('gidNumber'))
self.tree.modify(self.data)
self.tree.move(self.data.dn, new_parent=dir.get_account_base('retired'))
self.strip_groups()
| {
"content_hash": "08add296ff6b02b813ce2d025d68b7bc",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 146,
"avg_line_length": 32.30769230769231,
"alnum_prop": 0.5947619047619047,
"repo_name": "jubalfh/cog",
"id": "4a4929ad38202ff3ec26785cec8d6d267fc934e9",
"size": "4500",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "cog/objects/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67882"
},
{
"name": "Shell",
"bytes": "562"
}
],
"symlink_target": ""
} |
from tkinter import TclError
class WidgetRedirector:
"""Support for redirecting arbitrary widget subcommands.
Some Tk operations don't normally pass through tkinter. For example, if a
character is inserted into a Text widget by pressing a key, a default Tk
binding to the widget's 'insert' operation is activated, and the Tk library
processes the insert without calling back into tkinter.
Although a binding to <Key> could be made via tkinter, what we really want
to do is to hook the Tk 'insert' operation itself. For one thing, we want
a text.insert call in idle code to have the same effect as a key press.
When a widget is instantiated, a Tcl command is created whose name is the
same as the pathname widget._w. This command is used to invoke the various
widget operations, e.g. insert (for a Text widget). We are going to hook
this command and provide a facility ('register') to intercept the widget
operation. We will also intercept method calls on the tkinter class
instance that represents the tk widget.
In IDLE, WidgetRedirector is used in Percolator to intercept Text
commands. The function being registered provides access to the top
of a Percolator chain. At the bottom of the chain is a call to the
original Tk widget operation.
"""
def __init__(self, widget):
'''Initialize attributes and setup redirection.
_operations: dict mapping operation name to new function.
widget: the widget whose tcl command is to be intercepted.
tk: widget.tk, a convenience attribute, probably not needed.
orig: new name of the original tcl command.
Since renaming to orig fails with TclError when orig already
        exists, only one WidgetRedirector can exist for a given widget.
'''
self._operations = {}
self.widget = widget # widget instance
self.tk = tk = widget.tk # widget's root
w = widget._w # widget's (full) Tk pathname
self.orig = w + "_orig"
# Rename the Tcl command within Tcl:
tk.call("rename", w, self.orig)
# Create a new Tcl command whose name is the widget's pathname, and
# whose action is to dispatch on the operation passed to the widget:
tk.createcommand(w, self.dispatch)
def __repr__(self):
return "%s(%s<%s>)" % (self.__class__.__name__,
self.widget.__class__.__name__,
self.widget._w)
def close(self):
"Unregister operations and revert redirection created by .__init__."
for operation in list(self._operations):
self.unregister(operation)
widget = self.widget
tk = widget.tk
w = widget._w
# Restore the original widget Tcl command.
tk.deletecommand(w)
tk.call("rename", self.orig, w)
del self.widget, self.tk # Should not be needed
# if instance is deleted after close, as in Percolator.
def register(self, operation, function):
'''Return OriginalCommand(operation) after registering function.
Registration adds an operation: function pair to ._operations.
        It also adds a widget function attribute that masks the tkinter
class instance method. Method masking operates independently
from command dispatch.
If a second function is registered for the same operation, the
first function is replaced in both places.
'''
self._operations[operation] = function
setattr(self.widget, operation, function)
return OriginalCommand(self, operation)
def unregister(self, operation):
'''Return the function for the operation, or None.
Deleting the instance attribute unmasks the class attribute.
'''
if operation in self._operations:
function = self._operations[operation]
del self._operations[operation]
try:
delattr(self.widget, operation)
except AttributeError:
pass
return function
else:
return None
def dispatch(self, operation, *args):
'''Callback from Tcl which runs when the widget is referenced.
If an operation has been registered in self._operations, apply the
associated function to the args passed into Tcl. Otherwise, pass the
operation through to Tk via the original Tcl function.
Note that if a registered function is called, the operation is not
passed through to Tk. Apply the function returned by self.register()
to *args to accomplish that. For an example, see ColorDelegator.py.
'''
m = self._operations.get(operation)
try:
if m:
return m(*args)
else:
return self.tk.call((self.orig, operation) + args)
except TclError:
return ""
class OriginalCommand:
'''Callable for original tk command that has been redirected.
Returned by .register; can be used in the function registered.
redir = WidgetRedirector(text)
def my_insert(*args):
print("insert", args)
original_insert(*args)
original_insert = redir.register("insert", my_insert)
'''
def __init__(self, redir, operation):
'''Create .tk_call and .orig_and_operation for .__call__ method.
.redir and .operation store the input args for __repr__.
.tk and .orig copy attributes of .redir (probably not needed).
'''
self.redir = redir
self.operation = operation
self.tk = redir.tk # redundant with self.redir
self.orig = redir.orig # redundant with self.redir
# These two could be deleted after checking recipient code.
self.tk_call = redir.tk.call
self.orig_and_operation = (redir.orig, operation)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__,
self.redir, self.operation)
def __call__(self, *args):
return self.tk_call(self.orig_and_operation + args)
def _widget_redirector(parent): # htest #
from tkinter import Tk, Text
import re
root = Tk()
root.title("Test WidgetRedirector")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = Text(root)
text.pack()
text.focus_set()
redir = WidgetRedirector(text)
def my_insert(*args):
print("insert", args)
original_insert(*args)
original_insert = redir.register("insert", my_insert)
root.mainloop()
if __name__ == "__main__":
import unittest
unittest.main('idlelib.idle_test.test_widgetredir',
verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(_widget_redirector)
| {
"content_hash": "3b3a6d4006341b33f45882d2e9546a14",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 79,
"avg_line_length": 39.43181818181818,
"alnum_prop": 0.6321325648414986,
"repo_name": "sharhar/USB-Thing",
"id": "67d7f61e623b4289bf55de2c641affd63b60e681",
"size": "6940",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "UpdaterFiles/Lib/python-3.5.1.amd64/Lib/idlelib/WidgetRedirector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5015"
},
{
"name": "C",
"bytes": "436714"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "100530"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "41126"
},
{
"name": "Jupyter Notebook",
"bytes": "752587"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "14041449"
},
{
"name": "Shell",
"bytes": "13559"
},
{
"name": "Tcl",
"bytes": "2173292"
}
],
"symlink_target": ""
} |
from GroundedScan.dataset import GroundedScan
from GroundedScan.grammar import Derivation
from GroundedScan.world import Situation
from GroundedScan.world import Position
from GroundedScan.world import Object
from GroundedScan.world import INT_TO_DIR
from GroundedScan.world import PositionedObject
from GroundedScan.helpers import numpy_array_to_image
from GroundedScan.helpers import image_to_numpy_array
import os
import time
import numpy as np
import logging
import shutil
logging.getLogger("PyQt5").disabled = True
logging.getLogger('matplotlib.font_manager').disabled = True
logger = logging.getLogger("GroundedScan")
TEST_DIRECTORY = "test_dir"
TEST_PATH = os.path.join(os.getcwd(), TEST_DIRECTORY)
if not os.path.exists(TEST_PATH):
os.mkdir(TEST_PATH)
EXAMPLES_TO_TEST = 10000
intransitive_verbs = ["walk"]
transitive_verbs = ["push", "pull"]
adverbs = ["cautiously"]
nouns = ["circle", "cylinder", "square"]
color_adjectives = ["red", "blue", "green", "yellow"]
size_adjectives = ["big", "small"]
TEST_DATASET = GroundedScan(intransitive_verbs=intransitive_verbs,
transitive_verbs=transitive_verbs,
adverbs=adverbs, nouns=nouns,
color_adjectives=color_adjectives,
size_adjectives=size_adjectives, percentage_train=0.8,
min_object_size=1, max_object_size=4, sample_vocabulary='default',
save_directory=TEST_DIRECTORY, grid_size=15, type_grammar="adverb")
TEST_DATASET_NONCE = GroundedScan(intransitive_verbs=1,
transitive_verbs=2,
adverbs=1, nouns=3,
color_adjectives=4,
size_adjectives=2, percentage_train=0.8,
min_object_size=1, max_object_size=4, sample_vocabulary='sample',
save_directory=TEST_DIRECTORY, grid_size=15, type_grammar="adverb")
TEST_SITUATION_1 = Situation(grid_size=15, agent_position=Position(row=7, column=2), agent_direction=INT_TO_DIR[0],
target_object=PositionedObject(object=Object(size=2, color='red', shape='circle'),
position=Position(row=10, column=4),
vector=np.array([1, 0, 1])),
placed_objects=[PositionedObject(object=Object(size=2, color='red', shape='circle'),
position=Position(row=10, column=4),
vector=np.array([1, 0, 1])),
PositionedObject(object=Object(size=4, color='green', shape='circle'),
position=Position(row=3, column=12),
vector=np.array([0, 1, 0]))], carrying=None)
TEST_SITUATION_2 = Situation(grid_size=15, agent_position=Position(row=7, column=2), agent_direction=INT_TO_DIR[0],
target_object=PositionedObject(object=Object(size=4, color='red', shape='circle'),
position=Position(row=10, column=4),
vector=np.array([1, 0, 1])),
placed_objects=[PositionedObject(object=Object(size=4, color='red', shape='circle'),
position=Position(row=10, column=4),
vector=np.array([1, 0, 1])),
PositionedObject(object=Object(size=4, color='green', shape='cylinder'),
position=Position(row=3, column=12),
vector=np.array([0, 1, 0]))], carrying=None)
TEST_SITUATION_3 = Situation(grid_size=15, agent_position=Position(row=7, column=2), agent_direction=INT_TO_DIR[0],
target_object=None,
placed_objects=[PositionedObject(object=Object(size=1, color='red', shape='circle'),
position=Position(row=10, column=4),
vector=np.array([1, 0, 1])),
PositionedObject(object=Object(size=2, color='green', shape='circle'),
position=Position(row=3, column=1),
vector=np.array([0, 1, 0]))], carrying=None)
TEST_SITUATION_4 = Situation(grid_size=15, agent_position=Position(row=7, column=2), agent_direction=INT_TO_DIR[0],
target_object=None,
placed_objects=[PositionedObject(object=Object(size=2, color='red', shape='circle'),
position=Position(row=10, column=4),
vector=np.array([1, 0, 1])),
PositionedObject(object=Object(size=4, color='red', shape='circle'),
position=Position(row=3, column=1),
vector=np.array([0, 1, 0]))], carrying=None)
def test_save_and_load_dataset(dataset):
start = time.time()
dataset.get_data_pairs(max_examples=EXAMPLES_TO_TEST)
dataset.save_dataset("test.txt")
dataset.save_dataset_statistics(split="train")
dataset.save_dataset_statistics(split="test")
test_grounded_scan = GroundedScan.load_dataset_from_file(os.path.join(TEST_DIRECTORY, "test.txt"),
TEST_DIRECTORY)
for example_one, example_two in zip(dataset.get_examples_with_image("train"),
test_grounded_scan.get_examples_with_image("train")):
assert dataset.command_repr(example_one["input_command"]) == test_grounded_scan.command_repr(
example_two["input_command"]), "test_save_and_load_dataset FAILED"
assert dataset.command_repr(example_one["target_command"]) == test_grounded_scan.command_repr(
example_two["target_command"]), "test_save_and_load_dataset FAILED"
assert np.array_equal(example_one["situation_image"], example_two["situation_image"]),\
"test_save_and_load_dataset FAILED"
assert dataset.command_repr(example_one["input_meaning"]) == test_grounded_scan.command_repr(
example_two["input_meaning"]), "test_save_and_load_dataset FAILED"
os.remove(os.path.join(TEST_DIRECTORY, "test.txt"))
end = time.time()
logger.info("test_save_and_load_dataset PASSED in {} seconds".format(end - start))
return
def test_save_and_load_dataset_nonce():
start = time.time()
TEST_DATASET_NONCE.get_data_pairs(max_examples=EXAMPLES_TO_TEST)
TEST_DATASET_NONCE.save_dataset("test.txt")
TEST_DATASET_NONCE.save_dataset_statistics(split="train")
TEST_DATASET_NONCE.save_dataset_statistics(split="test")
test_grounded_scan = GroundedScan.load_dataset_from_file(os.path.join(TEST_DIRECTORY, "test.txt"),
TEST_DIRECTORY)
for example_one, example_two in zip(TEST_DATASET_NONCE.get_examples_with_image("train"),
test_grounded_scan.get_examples_with_image("train")):
assert TEST_DATASET_NONCE.command_repr(example_one["input_command"]) == test_grounded_scan.command_repr(
example_two["input_command"]), "test_save_and_load_dataset FAILED"
assert TEST_DATASET_NONCE.command_repr(example_one["target_command"]) == test_grounded_scan.command_repr(
example_two["target_command"]), "test_save_and_load_dataset FAILED"
assert np.array_equal(example_one["situation_image"], example_two["situation_image"]),\
"test_save_and_load_dataset FAILED"
assert TEST_DATASET_NONCE.command_repr(example_one["input_meaning"]) == test_grounded_scan.command_repr(
example_two["input_meaning"]), "test_save_and_load_dataset FAILED"
os.remove(os.path.join(TEST_DIRECTORY, "test.txt"))
end = time.time()
logger.info("test_save_and_load_dataset PASSED in {} seconds".format(end - start))
return
def test_derivation_from_rules(dataset):
start = time.time()
derivation, arguments = dataset.sample_command()
rules_list = []
lexicon = {}
derivation.to_rules(rules_list, lexicon)
test = Derivation.from_rules(rules_list, lexicon=lexicon)
assert ' '.join(test.words()) == ' '.join(derivation.words()), "test_derivation_from_rules FAILED"
end = time.time()
logger.info("test_derivation_from_rules PASSED in {} seconds".format(end - start))
def test_derivation_from_string(dataset):
start = time.time()
derivation, arguments = dataset.sample_command()
derivation_str = derivation.__repr__()
rules_str, lexicon_str = derivation_str.split(';')
new_derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
assert ' '.join(new_derivation.words()) == ' '.join(derivation.words()), "test_derivation_from_string FAILED"
end = time.time()
logger.info("test_derivation_from_string PASSED in {} seconds".format(end - start))
def test_demonstrate_target_commands_one(dataset):
"""Test that target commands sequence resulting from demonstrate_command is the same as the one executed by
demonstrate_target_commands"""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("walk"), translate_fn("walk"), translate_fn("small"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, TEST_SITUATION_1)
command = ' '.join(derivation.words())
target_commands, _, _, _ = dataset.demonstrate_target_commands(command, TEST_SITUATION_1, actual_target_commands)
assert ','.join(actual_target_commands) == ','.join(target_commands), \
"test_demonstrate_target_commands_one FAILED"
end = time.time()
logger.info("test_demonstrate_target_commands_one PASSED in {} seconds".format(end - start))
def test_demonstrate_target_commands_two(dataset):
"""Test that target commands sequence resulting from demonstrate_command for pushing a heavy objectis the same as
the executed one by demonstrate_target_commands"""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_trans DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_transitive -> {},T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("push"), translate_fn("push"), translate_fn("big"), translate_fn("big"), translate_fn("circle"),
translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_2)
command = ' '.join(derivation.words())
target_commands, _, _, _ = dataset.demonstrate_target_commands(command, TEST_SITUATION_2, actual_target_commands)
assert ','.join(actual_target_commands) == ','.join(target_commands), "test_demonstrate_target_commands_two FAILED"
end = time.time()
logger.info("test_demonstrate_target_commands_two PASSED in {} seconds".format(end - start))
def test_demonstrate_target_commands_three(dataset):
"""Test that target commands sequence resulting from demonstrate_command for pushing a light object is the same as
the executed one by demonstrate_target_commands"""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_trans DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_transitive -> {},T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("push"), translate_fn("push"), translate_fn("small"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_1)
command = ' '.join(derivation.words())
target_commands, _, _, _ = dataset.demonstrate_target_commands(command, TEST_SITUATION_1, actual_target_commands)
assert ','.join(actual_target_commands) == ','.join(target_commands), "test_demonstrate_target_commands_three FAILED"
end = time.time()
logger.info("test_demonstrate_target_commands_three PASSED in {} seconds".format(end - start))
def test_demonstrate_command_one(dataset):
"""Test pushing a light object (where one target command of 'push <dir>' results in movement of 1 grid)."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_trans DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_transitive -> {},T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("push"), translate_fn("push"), translate_fn("small"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
expected_target_commands = "walk,walk,turn right,walk,walk,walk,"\
"push,push,push,push"
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_1)
assert expected_target_commands == ','.join(actual_target_commands), "test_demonstrate_command_one FAILED"
end = time.time()
logger.info("test_demonstrate_command_one PASSED in {} seconds".format(end - start))
def test_demonstrate_command_two(dataset):
"""Test pushing a heavy object (where one target command of 'push <dir>' results in movement of 1 grid)."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_trans DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_transitive -> {},T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("push"), translate_fn("push"), translate_fn("small"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
expected_target_commands = "walk,walk,turn right,walk,walk,walk," \
"push,push,push,push,push,push,push,push"
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_2)
assert expected_target_commands == ','.join(actual_target_commands), "test_demonstrate_command_two FAILED"
end = time.time()
logger.info("test_demonstrate_command_two PASSED in {} seconds".format(end - start))
def test_demonstrate_command_three(dataset):
"""Test walk to a small circle, tests that the function demonstrate command is able to find the target small circle
even if that circle isn't explicitly set as the target object in the situation (which it wouldn't be at test time).
"""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("walk"), translate_fn("walk"), translate_fn("small"), translate_fn("small"), translate_fn("circle"),
translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
expected_target_commands = "walk,walk,turn right,walk,walk,walk"
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_3)
assert expected_target_commands == ','.join(actual_target_commands), "test_demonstrate_command_three FAILED"
end = time.time()
logger.info("test_demonstrate_command_three PASSED in {} seconds".format(end - start))
def test_demonstrate_command_four(dataset):
"""Test walk to a small circle, tests that the function demonstrate command is able to find the target big circle
even if that circle isn't explicitly set as the target object in the situation (which it wouldn't be at test time).
"""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("walk"), translate_fn("walk"), translate_fn("big"), translate_fn("big"), translate_fn("circle"),
translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
expected_target_commands = "turn left,turn left,walk,turn right,walk,walk,walk,walk"
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_3)
assert expected_target_commands == ','.join(actual_target_commands), "test_demonstrate_command_four FAILED"
end = time.time()
logger.info("test_demonstrate_command_four PASSED in {} seconds".format(end - start))
def test_demonstrate_command_five(dataset):
"""Test that when referring to a small red circle and two present in the world, it finds the correct one."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {}:JJ -> {},T:{},T:{},NT:"\
"NN -> {}".format(translate_fn("walk"), translate_fn("walk"), translate_fn("red"),
translate_fn("small"), translate_fn("red"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle"))
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
expected_target_commands = "walk,walk,turn right,walk,walk,walk"
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_4)
assert expected_target_commands == ','.join(actual_target_commands), "test_demonstrate_command_five FAILED"
end = time.time()
logger.info("test_demonstrate_command_five PASSED in {} seconds".format(end - start))
def test_demonstrate_command_six(dataset):
"""Test that when referring to a small red circle but only one red circle is present, demonstrate_commands fails."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {}:JJ -> {},T:{},T:{},NT:" \
"NN -> {}".format(translate_fn("walk"), translate_fn("walk"), translate_fn("red"),
translate_fn("small"), translate_fn("red"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle"))
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
expected_target_commands = ""
try:
actual_target_commands, _, _ = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_3)
except AssertionError:
actual_target_commands = ""
assert expected_target_commands == ','.join(actual_target_commands), "test_demonstrate_command_six FAILED"
end = time.time()
logger.info("test_demonstrate_command_six PASSED in {} seconds".format(end - start))
def test_find_referred_target_one(dataset):
"""Test that for particular referred targets, the Derivation class identifies it correctly."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {}:JJ -> {},T:{},T:{},NT:" \
"NN -> {}".format(translate_fn("walk"), translate_fn("walk"), translate_fn("red"),
translate_fn("small"), translate_fn("red"), translate_fn("small"),
translate_fn("circle"), translate_fn("circle"))
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
arguments = []
derivation.meaning(arguments)
assert len(arguments) == 1, "test_find_referred_target_one FAILED."
target_str, target_predicate = arguments.pop().to_predicate()
translate_fn_word = dataset._vocabulary.translate_word
translated_target_str = ' '.join([translate_fn_word(word) for word in target_str.split()])
assert translated_target_str == "red circle", "test_find_referred_target FAILED."
assert target_predicate["noun"] == translate_fn("circle"), "test_find_referred_target_one FAILED."
assert target_predicate["size"] == translate_fn("small"), "test_find_referred_target_one FAILED."
assert target_predicate["color"] == translate_fn("red"), "test_find_referred_target_one FAILED."
end = time.time()
logger.info("test_find_referred_target_one PASSED in {} seconds".format(end - start))
def test_find_referred_target_two(dataset):
"""Test that for particular referred targets, the Derivation class identifies it correctly."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("walk"), translate_fn("walk"), translate_fn("big"), translate_fn("big"), translate_fn("circle"),
translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
arguments = []
derivation.meaning(arguments)
assert len(arguments) == 1, "test_find_referred_target_two FAILED."
target_str, target_predicate = arguments.pop().to_predicate()
translate_fn_word = dataset._vocabulary.translate_word
translated_target_str = ' '.join([translate_fn_word(word) for word in target_str.split()])
assert translated_target_str == "circle", "test_find_referred_target_two FAILED."
assert target_predicate["noun"] == translate_fn("circle"), "test_find_referred_target_two FAILED."
assert target_predicate["size"] == translate_fn("big"), "test_find_referred_target_two FAILED."
assert target_predicate["color"] == translate_fn(""), "test_find_referred_target_two FAILED."
end = time.time()
logger.info("test_find_referred_target_two PASSED in {} seconds".format(end - start))
def test_generate_possible_targets_one(dataset):
"""Test that for particular referred targets, the right possible target objects get generated."""
start = time.time()
translate_meaning = dataset._vocabulary.translate_meaning
target_predicate = {"noun": translate_meaning("circle"),
"color": translate_meaning("red"),
"size": translate_meaning("big")}
translate_word = dataset._vocabulary.translate_word
expected_possible_targets = {(2, "red", "circle"), (3, "red", "circle"), (4, "red", "circle")}
actual_possible_targets = dataset.generate_possible_targets(
referred_size=translate_word(target_predicate["size"]),
referred_color=translate_word(target_predicate["color"]),
referred_shape=translate_word(target_predicate["noun"]))
for actual_possible_target in actual_possible_targets:
assert actual_possible_target in expected_possible_targets, "test_generate_possible_targets_one FAILED."
end = time.time()
logger.info("test_generate_possible_targets_one PASSED in {} seconds".format(end - start))
def test_generate_possible_targets_two(dataset):
"""Test that for particular referred targets, the right possible target objects get generated."""
start = time.time()
translate_meaning = dataset._vocabulary.translate_meaning
target_predicate = {"noun": translate_meaning("circle"),
"color": translate_meaning("red"),
"size": translate_meaning("small")}
translate_word = dataset._vocabulary.translate_word
expected_possible_targets = {(1, "red", "circle"), (2, "red", "circle"), (3, "red", "circle"),
(1, "blue", "circle"), (2, "blue", "circle"), (3, "blue", "circle"),
(1, "green", "circle"), (2, "green", "circle"), (3, "green", "circle")}
actual_possible_targets = dataset.generate_possible_targets(
referred_size=translate_word(target_predicate["size"]),
referred_color=translate_word(target_predicate["color"]),
referred_shape=translate_word(target_predicate["noun"]))
for expected_possible_target, actual_possible_target in zip(expected_possible_targets, actual_possible_targets):
assert actual_possible_target in expected_possible_targets, "test_generate_possible_targets_two FAILED."
end = time.time()
logger.info("test_generate_possible_targets_two PASSED in {} seconds".format(end - start))
def test_generate_situations_one(dataset):
"""Test that when a small green circle is referred to there exist no smaller green circles than the target object in
the world and at least one larger green circle."""
start = time.time()
translate_meaning = dataset._vocabulary.translate_meaning
target_shape = "circle"
target_color = "green"
target_size = 2
referred_size = translate_meaning("small")
referred_color = translate_meaning("green")
referred_shape = translate_meaning("circle")
situation_specifications = dataset.generate_situations(num_resampling=1)
relevant_situation = situation_specifications[target_shape][target_color][target_size].pop()
dataset.initialize_world_from_spec(relevant_situation, referred_size=referred_size,
referred_color=referred_color,
referred_shape=referred_shape,
actual_size=target_size,
sample_percentage=0.5
)
smallest_object = dataset._world.object_positions("green circle",
object_size="small").pop()
assert smallest_object == relevant_situation["target_position"], "test_generate_situations_one FAILED."
other_related_objects = dataset._world.object_positions("green circle")
larger_objects = []
for size, sized_objects in other_related_objects:
if size < target_size:
assert not sized_objects, "test_generate_situations_one FAILED."
elif size > target_size:
larger_objects.extend(sized_objects)
assert len(larger_objects) >= 1, "test_generate_situations_one FAILED."
end = time.time()
logger.info("test_generate_situations_one PASSED in {} seconds".format(end - start))
def test_generate_situations_two(dataset):
"""Test that when a big green circle is referred to there exists no larger green circles and the exists at least
one smaller green circle."""
start = time.time()
translate_meaning = dataset._vocabulary.translate_meaning
target_shape = "circle"
target_color = "green"
target_size = 2
referred_size = translate_meaning("big")
referred_color = translate_meaning("green")
referred_shape = translate_meaning("circle")
situation_specifications = dataset.generate_situations(num_resampling=1)
relevant_situation = situation_specifications[target_shape][target_color][target_size].pop()
dataset.initialize_world_from_spec(relevant_situation, referred_size=referred_size,
referred_color=referred_color,
referred_shape=referred_shape,
actual_size=target_size,
sample_percentage=0.5
)
largest_object = dataset._world.object_positions("green circle",
object_size="big").pop()
assert largest_object == relevant_situation["target_position"], "test_generate_situations_two FAILED."
other_related_objects = dataset._world.object_positions("green circle")
smaller_objects = []
for size, sized_objects in other_related_objects:
if size > target_size:
assert not sized_objects, "test_generate_situations_two FAILED."
elif size < target_size:
smaller_objects.extend(sized_objects)
assert len(smaller_objects) >= 1, "test_generate_situations_two FAILED."
end = time.time()
logger.info("test_generate_situations_two PASSED in {} seconds".format(end - start))
def test_generate_situations_three(dataset):
"""Test that for particular commands the right situations get matched."""
start = time.time()
translate_meaning = dataset._vocabulary.translate_meaning
target_shape = "circle"
target_color = "green"
target_size = 2
referred_size = translate_meaning("big")
referred_shape = translate_meaning("circle")
situation_specifications = dataset.generate_situations(num_resampling=1)
relevant_situation = situation_specifications[target_shape][target_color][target_size].pop()
dataset.initialize_world_from_spec(relevant_situation, referred_size=referred_size,
referred_color="",
referred_shape=referred_shape,
actual_size=target_size,
sample_percentage=0.5
)
largest_object = dataset._world.object_positions("circle",
object_size="big").pop()
assert largest_object == relevant_situation["target_position"], "test_generate_situations_three FAILED."
other_related_objects = dataset._world.object_positions("circle")
smaller_objects = []
for size, sized_objects in other_related_objects:
if size > target_size:
assert not sized_objects, "test_generate_situations_three FAILED."
elif size < target_size:
smaller_objects.extend(sized_objects)
assert len(smaller_objects) >= 1, "test_generate_situations_three FAILED."
end = time.time()
logger.info("test_generate_situations_three PASSED in {} seconds".format(end - start))
def test_situation_representation_eq():
start = time.time()
test_situations = [TEST_SITUATION_1, TEST_SITUATION_2, TEST_SITUATION_3, TEST_SITUATION_4]
for i, test_situation_1 in enumerate(test_situations):
for j, test_situation_2 in enumerate(test_situations):
if i == j:
assert test_situation_1 == test_situation_2, "test_situation_representation_eq FAILED."
else:
assert test_situation_1 != test_situation_2, "test_situation_representation_eq FAILED."
end = time.time()
logger.info("test_situation_representation_eq PASSED in {} seconds".format(end - start))
def test_example_representation_eq(dataset):
"""Test that the function for comparing examples returns true when exactly the same example is passed twice."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("walk"), translate_fn("walk"), translate_fn("big"), translate_fn("big"), translate_fn("circle"),
translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
arguments = []
derivation.meaning(arguments)
target_str, target_predicate = arguments.pop().to_predicate()
adverb = ""
for word in derivation.words():
if word in dataset._vocabulary.get_adverbs():
adverb = word
target_commands, _, target_action = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_1)
TEST_DATASET.fill_example(derivation.words(), derivation, TEST_SITUATION_1, target_commands, target_action,
target_predicate, visualize=False, splits=["train"], adverb=adverb)
TEST_DATASET.get_data_pairs(max_examples=10, num_resampling=2)
for split, examples in dataset._data_pairs.items():
for example in examples:
assert dataset.compare_examples(example, example), "test_example_representation_eq FAILED."
end = time.time()
logger.info("test_example_representation_eq PASSED in {} seconds".format(end - start))
def test_example_representation(dataset):
"""Test that when you save an example in its representation its the same if you parse it again."""
start = time.time()
rules_str = "NP -> NN,NP -> JJ NP,DP -> 'a' NP,VP -> VV_intrans 'to' DP,ROOT -> VP"
translate_fn = dataset._vocabulary.translate_meaning
lexicon_str = "T:{},NT:VV_intransitive -> {},T:to,T:a,T:{},NT:JJ -> {},T:{},NT:NN -> {}".format(
translate_fn("walk"), translate_fn("walk"), translate_fn("big"), translate_fn("big"), translate_fn("circle"),
translate_fn("circle")
)
derivation = Derivation.from_str(rules_str, lexicon_str, dataset._grammar)
arguments = []
derivation.meaning(arguments)
target_str, target_predicate = arguments.pop().to_predicate()
adverb = ""
for word in derivation.words():
if word in dataset._vocabulary.get_adverbs():
adverb = word
target_commands, _, target_action = dataset.demonstrate_command(derivation, initial_situation=TEST_SITUATION_1)
dataset.fill_example(derivation.words(), derivation, TEST_SITUATION_1, target_commands, target_action,
target_predicate, visualize=False, splits=["train"], adverb=adverb)
example = dataset._data_pairs["train"].pop()
(parsed_command, parsed_meaning, parsed_derivation, parsed_situation,
parsed_target_commands, _, parsed_action) = dataset.parse_example(
example
)
assert example["command"] == dataset.command_repr(parsed_command), "test_example_representation FAILED."
assert example["meaning"] == dataset.command_repr(parsed_meaning), "test_example_representation FAILED."
assert example["derivation"] == dataset.derivation_repr(parsed_derivation), "test_example_representation "\
"FAILED."
situation = Situation.from_representation(example["situation"])
assert situation == parsed_situation, "test_example_representation FAILED."
assert example["target_commands"] == dataset.command_repr(parsed_target_commands), \
"test_example_representation FAILED."
assert example["verb_in_command"] == dataset._vocabulary.translate_word(parsed_action),\
"test_example_representation FAILED."
assert example["referred_target"] == ' '.join([dataset._vocabulary.translate_word(target_predicate["size"]),
dataset._vocabulary.translate_word(target_predicate["color"]),
dataset._vocabulary.translate_word(target_predicate["noun"])]),\
"test_example_representation FAILED."
end = time.time()
logger.info("test_example_representation PASSED in {} seconds".format(end - start))
def test_initialize_world(dataset):
"""Test that two the same situations get represented in exactly the same image by rendering.py and minigrid.py"""
start = time.time()
test_situations = [TEST_SITUATION_1, TEST_SITUATION_2, TEST_SITUATION_3, TEST_SITUATION_4]
current_situation = dataset._world.get_current_situation()
current_mission = dataset._world.mission
for i, test_situation_1 in enumerate(test_situations):
for j, test_situation_2 in enumerate(test_situations):
dataset._world.clear_situation()
dataset.initialize_world(test_situation_1)
situation_1 = dataset._world.get_current_situation()
dataset._world.clear_situation()
dataset.initialize_world(test_situation_2)
situation_2 = dataset._world.get_current_situation()
if i == j:
assert situation_1 == situation_2, "test_initialize_world FAILED."
else:
assert situation_1 != situation_2, "test_initialize_world FAILED."
dataset.initialize_world(current_situation, mission=current_mission)
end = time.time()
logger.info("test_initialize_world PASSED in {} seconds".format(end - start))
def test_image_representation_situations(dataset):
"""Test that situations are still the same when they need to be in image / numpy RGB array form."""
start = time.time()
current_situation = dataset._world.get_current_situation()
current_mission = dataset._world.mission
test_situations = [TEST_SITUATION_1, TEST_SITUATION_2, TEST_SITUATION_3, TEST_SITUATION_4]
for i, test_situation_1 in enumerate(test_situations):
for j, test_situation_2 in enumerate(test_situations):
dataset._world.clear_situation()
dataset.initialize_world(test_situation_1)
np_situation_image_1 = dataset._world.render(mode='human').getArray()
numpy_array_to_image(np_situation_image_1, os.path.join(TEST_DIRECTORY, "test_im_1.png"))
np_situation_image_1_reread = image_to_numpy_array(os.path.join(TEST_DIRECTORY, "test_im_1.png"))
assert np.array_equal(np_situation_image_1,
np_situation_image_1_reread), "test_image_representation_situations FAILED."
dataset._world.clear_situation()
dataset.initialize_world(test_situation_2)
np_situation_image_2 = dataset._world.render().getArray()
numpy_array_to_image(np_situation_image_2, os.path.join(TEST_DIRECTORY, "test_im_2.png"))
np_situation_image_2_reread = image_to_numpy_array(os.path.join(TEST_DIRECTORY, "test_im_2.png"))
assert np.array_equal(np_situation_image_2,
np_situation_image_2_reread), "test_image_representation_situations FAILED."
if i == j:
assert np.array_equal(np_situation_image_1, np_situation_image_2), \
"test_image_representation_situations FAILED."
else:
assert not np.array_equal(np_situation_image_1, np_situation_image_2), \
"test_image_representation_situations FAILED."
os.remove(os.path.join(TEST_DIRECTORY, "test_im_1.png"))
os.remove(os.path.join(TEST_DIRECTORY, "test_im_2.png"))
dataset.initialize_world(current_situation, mission=current_mission)
end = time.time()
logger.info("test_image_representation_situations PASSED in {} seconds".format(end - start))
def test_encode_situation(dataset):
start = time.time()
current_situation = dataset._world.get_current_situation()
current_mission = dataset._world.mission
test_situation = Situation(grid_size=15, agent_position=Position(row=7, column=2), agent_direction=INT_TO_DIR[0],
target_object=PositionedObject(object=Object(size=2, color='red', shape='circle'),
position=Position(row=7, column=2),
vector=np.array([1, 0, 1])),
placed_objects=[PositionedObject(object=Object(size=2, color='red', shape='circle'),
position=Position(row=7, column=2),
vector=np.array([1, 0, 1])),
PositionedObject(object=Object(size=4, color='green', shape='circle'),
position=Position(row=3, column=12),
vector=np.array([0, 1, 0]))], carrying=None)
dataset._world.clear_situation()
dataset.initialize_world(test_situation)
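# The expected encoding below assumes one channel per object attribute plus one
# "agent present" channel and four one-hot agent-direction channels, which is why the
# array has `_num_attributes_object + 1 + 4` channels and the agent cell is written
# through the [-5] and [-4:] slices.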
expected_numpy_array = np.zeros([15, 15, dataset._world.grid._num_attributes_object + 1 + 4], dtype='uint8')
expected_numpy_array[7, 2, -5] = 1
expected_numpy_array[7, 2, -4:] = np.array([1, 0, 0, 0])
expected_numpy_array[7, 2, :-5] = dataset._object_vocabulary.get_object_vector(shape='circle', color='red',
size=2)
expected_numpy_array[3, 12, :-5] = dataset._object_vocabulary.get_object_vector(shape='circle', color='green',
size=4)
encoded_numpy_array = dataset._world.grid.encode(agent_row=7, agent_column=2, agent_direction=0)
assert np.array_equal(expected_numpy_array, encoded_numpy_array), "test_encode_situation FAILED."
dataset.initialize_world(current_situation, mission=current_mission)
end = time.time()
logger.info("test_encode_situation PASSED in {} seconds".format(end - start))
def test_k_shot_generalization(dataset):
start = time.time()
current_situation = dataset._world.get_current_situation()
current_mission = dataset._world.mission
k_shot_generalization = 5
dataset.get_data_pairs(max_examples=100000, num_resampling=1, other_objects_sample_percentage=0.5,
split_type="generalization", k_shot_generalization=k_shot_generalization)
# Test that all the splits only contain examples related to their split.
visual_split_examples = dataset._data_pairs["visual"]
for example in visual_split_examples:
target_object = example["situation"]["target_object"]["object"]
assert target_object["shape"] == "square" and target_object["color"] == "red", \
"test_k_shot_generalization FAILED in split visual."
situational_split_1 = dataset._data_pairs["situational_1"]
for example in situational_split_1:
direction_to_target = example["situation"]["direction_to_target"]
assert direction_to_target == "sw", "test_k_shot_generalization FAILED in split situational_1."
situational_split_2 = dataset._data_pairs["situational_2"]
for example in situational_split_2:
referred_target = example["referred_target"]
assert "small" in referred_target, \
"test_k_shot_generalization FAILED in split situational_2."
target_size = example["situation"]["target_object"]["object"]["size"]
assert target_size == '2', "test_k_shot_generalization FAILED in split situational_2."
contextual_split = dataset._data_pairs["contextual"]
for example in contextual_split:
assert (dataset._vocabulary.translate_meaning(example["verb_in_command"])
in dataset._vocabulary.get_transitive_verbs()), \
"test_k_shot_generalization FAILED in split contextual."
target_object = example["situation"]["target_object"]["object"]
assert target_object["shape"] == "square" and target_object["size"] == '3', \
"test_k_shot_generalization FAILED in split contextual."
# Test that the training set doesn't contain more than k examples of each of the test splits.
examples_per_split = {"visual": 0, "situational_1": 0, "situational_2": 0, "contextual": 0, "adverb_1": 0}
for example in dataset._data_pairs["train"]:
target_object = example["situation"]["target_object"]["object"]
target_size = target_object["size"]
direction_to_target = example["situation"]["direction_to_target"]
referred_target = example["referred_target"]
if target_object["shape"] == "square" and target_object["color"] == "red":
examples_per_split["visual"] += 1
if direction_to_target == "sw":
examples_per_split["situational_1"] += 1
if "small" in referred_target and target_size == 2:
examples_per_split["situational_2"] += 1
if (dataset._vocabulary.translate_meaning(example["verb_in_command"]) in
dataset._vocabulary.get_transitive_verbs() and
target_object["shape"] == "square" and target_object["size"] == '3'):
examples_per_split["contextual"] += 1
for split, examples_count in examples_per_split.items():
if split == "adverb_1":
assert examples_count == k_shot_generalization, \
"test_k_shot_generalization FAILED in split train for split {}.".format(split)
else:
assert examples_count == 0, "test_k_shot_generalization FAILED in split train for split {}.".format(split)
dataset.initialize_world(current_situation, mission=current_mission)
end = time.time()
logger.info("test_k_shot_generalization PASSED in {} seconds".format(end - start))
def run_all_tests():
test_save_and_load_dataset(TEST_DATASET)
test_save_and_load_dataset(TEST_DATASET_NONCE)
test_save_and_load_dataset_nonce()
test_derivation_from_rules(TEST_DATASET)
test_derivation_from_rules(TEST_DATASET_NONCE)
test_derivation_from_string(TEST_DATASET)
test_derivation_from_string(TEST_DATASET_NONCE)
test_demonstrate_target_commands_one(TEST_DATASET)
test_demonstrate_target_commands_one(TEST_DATASET_NONCE)
test_demonstrate_target_commands_two(TEST_DATASET)
test_demonstrate_target_commands_two(TEST_DATASET_NONCE)
test_demonstrate_target_commands_three(TEST_DATASET)
test_demonstrate_target_commands_three(TEST_DATASET_NONCE)
test_demonstrate_command_one(TEST_DATASET)
test_demonstrate_command_one(TEST_DATASET_NONCE)
test_demonstrate_command_two(TEST_DATASET)
test_demonstrate_command_two(TEST_DATASET_NONCE)
test_demonstrate_command_three(TEST_DATASET)
test_demonstrate_command_three(TEST_DATASET_NONCE)
test_demonstrate_command_four(TEST_DATASET)
test_demonstrate_command_four(TEST_DATASET_NONCE)
test_demonstrate_command_five(TEST_DATASET)
test_demonstrate_command_five(TEST_DATASET_NONCE)
test_demonstrate_command_six(TEST_DATASET)
test_demonstrate_command_six(TEST_DATASET_NONCE)
test_find_referred_target_one(TEST_DATASET)
test_find_referred_target_one(TEST_DATASET_NONCE)
test_find_referred_target_two(TEST_DATASET)
test_find_referred_target_two(TEST_DATASET_NONCE)
test_generate_possible_targets_one(TEST_DATASET)
test_generate_possible_targets_one(TEST_DATASET_NONCE)
test_generate_possible_targets_two(TEST_DATASET)
test_generate_possible_targets_two(TEST_DATASET_NONCE)
test_generate_situations_one(TEST_DATASET)
test_generate_situations_one(TEST_DATASET_NONCE)
test_generate_situations_two(TEST_DATASET)
test_generate_situations_two(TEST_DATASET_NONCE)
test_generate_situations_three(TEST_DATASET)
test_generate_situations_three(TEST_DATASET_NONCE)
test_situation_representation_eq()
test_example_representation_eq(TEST_DATASET)
test_example_representation_eq(TEST_DATASET_NONCE)
test_example_representation(TEST_DATASET)
test_example_representation(TEST_DATASET_NONCE)
test_initialize_world(TEST_DATASET)
test_initialize_world(TEST_DATASET_NONCE)
test_image_representation_situations(TEST_DATASET)
test_image_representation_situations(TEST_DATASET_NONCE)
test_encode_situation(TEST_DATASET)
test_encode_situation(TEST_DATASET_NONCE)
#test_k_shot_generalization(TEST_DATASET)
#test_k_shot_generalization(TEST_DATASET_NONCE)
shutil.rmtree(TEST_DIRECTORY)
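# Sketch of a possible entry point (not part of the original file; the repository may
# invoke the suite differently): running the module directly executes all tests above.
if __name__ == "__main__":
    run_all_tests()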
| {
"content_hash": "17da3717f7b50ae84b76c43d95d8aea2",
"timestamp": "",
"source": "github",
"line_count": 809,
"max_line_length": 121,
"avg_line_length": 60.58961681087763,
"alnum_prop": 0.6309647673256217,
"repo_name": "LauraRuis/groundedSCAN",
"id": "b2a65f4fa6f094c996c527515c6b18195577f403",
"size": "49063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GroundedScan/dataset_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "284405"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import numpy as np
# Import astronomical modules
from astropy.stats import sigma_clip, sigma_clipped_stats
# Import the relevant PTS classes and modules
from . import general
from ..basics.mask import Mask
# -----------------------------------------------------------------
# Calculate sigma-to-FWHM and FWHM-to-sigma conversion factors
sigma_to_fwhm = (8 * np.log(2))**0.5
fwhm_to_sigma = 1.0 / sigma_to_fwhm
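# Illustration of the factors above (example numbers only): for a Gaussian profile
# FWHM = sqrt(8 * ln 2) * sigma, roughly 2.3548 * sigma, so
#   fwhm = 2.0 * sigma_to_fwhm    # ~4.71 for sigma = 2.0
#   sigma = fwhm * fwhm_to_sigma  # recovers 2.0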
# -----------------------------------------------------------------
def sigma_clip_mask_list(data, sigma=3.0, mask=None):
"""
Sigma-clip a flat list of values and return an element-wise outlier mask.
:param data: list or array of values to clip
:param sigma: clipping threshold in standard deviations
:param mask: optional existing mask to update (deep-copied); if None a fresh mask of zeros is used
:return: the new or updated mask, with True for entries rejected by the clipping
"""
masked_list = sigma_clip(data, sigma=sigma, iters=None, copy=False)
new_mask = copy.deepcopy(mask) if mask is not None else [0]*len(data)
for i, masked in enumerate(masked_list.mask):
if masked: new_mask[i] = True
# Return the new or updated mask
return new_mask
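# Usage sketch (illustrative values, not from the original code): given the measured
# widths of a set of star candidates,
#   widths = [2.1, 2.0, 1.9, 2.2, 2.0, 15.0]
#   width_mask = sigma_clip_mask_list(widths, sigma=3.0)
# entries whose value lies far outside the bulk of the distribution come back as True
# in the returned mask; a mask passed in via 'mask' is deep-copied and only extended.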
# -----------------------------------------------------------------
def sigma_clip_mask(data, sigma_level=3.0, mask=None):
"""
Sigma-clip the values of a 2D frame and flag the outlier pixels in a mask.
:param data: 2D array of values
:param sigma_level: clipping threshold in standard deviations
:param mask: optional existing Mask to update (deep-copied); if None a new empty Mask is created
:return: the new or updated Mask, with outlier pixels set to True
"""
# Split the x, y and z values of the data, without the masked values
x_values, y_values, z_values = general.split_xyz(data, mask=mask)
# Sigma-clip z-values that are outliers
masked_z_values = sigma_clip(z_values, sigma=sigma_level, iters=None, copy=False)
# Copy the mask or create a new one if none was provided
new_mask = copy.deepcopy(mask) if mask is not None else Mask(np.zeros_like(data))
for i, masked in enumerate(masked_z_values.mask):
if masked:
x = x_values[i]
y = y_values[i]
new_mask[y,x] = True
#if not isinstance(new_mask, Mask): print(new_mask, mask)
# Assert the mask is of type 'Mask'
assert isinstance(new_mask, Mask)
# Return the new or updated mask
return new_mask
# -----------------------------------------------------------------
def sigma_clipped_median(data, sigma=3.0, mask=None):
"""
Calculate the sigma-clipped median of the data.
:param data: array of values
:param sigma: clipping threshold in standard deviations
:param mask: optional mask of values to ignore
:return: the sigma-clipped median
"""
# Calculate the sigma-clipped mean and median
_, median, _ = sigma_clipped_stats(data, mask=mask, sigma=sigma)
# Return the median value
return median
# -----------------------------------------------------------------
def sigma_clipped_statistics(data, sigma=3.0, mask=None):
"""
Calculate the sigma-clipped mean, median and standard deviation of the data.
:param data: array of values
:param sigma: clipping threshold in standard deviations
:param mask: optional mask of values to ignore
:return: tuple of (mean, median, stddev)
"""
# Calculate the sigma-clipped mean and median
mean, median, stddev = sigma_clipped_stats(data, mask=mask, sigma=sigma)
# Return the statistical parameters
return mean, median, stddev
# -----------------------------------------------------------------
def sigma_clip_split(input_list, criterion, sigma=3.0, only_high=False, only_low=False, nans="low"):
"""
Split a list of items into a 'valid' and an 'invalid' list based on sigma clipping of a criterion.
:param input_list: items to split
:param criterion: function mapping an item to the value used for the clipping
:param sigma: clipping threshold in standard deviations
:param only_high: only reject outliers above the mean
:param only_low: only reject outliers below the mean
:param nans: how to treat NaN values (not used by the current implementation)
:return: tuple of (valid_list, invalid_list)
"""
# Initialize an empty list of widths
determinants = []
# Loop over all the star candidates and calculate their width
for item in input_list: determinants.append(criterion(item))
# Use sigma clipping to separate stars and unidentified objects
mask = sigma_clip_mask_list(determinants, sigma=sigma)
# Calculate the mean value of the determinants that are not masked
mean = np.ma.mean(np.ma.masked_array(determinants, mask=mask))
# Create separate lists for the stars and for the UFOs
valid_list = []
invalid_list = []
# Loop over all items in the input list, putting them in either the valid or invalid list
for index, item in enumerate(input_list):
value = criterion(item)
if only_high:
if mask[index] and value > mean: invalid_list.append(item)
else: valid_list.append(item)
elif only_low:
if mask[index] and value < mean: invalid_list.append(item)
else: valid_list.append(item)
else:
if mask[index]: invalid_list.append(item)
else: valid_list.append(item)
# Return the valid and invalid lists
return valid_list, invalid_list
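# Usage sketch for sigma_clip_split (the 'candidates' list and 'fwhm' attribute are
# hypothetical names, not taken from the original code):
#   stars, ufos = sigma_clip_split(candidates, criterion=lambda item: item.fwhm, sigma=3.0)
# Items whose criterion value is a sigma-clipped outlier end up in the second list;
# with only_high=True only outliers above the mean are rejected, and with only_low=True
# only those below it.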
# -----------------------------------------------------------------
def cutoff(values, method, limit):
"""
Determine a cutoff value for a list of values.
:param values: the values to determine the cutoff for
:param method: either "percentage" or "sigma_clip"
:param limit: the fraction for the percentage method, or the sigma level for the sigma_clip method
:return: the cutoff value
"""
# Percentage method
if method == "percentage":
# Create a sorted list for the input values
sorted_values = sorted(values)
# Determine the splitting point
split = (1.0-limit) * len(sorted_values)
index = int(round(split))
# Return the corresponding value in the sorted list
return sorted_values[index]
# Sigma-clipping method
elif method == "sigma_clip":
# Perform sigma clipping on the input list
masked_values = sigma_clip(np.array(values), sigma=limit, iters=None, copy=False)
# Calculate the maximum of the masked array
return np.ma.max(masked_values)
else: raise ValueError("Invalid cutoff method (must be 'percentage' or 'sigma_clip')")
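# Worked example for the "percentage" method (illustrative numbers): with
# values = [1, 2, ..., 10] and limit = 0.2, split = 0.8 * 10 = 8 and the function
# returns sorted_values[8] = 9, i.e. roughly the top 'limit' fraction of the values
# lies at or above the returned cutoff. For "sigma_clip" the returned cutoff is the
# maximum of the values that survive clipping at 'limit' sigma.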
# -----------------------------------------------------------------
| {
"content_hash": "02828f0c15e262d49b502255de53a08a",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 100,
"avg_line_length": 26.692682926829267,
"alnum_prop": 0.5778508771929824,
"repo_name": "Stargrazer82301/CAAPR",
"id": "28a575ee13d515782b2f7d4ab760baf7046b9194",
"size": "5964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/statistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "21972"
},
{
"name": "HTML",
"bytes": "2408"
},
{
"name": "Prolog",
"bytes": "16433"
},
{
"name": "Python",
"bytes": "4465217"
},
{
"name": "Shell",
"bytes": "3793"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import copy
import os
import pickle
import warnings
from django.core.exceptions import SuspiciousOperation
from django.http import (QueryDict, HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseNotAllowed,
HttpResponseNotModified, StreamingHttpResponse,
SimpleCookie, BadHeaderError,
parse_cookie)
from django.test import TestCase
from django.utils.encoding import smart_str
from django.utils import six
from django.utils import unittest
class QueryDictTests(unittest.TestCase):
def test_missing_key(self):
q = QueryDict(str(''))
self.assertRaises(KeyError, q.__getitem__, 'foo')
def test_immutability(self):
q = QueryDict(str(''))
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
def test_immutable_get_with_default(self):
q = QueryDict(str(''))
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict(str(''))
self.assertEqual(q.getlist('foo'), [])
if not six.PY3:
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(list(six.iteritems(q)), [])
self.assertEqual(list(six.iterlists(q)), [])
self.assertEqual(list(six.iterkeys(q)), [])
self.assertEqual(list(six.itervalues(q)), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict(str('foo=bar'))
self.assertEqual(q['foo'], 'bar')
self.assertRaises(KeyError, q.__getitem__, 'bar')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
if not six.PY3:
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
if not six.PY3:
self.assertFalse(q.has_key('bar'))
self.assertFalse('bar' in q)
self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
self.assertEqual(list(six.iterkeys(q)), ['foo'])
self.assertEqual(list(six.itervalues(q)), ['bar'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(str(''), mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(str(''), mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict(str('')).copy()
self.assertRaises(KeyError, q.__getitem__, "foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(str('')).copy()
q['name'] = 'john'
del q['name']
self.assertFalse('name' in q)
def test_basic_mutable_operations(self):
q = QueryDict(str('')).copy()
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
if not six.PY3:
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
self.assertEqual(sorted(list(six.iteritems(q))),
[('foo', 'another'), ('name', 'john')])
self.assertEqual(sorted(list(six.iterlists(q))),
[('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
self.assertEqual(sorted(list(six.iterkeys(q))),
['foo', 'name'])
self.assertEqual(sorted(list(six.itervalues(q))),
['another', 'john'])
self.assertEqual(len(q), 2)
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict(str('vote=yes&vote=no'))
self.assertEqual(q['vote'], 'no')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('vote', 'default'), 'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), ['yes', 'no'])
self.assertEqual(q.getlist('foo'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
if not six.PY3:
self.assertEqual(q.has_key('vote'), True)
self.assertEqual('vote' in q, True)
if not six.PY3:
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
self.assertEqual(list(six.iterkeys(q)), ['vote'])
self.assertEqual(list(six.itervalues(q)), ['no'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertRaises(AttributeError, q.__delitem__, 'vote')
if not six.PY3:
def test_invalid_input_encoding(self):
"""
QueryDicts must be able to handle invalid input encoding (in this
case, bad UTF-8 encoding).
This test doesn't apply under Python 3 because the URL is a string
and not a bytestring.
"""
q = QueryDict(str(b'foo=bar&foo=\xff'))
self.assertEqual(q['foo'], '\ufffd')
self.assertEqual(q.getlist('foo'), ['bar', '\ufffd'])
def test_pickle(self):
q = QueryDict(str(''))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict(str('a=b&c=d'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict(str('a=b&c=d&a=1'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict(str("a=1&a=2"), mutable=True)
y = QueryDict(str("a=3&a=4"))
x.update(y)
self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
q = q.copy()
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
def test_headers_type(self):
r = HttpResponse()
# The following tests explicitly test types in addition to values
# because in Python 2 u'foo' == b'foo'.
# ASCII unicode or bytes values are converted to native strings.
r['key'] = 'test'
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
r['key'] = 'test'.encode('ascii')
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
# Latin-1 unicode or bytes values are also converted to native strings.
r['key'] = 'café'
self.assertEqual(r['key'], smart_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
r['key'] = 'café'.encode('latin-1')
self.assertEqual(r['key'], smart_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
# Other unicode values are MIME-encoded (there's no way to pass them as bytes).
r['key'] = '†'
self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
self.assertIsInstance(r['key'], str)
# The response also converts unicode or bytes keys to strings, but requires
# them to contain ASCII
r = HttpResponse()
del r['Content-Type']
r['foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
del r['Content-Type']
r[b'foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
self.assertRaises(UnicodeError, r.__setitem__, 'føø', 'bar')
self.assertRaises(UnicodeError, r.__setitem__, 'føø'.encode('utf-8'), 'bar')
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')
def test_dict_behavior(self):
"""
Test for bug #14020: Make HttpResponse.get work like dict.get
"""
r = HttpResponse()
self.assertEqual(r.get('test'), None)
def test_non_string_content(self):
#Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b'12345')
#test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b'12345')
def test_iter_content(self):
r = HttpResponse(['abc', 'def', 'ghi'])
self.assertEqual(r.content, b'abcdefghi')
#test iter content via property
r = HttpResponse()
r.content = ['idan', 'alex', 'jacob']
self.assertEqual(r.content, b'idanalexjacob')
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b'123')
#test retrieval explicitly using iter (deprecated) and odd inputs
r = HttpResponse()
r.content = ['1', '2', 3, '\u079e']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PendingDeprecationWarning)
my_iter = iter(r)
self.assertEqual(w[0].category, PendingDeprecationWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PendingDeprecationWarning)
result = list(my_iter)
self.assertEqual(w[0].category, PendingDeprecationWarning)
#'\xde\x9e' == unichr(1950).encode('utf-8')
self.assertEqual(result, [b'1', b'2', b'3', b'\xde\x9e'])
self.assertEqual(r.content, b'123\xde\x9e')
#with Content-Encoding header
r = HttpResponse()
r['Content-Encoding'] = 'winning'
r.content = [b'abc', b'def']
self.assertEqual(r.content, b'abcdef')
r.content = ['\u079e']
self.assertRaises(TypeError if six.PY3 else UnicodeEncodeError,
getattr, r, 'content')
# .content can safely be accessed multiple times.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b'helloworld')
# accessing the iterator works (once) after accessing .content
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(b''.join(r), b'')
# accessing .content still works
self.assertEqual(r.content, b'helloworld')
# XXX accessing .content doesn't work if the response was iterated first
# XXX change this when the deprecation completes in HttpResponse
r = HttpResponse(iter(['hello', 'world']))
with warnings.catch_warnings():
warnings.simplefilter("ignore", PendingDeprecationWarning)
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(r.content, b'') # not the expected result!
# additional content can be written to the response.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, b'helloworld')
r.write('!')
self.assertEqual(r.content, b'helloworld!')
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse('abc')
i = iter(r)
self.assertEqual(list(i), [b'abc'])
self.assertEqual(list(i), [])
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(['abc'])
r.write('def')
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b'abcdef')
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
'mailto:test@example.com',
'file:///etc/passwd',
]
for url in bad_urls:
self.assertRaises(SuspiciousOperation,
HttpResponseRedirect, url)
self.assertRaises(SuspiciousOperation,
HttpResponsePermanentRedirect, url)
class HttpResponseSubclassesTests(TestCase):
def test_redirect(self):
response = HttpResponseRedirect('/redirected/')
self.assertEqual(response.status_code, 302)
# Test that standard HttpResponse init args can be used
response = HttpResponseRedirect('/redirected/',
content='The resource has temporarily moved',
content_type='text/html')
self.assertContains(response, 'The resource has temporarily moved', status_code=302)
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn('content-type', response)
def test_not_allowed(self):
response = HttpResponseNotAllowed(['GET'])
self.assertEqual(response.status_code, 405)
# Test that standard HttpResponse init args can be used
response = HttpResponseNotAllowed(['GET'],
content='Only the GET method is allowed',
content_type='text/html')
self.assertContains(response, 'Only the GET method is allowed', status_code=405)
class StreamingHttpResponseTests(TestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(['hello', 'world']))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b'hello', b'world'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(['abc', 'def'])
self.assertEqual(list(r), [b'abc', b'def'])
self.assertEqual(list(r), [])
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, 'content'))
# and you can't accidentally assign to a `content` attribute.
with self.assertRaises(AttributeError):
r.content = 'xyz'
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, 'streaming_content'))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(['abc', 'def'])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b'ABC', b'DEF'])
# coercing a streaming response to bytes doesn't return a complete HTTP
# message like a regular response does. it only gives us the headers.
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(
six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
# and this won't consume its content.
self.assertEqual(list(r), [b'hello', b'world'])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(['hello', 'world']))
with self.assertRaises(Exception):
r.write('!')
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
class FileCloseTests(TestCase):
def test_response(self):
filename = os.path.join(os.path.dirname(__file__), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = HttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# don't automatically close file when we finish iterating the response.
file1 = open(filename)
r = HttpResponse(file1)
self.assertFalse(file1.closed)
with warnings.catch_warnings():
warnings.simplefilter("ignore", PendingDeprecationWarning)
list(r)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(__file__), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
def test_encode(self):
"""
Test that we don't output tricky characters in encoded value
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertTrue(";" not in c.output().rstrip(';')) # IE compat
self.assertTrue("," not in c.output().rstrip(';')) # Safari compat
def test_decode(self):
"""
Test that we can still preserve semi-colons and commas
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output())
self.assertEqual(c['test'].value, c2['test'].value)
def test_decode_2(self):
"""
Test that we haven't broken normal encoding
"""
c = SimpleCookie()
c['test'] = b"\xf0"
c2 = SimpleCookie()
c2.load(c.output())
self.assertEqual(c['test'].value, c2['test'].value)
def test_nonstandard_keys(self):
"""
Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
"""
self.assertTrue('good_cookie' in parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
def test_repeated_nonstandard_keys(self):
"""
Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
"""
self.assertTrue('good_cookie' in parse_cookie('a:=b; a:=c; good_cookie=yes').keys())
def test_httponly_after_load(self):
"""
Test that we can use httponly attribute on cookies that we load
"""
c = SimpleCookie()
c.load("name=val")
c['name']['httponly'] = True
self.assertTrue(c['name']['httponly'])
| {
"content_hash": "4791c2c88d2a89661d268a658f321075",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 95,
"avg_line_length": 39.49575551782682,
"alnum_prop": 0.5927868288698792,
"repo_name": "chrisfranzen/django",
"id": "2d172ad0e0aa1faec2009dfd8f5ea083e716593a",
"size": "23309",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/regressiontests/httpwrappers/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42663"
},
{
"name": "HTML",
"bytes": "95024"
},
{
"name": "JavaScript",
"bytes": "94313"
},
{
"name": "Python",
"bytes": "8216479"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import pika, json
from config import config
f = open("sample_queue_message.json", "r")
sample_data = json.load(f)
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=config["server"]))
channel = connection.channel()
channel.queue_declare(queue=config["queue"])
channel.basic_publish(exchange='',
routing_key=config["queue"],
body=json.dumps(sample_data))
print " [x] Sent ", json.dumps(sample_data)
connection.close()
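# Hedged sketch of a matching consumer for the queue declared above (not part of this
# script; it would need its own connection/channel and assumes pika >= 1.0, where
# basic_consume takes an on_message_callback):
#
# def on_message(channel, method, properties, body):
#     print(" [x] Received", json.loads(body))
#
# channel.basic_consume(queue=config["queue"], on_message_callback=on_message, auto_ack=True)
# channel.start_consuming()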
| {
"content_hash": "e8126db38a50b247c6f85e0e998edf9a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 25.94736842105263,
"alnum_prop": 0.6673427991886409,
"repo_name": "adrianchifor/Flipcam-Backend",
"id": "a6648fd60cef1b5842ca7f11a044d41748abb559",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "videoconcat/test_send.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "16015"
},
{
"name": "Python",
"bytes": "2978"
}
],
"symlink_target": ""
} |
import uuid
import requests
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth import Auth
from website import security
from website import settings
from website.project import new_node
from website.models import User, Node, MailRecord
def record_message(message, created):
record = MailRecord(
data=message.raw,
records=created,
)
record.save()
def get_or_create_user(fullname, address, is_spam):
"""Get or create user by email address.
:param str fullname: User full name
:param str address: User email address
:param bool is_spam: User flagged as potential spam
:return: Tuple of (user, created)
"""
try:
user = User.find_one(Q('username', 'iexact', address))
return user, False
except ModularOdmException:
password = str(uuid.uuid4())
user = User.create_confirmed(address, password, fullname)
user.verification_key = security.random_string(20)
if is_spam:
user.system_tags.append('is_spam')
user.save()
return user, True
def get_or_create_node(title, user):
"""Get or create node by title and creating user.
:param str title: Node title
:param User user: User creating node
:return: Tuple of (node, created)
"""
try:
node = Node.find_one(
Q('title', 'iexact', title)
& Q('contributors', 'eq', user._id)
)
return node, False
except ModularOdmException:
node = new_node('project', title, user)
return node, True
def provision_node(conference, message, node, user):
"""
:param Conference conference:
:param ConferenceMessage message:
:param Node node:
:param User user:
"""
auth = Auth(user=user)
node.update_node_wiki('home', message.text, auth)
node.add_contributors(prepare_contributors(conference.admins), log=False)
if not message.is_spam and conference.public_projects:
node.set_privacy('public', auth=auth)
node.add_tag(message.conference_name, auth=auth)
node.add_tag(message.conference_category, auth=auth)
node.system_tags.extend(['emailed', message.conference_name, message.conference_category])
if message.is_spam:
node.system_tags.append('spam')
node.save()
def prepare_contributors(admins):
return [
{
'user': admin,
'permissions': ['read', 'write', 'admin'],
'visible': False,
}
for admin in admins
]
def upload_attachment(user, node, attachment):
from website.addons.osfstorage import utils as storage_utils
attachment.seek(0)
name = attachment.filename or settings.MISSING_FILE_NAME
content = attachment.read()
upload_url = storage_utils.get_waterbutler_upload_url(user, node, path=name)
requests.put(
upload_url,
data=content,
)
def upload_attachments(user, node, attachments):
for attachment in attachments:
upload_attachment(user, node, attachment)
| {
"content_hash": "5469b915ed9d2f03a1aa81bf6bd97b6b",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 94,
"avg_line_length": 27.64864864864865,
"alnum_prop": 0.6539589442815249,
"repo_name": "himanshuo/osf.io",
"id": "5360c49beccfaef409433677f6f06c85f2c53271",
"size": "3094",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "website/conferences/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78345"
},
{
"name": "HTML",
"bytes": "34188"
},
{
"name": "JavaScript",
"bytes": "885345"
},
{
"name": "Mako",
"bytes": "442634"
},
{
"name": "Python",
"bytes": "2536134"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
import logging
import goaway.globalvars as globalvars
logger = logging.getLogger(__name__)
DATA_STORE_HANDLE_KIND_ATTR = "__store"
NAME_ATTR = "__name"
class ObjectHandle(object):
"""
Represents a shared object in a datstore.
Instances of this class are returned by object handle constructors.
Applications should not directly create these.
Example:
accumulators = goaway.StrictCentralized()
accumulators.flowers = 0
accumulators.trees = 10
"""
def __init__(self, data_store_kind, name):
"""
Args:
data_store_kind: Name of the type of datastore to use (from globalvars)
name: Name of the object, to identify its store.
"""
self.__dict__[DATA_STORE_HANDLE_KIND_ATTR] = data_store_kind
self.__dict__[NAME_ATTR] = name
def __getattr__(self, field):
"""
Hook when an attribute is fetched.
"""
store = globalvars.get_data_store(getattr(self, DATA_STORE_HANDLE_KIND_ATTR))
object_name = getattr(self, NAME_ATTR)
value = store.get(object_name, field)
return value
def __setattr__(self, field, value):
"""
Hook when an attribute is set.
"""
store = globalvars.get_data_store(getattr(self, DATA_STORE_HANDLE_KIND_ATTR))
object_name = getattr(self, NAME_ATTR)
store.set(object_name, field, value)
| {
"content_hash": "31a95cfc220e43fd59d061f2a4cd6ab5",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 85,
"avg_line_length": 30.934782608695652,
"alnum_prop": 0.617709065354884,
"repo_name": "anpere/goaway",
"id": "820f6582610d4d6536ed231e35188f8098b4410b",
"size": "1423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goaway/objecthandle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20495"
}
],
"symlink_target": ""
} |
import codecs, logging, os, random
import GlobalStore
logger = logging.getLogger('DideRobot')
def isAllowedPath(path):
#This function checks whether the provided path is inside the bot's data folder
# This prevents people from adding "../.." to bot calls to gain free access to the server's filesystem
if not os.path.abspath(path).startswith(GlobalStore.scriptfolder):
logger.warning("[FileUtil] Somebody is trying to leave the bot's file systems by calling filename '{}'".format(path))
return False
return True
def getLineCount(filename):
#Set a default in case the file has no lines
linecount = -1  # -1 so that, with the +1 at the end, an empty file ends up with a count of 0
if not filename.startswith(GlobalStore.scriptfolder):
filename = os.path.join(GlobalStore.scriptfolder, filename)
if not os.path.isfile(filename):
return -1
with codecs.open(filename, 'r', 'utf-8') as f:
for linecount, line in enumerate(f):
continue
return linecount + 1 #'enumerate()' starts at 0, so add one
def getLineFromFile(filename, wantedLineNumber):
"""Returns the specified line number from the provided file (line number starts at 0)"""
if not filename.startswith(GlobalStore.scriptfolder):
filename = os.path.join(GlobalStore.scriptfolder, filename)
#Check if it's an allowed path
if not isAllowedPath(filename):
return None
if not os.path.isfile(filename):
logger.error(u"Can't read line {} from file '{}'; file does not exist".format(wantedLineNumber, filename))
return None
with codecs.open(filename, 'r', 'utf-8') as f:
for lineNumber, line in enumerate(f):
if lineNumber == wantedLineNumber:
return line.rstrip()
return None
def getRandomLineFromFile(filename, linecount=None):
if not filename.startswith(GlobalStore.scriptfolder):
filename = os.path.join(GlobalStore.scriptfolder, filename)
if not linecount:
linecount = getLineCount(filename)
if linecount <= 0:
return None
return getLineFromFile(filename, random.randrange(0, linecount))
def getAllLinesFromFile(filename):
#Make sure it's an absolute filename
if not filename.startswith(GlobalStore.scriptfolder):
filename = os.path.join(GlobalStore.scriptfolder, filename)
if not isAllowedPath(filename):
return None
if not os.path.exists(filename):
logger.error(u"Can't read lines from file '{}'; it does not exist".format(filename))
return None
#Get all the lines!
with codecs.open(filename, 'r', 'utf-8') as linesfile:
return linesfile.readlines()
def deleteIfExists(filename):
"""
Deletes the provided file if it exists
:param filename: The filename to delete
:return: True if the file existed and was removed, False if it didn't exist
"""
#Use try-except instead of 'os.exists' to prevent race conditions between the check and the delete
try:
os.remove(filename)
return True
except OSError:
return False
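# Usage sketch (hypothetical file path and variable name): pick a random line from a
# data file inside the bot's folder, optionally passing a cached line count so the file
# does not have to be scanned twice.
# quote = getRandomLineFromFile("data/quotes.txt", linecount=cachedQuoteCount)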
| {
"content_hash": "7a8e3ea905d49ea7ef3eb33226b9a3b9",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 119,
"avg_line_length": 36.896103896103895,
"alnum_prop": 0.7504399859204506,
"repo_name": "Didero/DideRobot",
"id": "0ebfa0a20369797d933d3b201c2f1fb26d45ee43",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/FileUtil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "478319"
}
],
"symlink_target": ""
} |