text stringlengths 4 to 1.02M | meta dict |
---|---|
"""Geometric transforms (e.g. rigid transformation)."""
import dataclasses
from typing import Union
import numpy as np
@dataclasses.dataclass
class Isometry:
"""3D transform object used to represent an SE(3) (isometric) transform.
Internally, this class stores the transform [R|t], composed of a rotation (R)
and a translation (t).
Usage example:
```python
frameB_from_frameA = Isometry(R=np.eye(3), t=np.ones(3))
pointA = np.random.rand(3)
pointB = frameB_from_frameA * pointA
pointA = frameB_from_frameA.inverse() * pointB
# Compose multiple transforms:
frameA_to_frameB = Isometry(...)
frameB_to_frameC = Isometry(...)
frameA_to_frameC = frameB_to_frameC * frameA_to_frameB
# Apply transform on single point:
pointB = frameA_to_frameB * np.array([4.0, 2.0, 1.0])
# Apply transform on a pointcloud (Nx3):
pointcloudC = frameA_to_frameC * np.random.rand(1000, 3)
```
"""
# Rotation component with tensor shape (3, 3)
R: np.ndarray = dataclasses.field(default_factory=lambda: np.eye(3)) # pylint: disable=invalid-name
# Translation component with tensor shape (3,)
t: np.ndarray = dataclasses.field(default_factory=lambda: np.zeros(3))
@classmethod
def from_matrix(cls, matrix: np.ndarray) -> 'Isometry':
"""Constructs from a 3x4 or 4x4 transform matrix."""
if matrix.shape not in [(3, 4), (4, 4)]:
raise ValueError('invalid matrix.shape={}'.format(matrix.shape))
return cls(R=matrix[:3, :3], t=matrix[:3, 3])
def matrix3x4(self) -> np.ndarray:
"""Returns as 3x4 matrix.
Returns a matrix [R|t] of shape (3, 4)
"""
return np.hstack((self.R, self.t.reshape((3, 1))))
def matrix4x4(self) -> np.ndarray:
"""Returns as 4x4 matrix.
Returns a matrix [R|t] of shape (4, 4)
[0|1]
"""
matrix = np.eye(4)
matrix[:3, :3] = self.R
matrix[:3, 3] = self.t
return matrix
def inverse(self) -> 'Isometry':
"""Returns the inverse of self.
Usage example:
frameB_from_frameA = Isometry(R=np.eye(3), t=np.ones(3))
frameA_from_frameB = frameB_from_frameA.inverse()
Returns:
Inverse transform of self.
"""
return Isometry(self.R.T, -self.R.T.dot(self.t))
def compose(self, other: 'Isometry') -> 'Isometry':
"""Returns the composite transform equal to self * other.
This function composes multiple transforms together; the same result can
alternatively be achieved via the `*` operator.
Usage example:
frameB_from_frameA = Isometry(R=..., t=...)
frameC_from_frameB = Isometry(R=..., t=...)
frameC_from_frameA = frameC_from_frameB.compose(frameB_from_frameA)
Args:
other: Another transform to compose with.
Returns:
Composite transform equal to self * other.
"""
return Isometry(self.R.dot(other.R), self.R.dot(other.t) + self.t)
def transform_points(self, points: np.ndarray) -> np.ndarray:
"""Computes the transformation of a set of points.
frameA_to_frameB = Isometry()
pointsA = np.random.rand(1000, 3)
pointsB = frameA_to_frameB.transform_points(pointsA)
Args:
points: Tensor containing point positions of shape (N, 3) or a single
point vector of shape (3,).
Returns:
Transformed points.
"""
projected = np.einsum('ij,nj->ni', self.R, points.reshape(-1, 3)) + self.t
return np.squeeze(projected)
def __mul__(
self, other: Union['Isometry', np.ndarray]
) -> Union['Isometry', np.ndarray]:
"""Returns the product of self with other i.e. `out = self * other`.
This function can be used to transform point(s) or compose multiple
transforms together.
Compose multiple transforms:
frameA_to_frameB = Isometry(...)
frameB_to_frameC = Isometry(...)
frameA_to_frameC = frameB_to_frameC * frameA_to_frameB
Apply transform on single point:
pointB = frameA_to_frameB * np.array([4.0, 2.0, 1.0])
Apply transform on a pointcloud (Nx3):
pointcloudC = frameA_to_frameC * np.random.rand(1000, 3)
Args:
other: Either 3D point(s) or vector(s) to transform or other transform to
compose with.
Returns:
When multiplying with another Isometry object `other`, the composite
transform equal to `(self * other)` is returned. When `other` is a point with
shape (3,) or a pointcloud of shape (N, 3), the output is the transformed
point or pointcloud.
"""
if isinstance(other, np.ndarray):
return self.transform_points(other)
elif isinstance(other, Isometry):
return self.compose(other)
raise TypeError('Unsupported type')
| {
"content_hash": "5cc07c6ab0ca4aecc0548c531d84c9a8",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 102,
"avg_line_length": 30.36842105263158,
"alnum_prop": 0.654896013864818,
"repo_name": "google-research/sunds",
"id": "a5fa41a921133d285a7ac0eda09ba23ec9ae82c6",
"size": "5199",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sunds/core/np_geometry/isometry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "202324"
}
],
"symlink_target": ""
} |
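`Isometry.inverse()` above uses the closed form of an SE(3) inverse: for a transform [R|t], the inverse is [Rᵀ|−Rᵀt]. Below is a minimal, self-contained numpy sketch (independent of the class; the random rotation built via QR is purely illustrative) that checks this identity and the einsum contraction used by `transform_points`:

```python
import numpy as np

# Build a random rotation via QR (Q is orthogonal); flip sign if needed so det(R) = +1.
rng = np.random.default_rng(0)
Q, _ = np.linalg.qr(rng.normal(size=(3, 3)))
R = Q if np.linalg.det(Q) > 0 else -Q
t = rng.normal(size=3)

# Closed-form inverse used by Isometry.inverse(): R_inv = R.T, t_inv = -R.T @ t.
R_inv, t_inv = R.T, -R.T @ t

# Composing a transform with its inverse should yield the identity transform.
assert np.allclose(R_inv @ R, np.eye(3))
assert np.allclose(R_inv @ t + t_inv, np.zeros(3))

# transform_points applies einsum('ij,nj->ni', R, points) + t, i.e. (R @ points.T).T + t.
points = rng.normal(size=(5, 3))
assert np.allclose(np.einsum('ij,nj->ni', R, points) + t, (R @ points.T).T + t)
```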
from masseuse.core import massage
from masseuse.cli import argparser
from masseuse.kernels import kernels
import datetime as dt
import re
def makeparser(form):
def parse(astring):
return dt.datetime.strptime(astring, form).date()
return parse
def lintimes(times, lpad=0, rpad=0, step=1):
minday,maxday = min(times),max(times)
tdelta = (maxday - minday).days
return [minday + dt.timedelta(days) for days
in xrange(0-lpad, tdelta+rpad, step)]
def main(args):
parser = makeparser(args.format)
kernel = kernels(args.kernel, args.parameter)
otimes = [parser(line.split()[0])
for line in args.infile
if not line.startswith("#")]
ntimes = lintimes(otimes, step=args.step)
args.infile.seek(0)
massage(parser, kernel, ntimes, args.infile, args.outfile)
if __name__ == "__main__":
main(argparser().parse_args())
| {
"content_hash": "6d3cf8800dc1013b98b032f36c28e88e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 62,
"avg_line_length": 30.93103448275862,
"alnum_prop": 0.6633221850613155,
"repo_name": "triposorbust/masseuse",
"id": "f98ef4312bcd2fa9fb1c9d971d74aa803db0de2f",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "massage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4833"
}
],
"symlink_target": ""
} |
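`lintimes` above builds a dense, evenly spaced grid of dates spanning the observed times, with optional left/right padding; the original targets Python 2 (`xrange`). A small self-contained sketch of the same idea for Python 3, with illustrative dates:

```python
import datetime as dt

def lintimes(times, lpad=0, rpad=0, step=1):
    # Daily grid from the earliest to the latest observed date, padded on either side.
    minday, maxday = min(times), max(times)
    tdelta = (maxday - minday).days
    return [minday + dt.timedelta(days=d) for d in range(-lpad, tdelta + rpad, step)]

observed = [dt.date(2014, 1, 1), dt.date(2014, 1, 5), dt.date(2014, 1, 9)]
grid = lintimes(observed, lpad=1, rpad=2)
print(grid[0], grid[-1], len(grid))  # 2013-12-31 2014-01-10 11
```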
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.projects.base_project_integration_test import ProjectIntegrationTest
class ExamplesIntegrationTest(ProjectIntegrationTest):
def tests_examples(self):
# TODO: Remove the --exclude-target-regexp once we're on Java 8 everywhere.
pants_run = self.pants_test(['examples::',
'--exclude-target-regexp=examples/src/java/org/pantsbuild/example/plugin'])
self.assert_success(pants_run)
| {
"content_hash": "63ca9fc03d58d6d72519057bdc4901ff",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 108,
"avg_line_length": 49.5,
"alnum_prop": 0.702020202020202,
"repo_name": "gmalmquist/pants",
"id": "db8913867f06de7c24d7e914c6f29f3be03faca8",
"size": "741",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/projects/test_examples_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "437330"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5053630"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
} |
import numpy as np
from sdafile.character_inserter import (
ArrayInserter, BytesInserter, StringInserter,
)
from sdafile.testing import InserterTestCase
class TestCharacterInserter(InserterTestCase):
def setUp(self):
InserterTestCase.setUp(self)
self.grp_attrs = dict(
RecordType='character',
Empty='no',
)
self.ds_attrs = dict(
RecordType='character',
Empty='no',
)
def tearDown(self):
del self.grp_attrs
del self.ds_attrs
InserterTestCase.tearDown(self)
def test_array_inserter(self):
data = np.frombuffer(b'01', 'S1')
expected = np.array([48, 49], np.uint8).reshape(2, -1)
self.assertSimpleInsert(
ArrayInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected
)
def test_array_inserter_reshaped(self):
data = np.frombuffer(b'01', 'S1').reshape(2, -1)
expected = np.array([48, 49], np.uint8).reshape(-1, 2)
self.assertSimpleInsert(
ArrayInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected
)
def test_array_inserter_empty(self):
data = np.array([], 'S1')
self.grp_attrs['Empty'] = self.ds_attrs['Empty'] = 'yes'
self.assertSimpleInsert(
ArrayInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected=None
)
def test_string_inserter(self):
data = '01'
expected = np.array([48, 49], np.uint8).reshape(2, -1)
self.assertSimpleInsert(
StringInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected,
)
def test_string_inserter_unicode(self):
data = u'01'
expected = np.array([48, 49], np.uint8).reshape(2, -1)
self.assertSimpleInsert(
StringInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected,
)
def test_string_inserter_empty(self):
data = ''
self.grp_attrs['Empty'] = self.ds_attrs['Empty'] = 'yes'
self.assertSimpleInsert(
StringInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected=None
)
def test_bytes_inserter(self):
data = b'01'
expected = np.array([48, 49], np.uint8).reshape(2, -1)
self.assertSimpleInsert(
BytesInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected,
)
def test_bytes_inserter_empty(self):
data = b''
self.grp_attrs['Empty'] = self.ds_attrs['Empty'] = 'yes'
self.assertSimpleInsert(
BytesInserter,
data,
self.grp_attrs,
self.ds_attrs,
expected=None
)
| {
"content_hash": "e078c35f8dc730936f6cf639a96f9c14",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 64,
"avg_line_length": 26.424778761061948,
"alnum_prop": 0.5154052243804421,
"repo_name": "enthought/sandia-data-archive",
"id": "04dd713b2f9de1f962aea8c94db031117697760e",
"size": "2986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdafile/tests/test_character_inserter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4012"
},
{
"name": "Matlab",
"bytes": "1597"
},
{
"name": "Python",
"bytes": "134833"
}
],
"symlink_target": ""
} |
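The expected arrays in the tests above show how the 'character' record type is stored: text becomes a column of ASCII byte codes (e.g. '01' turns into the codes 48 and 49 in a (2, 1) array). A minimal numpy illustration of that mapping, independent of sdafile:

```python
import numpy as np

text = "01"
# Encode to ASCII, view the bytes as unsigned integers, and reshape into a column,
# matching the (2, 1) `expected` arrays used in the tests above.
codes = np.frombuffer(text.encode("ascii"), dtype=np.uint8).reshape(-1, 1)
print(codes.tolist())  # [[48], [49]]
```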
from django import forms
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.translation import gettext_lazy
from wagtail.admin import messages
from wagtail.admin.auth import user_passes_test
from wagtail.admin.views.generic import DeleteView, EditView, IndexView
from wagtail.contrib.forms.views import SubmissionsListView
from .models import ModelWithStringTypePrimaryKey
def user_is_called_bob(user):
return user.first_name == "Bob"
@user_passes_test(user_is_called_bob)
def bob_only_zone(request):
return HttpResponse("Bobs of the world unite!")
def message_test(request):
if request.method == "POST":
fn = getattr(messages, request.POST["level"])
fn(request, request.POST["message"])
return redirect("testapp_message_test")
else:
return TemplateResponse(request, "wagtailadmin/base.html")
class CustomSubmissionsListView(SubmissionsListView):
paginate_by = 50
ordering = ("submit_time",)
ordering_csv = ("-submit_time",)
def get_csv_filename(self):
"""Returns the filename for CSV file with page title at start"""
filename = super().get_csv_filename()
return self.form_page.slug + "-" + filename
class TestIndexView(IndexView):
model = ModelWithStringTypePrimaryKey
index_url_name = "testapp_generic_index"
template_name = "tests/generic_view_templates/index.html"
paginate_by = 20
context_object_name = "test_object"
page_title = gettext_lazy("test index view")
class CustomModelEditForm(forms.ModelForm):
class Meta:
model = ModelWithStringTypePrimaryKey
fields = ("content",)
class TestEditView(EditView):
model = ModelWithStringTypePrimaryKey
context_object_name = "test_object"
template_name = "tests/generic_view_templates/edit.html"
index_url_name = "testapp_generic_index"
success_url = "testapp_generic_index"
edit_url_name = "testapp_generic_edit"
delete_url_name = "testapp_generic_delete"
form_class = CustomModelEditForm
success_message = "User '{0}' updated."
page_title = gettext_lazy("test edit view")
class TestDeleteView(DeleteView):
model = ModelWithStringTypePrimaryKey
context_object_name = "test_object"
template_name = "tests/generic_view_templates/delete.html"
index_url_name = "testapp_generic_index"
edit_url_name = "testapp_generic_edit"
delete_url_name = "testapp_generic_delete"
success_message = "User '{0}' updated."
page_title = gettext_lazy("test delete view")
| {
"content_hash": "1c09542e579b37ff7413205a19119953",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 72,
"avg_line_length": 31.566265060240966,
"alnum_prop": 0.7179389312977099,
"repo_name": "zerolab/wagtail",
"id": "cca5129b86d6e7fe4eabdd36dfc2b96022d6e553",
"size": "2620",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/test/testapp/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593037"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6560334"
},
{
"name": "SCSS",
"bytes": "219204"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288102"
}
],
"symlink_target": ""
} |
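These views are referenced by name (for example `message_test` redirects to a URL named `testapp_message_test`), but the URLconf itself is not part of this file. A hypothetical minimal wiring in plain Django, with route paths invented here for illustration:

```python
from django.urls import path

from . import views  # the module shown above

urlpatterns = [
    # Route names mirror the ones the views reference; the paths are placeholders.
    path("bob-only/", views.bob_only_zone, name="bob_only_zone"),
    path("message-test/", views.message_test, name="testapp_message_test"),
]
```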
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
module = 'axis_register_64'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
def dut_axis_register_64(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser):
if os.system(build_cmd):
raise Exception("Error running build command")
return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tkeep=input_axis_tkeep,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tkeep=output_axis_tkeep,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tuser=output_axis_tuser)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[64:])
input_axis_tkeep = Signal(intbv(0)[8:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tuser = Signal(bool(0))
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[64:])
output_axis_tkeep = Signal(intbv(0)[8:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tuser = Signal(bool(0))
# sources and sinks
source_queue = Queue()
source_pause = Signal(bool(0))
sink_queue = Queue()
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource(clk,
rst,
tdata=input_axis_tdata,
tkeep=input_axis_tkeep,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tuser=input_axis_tuser,
fifo=source_queue,
pause=source_pause,
name='source')
sink = axis_ep.AXIStreamSink(clk,
rst,
tdata=output_axis_tdata,
tkeep=output_axis_tkeep,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tuser=output_axis_tuser,
fifo=sink_queue,
pause=sink_pause,
name='sink')
# DUT
dut = dut_axis_register_64(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
yield clk.posedge
print("test 1: test packet")
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: longer packet")
current_test.next = 2
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield clk.posedge
print("test 3: test packet with pauses")
current_test.next = 3
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield delay(64)
yield clk.posedge
source_pause.next = True
yield delay(32)
yield clk.posedge
source_pause.next = False
yield delay(64)
yield clk.posedge
sink_pause.next = True
yield delay(32)
yield clk.posedge
sink_pause.next = False
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 4: back-to-back packets")
current_test.next = 4
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 5: alternate pause source")
current_test.next = 5
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 6: alternate pause sink")
current_test.next = 6
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 7: tuser assert")
current_test.next = 7
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame.user = 1
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
assert rx_frame.user[-1]
yield delay(100)
raise StopSimulation
return dut, source, sink, clkgen, check
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| {
"content_hash": "5d0e91512a98a707abba68dc61d36b61",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 117,
"avg_line_length": 33.02200488997555,
"alnum_prop": 0.5009625351695542,
"repo_name": "alexforencich/hdg2000",
"id": "986201bce01cd657b86d95e81744d1be4f810d17",
"size": "13528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fpga/lib/axis/tb/test_axis_register_64.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9054"
},
{
"name": "Python",
"bytes": "934476"
},
{
"name": "Shell",
"bytes": "8661"
},
{
"name": "Verilog",
"bytes": "687285"
}
],
"symlink_target": ""
} |
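The testbench above follows a common MyHDL pattern: a free-running `clkgen` generator plus a `check` generator that drives the stimulus and ends the run with `StopSimulation`. A stripped-down sketch of just that pattern, with the Verilog co-simulation and AXI-Stream endpoints removed:

```python
from myhdl import Signal, Simulation, StopSimulation, always, delay, instance

def bench():
    clk = Signal(bool(0))

    @always(delay(4))
    def clkgen():
        # Toggle every 4 time units -> 8-unit clock period, as in the bench above.
        clk.next = not clk

    @instance
    def check():
        for i in range(4):
            yield clk.posedge
            print("rising edge", i)
        raise StopSimulation

    return clkgen, check

Simulation(bench()).run()
```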
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
(``object.attribute``). Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.
Also exports the following functions:
* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators: `validate_encoding`,
`validate_encoding_error_handler`,
`validate_encoding_and_error_handler`, `validate_boolean`,
`validate_threshold`, `validate_colon_separated_string_list`,
`validate_dependency_file`.
* `make_paths_absolute`.
* SettingSpec manipulation: `filter_settings_spec`.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import warnings
import ConfigParser as CP
import codecs
import optparse
from optparse import SUPPRESS_HELP
import docutils
import docutils.utils
import docutils.nodes
from docutils.error_reporting import locale_encoding, ErrorOutput, ErrorString
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value)
def read_config_file(option, opt, value, parser):
"""
Read a configuration file during option processing. (Option callback.)
"""
try:
new_settings = parser.get_config_file_settings(value)
except ValueError, error:
parser.error(error)
parser.values.update(new_settings, parser)
def validate_encoding(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup(value)
except LookupError:
raise (LookupError('setting "%s": unknown encoding: "%s"'
% (setting, value)),
None, sys.exc_info()[2])
return value
def validate_encoding_error_handler(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup_error(value)
except LookupError:
raise (LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", "backslashreplace", '
'"xmlcharrefreplace", and possibly others; see documentation for '
'the Python ``codecs`` module)' % value),
None, sys.exc_info()[2])
return value
def validate_encoding_and_error_handler(
setting, value, option_parser, config_parser=None, config_section=None):
"""
Side-effect: if an error handler is included in the value, it is inserted
into the appropriate place as if it was a separate setting/option.
"""
if ':' in value:
encoding, handler = value.split(':')
validate_encoding_error_handler(
setting + '_error_handler', handler, option_parser,
config_parser, config_section)
if config_parser:
config_parser.set(config_section, setting + '_error_handler',
handler)
else:
setattr(option_parser.values, setting + '_error_handler', handler)
else:
encoding = value
validate_encoding(setting, encoding, option_parser,
config_parser, config_section)
return encoding
def validate_boolean(setting, value, option_parser,
config_parser=None, config_section=None):
if isinstance(value, unicode):
try:
return option_parser.booleans[value.strip().lower()]
except KeyError:
raise (LookupError('unknown boolean value: "%s"' % value),
None, sys.exc_info()[2])
return value
def validate_nonnegative_int(setting, value, option_parser,
config_parser=None, config_section=None):
value = int(value)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def validate_threshold(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return int(value)
except ValueError:
try:
return option_parser.thresholds[value.lower()]
except (KeyError, AttributeError):
raise (LookupError('unknown threshold: %r.' % value),
None, sys.exc_info()[2])
def validate_colon_separated_string_list(
setting, value, option_parser, config_parser=None, config_section=None):
if isinstance(value, unicode):
value = value.split(':')
else:
last = value.pop()
value.extend(last.split(':'))
return value
def validate_url_trailing_slash(
setting, value, option_parser, config_parser=None, config_section=None):
if not value:
return './'
elif value.endswith('/'):
return value
else:
return value + '/'
def validate_dependency_file(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return docutils.utils.DependencyList(value)
except IOError:
return docutils.utils.DependencyList(None)
def validate_strip_class(setting, value, option_parser,
config_parser=None, config_section=None):
# convert to list:
if isinstance(value, unicode):
value = [value]
class_values = filter(None, [v.strip() for v in value.pop().split(',')])
# validate:
for class_value in class_values:
normalized = docutils.nodes.make_id(class_value)
if class_value != normalized:
raise ValueError('invalid class value %r (perhaps %r?)'
% (class_value, normalized))
value.extend(class_values)
return value
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from
`OptionParser.relative_path_settings`.
"""
if base_path is None:
base_path = os.getcwdu() # type(base_path) == unicode
# to allow combining non-ASCII cwd with unicode values in `pathdict`
for key in keys:
if key in pathdict:
value = pathdict[key]
if isinstance(value, list):
value = [make_one_path_absolute(base_path, path)
for path in value]
elif value:
value = make_one_path_absolute(base_path, value)
pathdict[key] = value
def make_one_path_absolute(base_path, path):
return os.path.abspath(os.path.join(base_path, path))
def filter_settings_spec(settings_spec, *exclude, **replace):
"""Return a copy of `settings_spec` excluding/replacing some settings.
`settings_spec` is a tuple of configuration settings with a structure
described for docutils.SettingsSpec.settings_spec.
Optional positional arguments are names of to-be-excluded settings.
Keyword arguments are option specification replacements.
(See the html4strict writer for an example.)
"""
settings = list(settings_spec)
# every third item is a sequence of option tuples
for i in range(2, len(settings), 3):
newopts = []
for opt_spec in settings[i]:
# opt_spec is ("<help>", [<option strings>], {<keyword args>})
opt_name = [opt_string[2:].replace('-', '_')
for opt_string in opt_spec[1]
if opt_string.startswith('--')
][0]
if opt_name in exclude:
continue
if opt_name in replace.keys():
newopts.append(replace[opt_name])
else:
newopts.append(opt_spec)
settings[i] = tuple(newopts)
return tuple(settings)
class Values(optparse.Values):
"""
Updates list attributes by extension rather than by replacement.
Works in conjunction with the `OptionParser.lists` instance attribute.
"""
def __init__(self, *args, **kwargs):
optparse.Values.__init__(self, *args, **kwargs)
if (not hasattr(self, 'record_dependencies')
or self.record_dependencies is None):
# Set up dependency list, in case it is needed.
self.record_dependencies = docutils.utils.DependencyList()
def update(self, other_dict, option_parser):
if isinstance(other_dict, Values):
other_dict = other_dict.__dict__
other_dict = other_dict.copy()
for setting in option_parser.lists.keys():
if (hasattr(self, setting) and setting in other_dict):
value = getattr(self, setting)
if value:
value += other_dict[setting]
del other_dict[setting]
self._update_loose(other_dict)
def copy(self):
"""Return a shallow copy of `self`."""
return self.__class__(defaults=self.__dict__)
class Option(optparse.Option):
ATTRS = optparse.Option.ATTRS + ['validator', 'overrides']
def process(self, opt, value, values, parser):
"""
Call the validator function on applicable settings and
evaluate the 'overrides' option.
Extends `optparse.Option.process`.
"""
result = optparse.Option.process(self, opt, value, values, parser)
setting = self.dest
if setting:
if self.validator:
value = getattr(values, setting)
try:
new_value = self.validator(setting, value, parser)
except Exception, error:
raise (optparse.OptionValueError(
'Error in option "%s":\n %s'
% (opt, ErrorString(error))),
None, sys.exc_info()[2])
setattr(values, setting, new_value)
if self.overrides:
setattr(values, self.overrides, None)
return result
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
"""
Parser for command-line and library use. The `settings_spec`
specification here and in other Docutils components are merged to build
the set of command-line options and runtime settings for this process.
Common settings (defined below) and component-specific settings must not
conflict. Short options are reserved for common settings, and components
are restricted to using long options.
"""
standard_config_files = [
'/etc/docutils.conf', # system-wide
'./docutils.conf', # project-specific
'~/.docutils'] # user-specific
"""Docutils configuration files, using ConfigParser syntax. Filenames
will be tilde-expanded later. Later files override earlier ones."""
threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
"""Possible inputs for for --report and --halt threshold values."""
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
booleans={'1': 1, 'on': 1, 'yes': 1, 'true': 1,
'0': 0, 'off': 0, 'no': 0, 'false': 0, '': 0}
"""Lookup table for boolean configuration file settings."""
default_error_encoding = getattr(sys.stderr, 'encoding',
None) or locale_encoding or 'ascii'
default_error_encoding_error_handler = 'backslashreplace'
settings_spec = (
'General Docutils Options',
None,
(('Specify the document title as metadata.',
['--title'], {}),
('Include a "Generated by Docutils" credit and link.',
['--generator', '-g'], {'action': 'store_true',
'validator': validate_boolean}),
('Do not include a generator credit.',
['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
('Include the date at the end of the document (UTC).',
['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
'dest': 'datestamp'}),
('Include the time & date (UTC).',
['--time', '-t'], {'action': 'store_const',
'const': '%Y-%m-%d %H:%M UTC',
'dest': 'datestamp'}),
('Do not include a datestamp of any kind.',
['--no-datestamp'], {'action': 'store_const', 'const': None,
'dest': 'datestamp'}),
('Include a "View document source" link.',
['--source-link', '-s'], {'action': 'store_true',
'validator': validate_boolean}),
('Use <URL> for a source link; implies --source-link.',
['--source-url'], {'metavar': '<URL>'}),
('Do not include a "View document source" link.',
['--no-source-link'],
{'action': 'callback', 'callback': store_multiple,
'callback_args': ('source_link', 'source_url')}),
('Link from section headers to TOC entries. (default)',
['--toc-entry-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
'default': 'entry'}),
('Link from section headers to the top of the TOC.',
['--toc-top-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
('Disable backlinks to the table of contents.',
['--no-toc-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_false'}),
('Link from footnotes/citations to references. (default)',
['--footnote-backlinks'],
{'action': 'store_true', 'default': 1,
'validator': validate_boolean}),
('Disable backlinks from footnotes and citations.',
['--no-footnote-backlinks'],
{'dest': 'footnote_backlinks', 'action': 'store_false'}),
('Enable section numbering by Docutils. (default)',
['--section-numbering'],
{'action': 'store_true', 'dest': 'sectnum_xform',
'default': 1, 'validator': validate_boolean}),
('Disable section numbering by Docutils.',
['--no-section-numbering'],
{'action': 'store_false', 'dest': 'sectnum_xform'}),
('Remove comment elements from the document tree.',
['--strip-comments'],
{'action': 'store_true', 'validator': validate_boolean}),
('Leave comment elements in the document tree. (default)',
['--leave-comments'],
{'action': 'store_false', 'dest': 'strip_comments'}),
('Remove all elements with classes="<class>" from the document tree. '
'Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-elements-with-class'],
{'action': 'append', 'dest': 'strip_elements_with_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Remove all classes="<class>" attributes from elements in the '
'document tree. Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-class'],
{'action': 'append', 'dest': 'strip_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Report system messages at or higher than <level>: "info" or "1", '
'"warning"/"2" (default), "error"/"3", "severe"/"4", "none"/"5"',
['--report', '-r'], {'choices': threshold_choices, 'default': 2,
'dest': 'report_level', 'metavar': '<level>',
'validator': validate_threshold}),
('Report all system messages. (Same as "--report=1".)',
['--verbose', '-v'], {'action': 'store_const', 'const': 1,
'dest': 'report_level'}),
('Report no system messages. (Same as "--report=5".)',
['--quiet', '-q'], {'action': 'store_const', 'const': 5,
'dest': 'report_level'}),
('Halt execution at system messages at or above <level>. '
'Levels as in --report. Default: 4 (severe).',
['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
'default': 4, 'metavar': '<level>',
'validator': validate_threshold}),
('Halt at the slightest problem. Same as "--halt=info".',
['--strict'], {'action': 'store_const', 'const': 1,
'dest': 'halt_level'}),
('Enable a non-zero exit status for non-halting system messages at '
'or above <level>. Default: 5 (disabled).',
['--exit-status'], {'choices': threshold_choices,
'dest': 'exit_status_level',
'default': 5, 'metavar': '<level>',
'validator': validate_threshold}),
('Enable debug-level system messages and diagnostics.',
['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
('Disable debug output. (default)',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
('Enable Python tracebacks when Docutils is halted.',
['--traceback'], {'action': 'store_true', 'default': None,
'validator': validate_boolean}),
('Disable Python tracebacks. (default)',
['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
('Specify the encoding and optionally the '
'error handler of input text. Default: <locale-dependent>:strict.',
['--input-encoding', '-i'],
{'metavar': '<name[:handler]>',
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for undecodable characters. '
'Choices: "strict" (default), "ignore", and "replace".',
['--input-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify the text encoding and optionally the error handler for '
'output. Default: UTF-8:strict.',
['--output-encoding', '-o'],
{'metavar': '<name[:handler]>', 'default': 'utf-8',
'validator': validate_encoding_and_error_handler}),
('Specify error handler for unencodable output characters; '
'"strict" (default), "ignore", "replace", '
'"xmlcharrefreplace", "backslashreplace".',
['--output-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify text encoding and error handler for error output. '
'Default: %s:%s.'
% (default_error_encoding, default_error_encoding_error_handler),
['--error-encoding', '-e'],
{'metavar': '<name[:handler]>', 'default': default_error_encoding,
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for unencodable characters in '
'error output. Default: %s.'
% default_error_encoding_error_handler,
['--error-encoding-error-handler'],
{'default': default_error_encoding_error_handler,
'validator': validate_encoding_error_handler}),
('Specify the language (as BCP 47 language tag). Default: en.',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
'metavar': '<name>'}),
('Write output file dependencies to <file>.',
['--record-dependencies'],
{'metavar': '<file>', 'validator': validate_dependency_file,
'default': None}), # default set in Values class
('Read configuration settings from <file>, if it exists.',
['--config'], {'metavar': '<file>', 'type': 'string',
'action': 'callback', 'callback': read_config_file}),
("Show this program's version number and exit.",
['--version', '-V'], {'action': 'version'}),
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
# Typically not useful for non-programmatical use:
(SUPPRESS_HELP, ['--id-prefix'], {'default': ''}),
(SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}),
# Hidden options, for development use only:
(SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--expose-internal-attribute'],
{'action': 'append', 'dest': 'expose_internals',
'validator': validate_colon_separated_string_list}),
(SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}),
))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
settings_defaults = {'_disable_config': None,
'_source': None,
'_destination': None,
'_config_files': None}
"""Defaults for settings that don't have command-line option equivalents."""
relative_path_settings = ('warning_stream',)
config_section = 'general'
version_template = ('%%prog (Docutils %s [%s], Python %s, on %s)'
% (docutils.__version__, docutils.__version_details__,
sys.version.split()[0], sys.platform))
"""Default version message."""
def __init__(self, components=(), defaults=None, read_config_files=None,
*args, **kwargs):
"""
`components` is a list of Docutils components each containing a
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
self.lists = {}
"""Set of list-type settings."""
self.config_files = []
"""List of paths of applied configuration files."""
optparse.OptionParser.__init__(
self, option_class=Option, add_help_option=None,
formatter=optparse.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
self.components = (self,) + tuple(components)
self.populate_from_components(self.components)
self.set_defaults_from_dict(defaults or {})
if read_config_files and not self.defaults['_disable_config']:
try:
config_settings = self.get_standard_config_settings()
except ValueError, error:
self.error(error)
self.set_defaults_from_dict(config_settings.__dict__)
def populate_from_components(self, components):
"""
For each component, first populate from the `SettingsSpec.settings_spec`
structure, then from the `SettingsSpec.settings_defaults` dictionary.
After all components have been processed, check for and populate from
each component's `SettingsSpec.settings_default_overrides` dictionary.
"""
for component in components:
if component is None:
continue
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
for i in range(0, len(settings_spec), 3):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optparse.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
option = group.add_option(help=help_text, *option_strings,
**kwargs)
if kwargs.get('action') == 'append':
self.lists[option.dest] = 1
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides)
def get_standard_config_files(self):
"""Return list of config files, from environment or standard."""
try:
config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
except KeyError:
config_files = self.standard_config_files
# If 'HOME' is not set, expandvars() requires the 'pwd' module which is
# not available under certain environments, for example, within
# mod_python. The publisher ends up in here, and we need to publish
# from within mod_python. Therefore we need to avoid expanding when we
# are in those environments.
expand = os.path.expanduser
if 'HOME' not in os.environ:
try:
import pwd
except ImportError:
expand = lambda x: x
return [expand(f) for f in config_files if f.strip()]
def get_standard_config_settings(self):
settings = Values()
for filename in self.get_standard_config_files():
settings.update(self.get_config_file_settings(filename), self)
return settings
def get_config_file_settings(self, config_file):
"""Returns a dictionary containing appropriate config file settings."""
parser = ConfigParser()
parser.read(config_file, self)
self.config_files.extend(parser._files)
base_path = os.path.dirname(config_file)
applied = {}
settings = Values()
for component in self.components:
if not component:
continue
for section in (tuple(component.config_section_dependencies or ())
+ (component.config_section,)):
if section in applied:
continue
applied[section] = 1
settings.update(parser.get_section(section), self)
make_paths_absolute(
settings.__dict__, self.relative_path_settings, base_path)
return settings.__dict__
def check_values(self, values, args):
"""Store positional arguments as runtime settings."""
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings)
values._config_files = self.config_files
return values
def check_args(self, args):
source = destination = None
if args:
source = args.pop(0)
if source == '-': # means stdin
source = None
if args:
destination = args.pop(0)
if destination == '-': # means stdout
destination = None
if args:
self.error('Maximum 2 arguments allowed.')
if source and source == destination:
self.error('Do not specify the same file for both source and '
'destination. It will clobber the source file.')
return source, destination
def set_defaults_from_dict(self, defaults):
self.defaults.update(defaults)
def get_default_values(self):
"""Needed to get custom `Values` instances."""
defaults = Values(self.defaults)
defaults._config_files = self.config_files
return defaults
def get_option_by_dest(self, dest):
"""
Get an option by its dest.
If you're supplying a dest which is shared by several options,
it is undefined which option of those is returned.
A KeyError is raised if there is no option with the supplied
dest.
"""
for group in self.option_groups + [self]:
for option in group.option_list:
if option.dest == dest:
return option
raise KeyError('No option with dest == %r.' % dest)
class ConfigParser(CP.RawConfigParser):
old_settings = {
'pep_stylesheet': ('pep_html writer', 'stylesheet'),
'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
'pep_template': ('pep_html writer', 'template')}
"""{old setting: (new section, new setting)} mapping, used by
`handle_old_config`, to convert settings from the old [options] section."""
old_warning = """
The "[option]" section is deprecated. Support for old-format configuration
files may be removed in a future Docutils release. Please revise your
configuration files. See <http://docutils.sf.net/docs/user/config.html>,
section "Old-Format Configuration Files".
"""
not_utf8_error = """\
Unable to read configuration file "%s": content not encoded as UTF-8.
Skipping "%s" configuration file.
"""
def __init__(self, *args, **kwargs):
CP.RawConfigParser.__init__(self, *args, **kwargs)
self._files = []
"""List of paths of configuration files read."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def read(self, filenames, option_parser):
if type(filenames) in (str, unicode):
filenames = [filenames]
for filename in filenames:
try:
# Config files must be UTF-8-encoded:
fp = codecs.open(filename, 'r', 'utf-8')
except IOError:
continue
try:
if sys.version_info < (3,2):
CP.RawConfigParser.readfp(self, fp, filename)
else:
CP.RawConfigParser.read_file(self, fp, filename)
except UnicodeDecodeError:
self._stderr.write(self.not_utf8_error % (filename, filename))
fp.close()
continue
fp.close()
self._files.append(filename)
if self.has_section('options'):
self.handle_old_config(filename)
self.validate_settings(filename, option_parser)
def handle_old_config(self, filename):
warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
filename, 0)
options = self.get_section('options')
if not self.has_section('general'):
self.add_section('general')
for key, value in options.items():
if key in self.old_settings:
section, setting = self.old_settings[key]
if not self.has_section(section):
self.add_section(section)
else:
section = 'general'
setting = key
if not self.has_option(section, setting):
self.set(section, setting, value)
self.remove_section('options')
def validate_settings(self, filename, option_parser):
"""
Call the validator function and implement overrides on all applicable
settings.
"""
for section in self.sections():
for setting in self.options(section):
try:
option = option_parser.get_option_by_dest(setting)
except KeyError:
continue
if option.validator:
value = self.get(section, setting)
try:
new_value = option.validator(
setting, value, option_parser,
config_parser=self, config_section=section)
except Exception, error:
raise (ValueError(
'Error in config file "%s", section "[%s]":\n'
' %s\n'
' %s = %s'
% (filename, section, ErrorString(error),
setting, value)), None, sys.exc_info()[2])
self.set(section, setting, new_value)
if option.overrides:
self.set(section, option.overrides, None)
def optionxform(self, optionstr):
"""
Transform '-' to '_' so the cmdline form of option names can be used.
"""
return optionstr.lower().replace('-', '_')
def get_section(self, section):
"""
Return a given section as a dictionary (empty if the section
doesn't exist).
"""
section_dict = {}
if self.has_section(section):
for option in self.options(section):
section_dict[option] = self.get(section, option)
return section_dict
class ConfigDeprecationWarning(DeprecationWarning):
"""Warning for deprecated configuration file features."""
| {
"content_hash": "86b4bcc80c744da5959f1270d533e20d",
"timestamp": "",
"source": "github",
"line_count": 784,
"max_line_length": 80,
"avg_line_length": 43.26020408163265,
"alnum_prop": 0.5713527538624837,
"repo_name": "ddd332/presto",
"id": "83f5fde594647739f22da18ac205dba4666b0b6c",
"size": "34078",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "presto-docs/target/sphinx/docutils/frontend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "130017"
},
{
"name": "GAP",
"bytes": "41169"
},
{
"name": "Java",
"bytes": "6836515"
},
{
"name": "JavaScript",
"bytes": "135954"
},
{
"name": "Python",
"bytes": "8056702"
},
{
"name": "TeX",
"bytes": "55016"
}
],
"symlink_target": ""
} |
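`validate_encoding_and_error_handler` above accepts settings of the form `<encoding>[:<handler>]` and checks each half against Python's `codecs` registry. A standalone sketch of that convention (plain Python 3 here, whereas the file itself targets Python 2):

```python
import codecs

def check_encoding_setting(value):
    # Split an "<encoding>:<handler>" value the way the validator above does.
    if ":" in value:
        encoding, handler = value.split(":")
        codecs.lookup_error(handler)  # raises LookupError for an unknown handler
    else:
        encoding = value
    codecs.lookup(encoding)           # raises LookupError for an unknown encoding
    return encoding

print(check_encoding_setting("utf-8:backslashreplace"))  # utf-8
```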
"""Foundational utilities common to many sql modules.
"""
import itertools
import operator
import re
from .visitors import ClauseVisitor
from .. import exc
from .. import util
coercions = None # type: types.ModuleType
elements = None # type: types.ModuleType
type_api = None # type: types.ModuleType
PARSE_AUTOCOMMIT = util.symbol("PARSE_AUTOCOMMIT")
NO_ARG = util.symbol("NO_ARG")
class Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
def _clone(element, **kw):
return element._clone()
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(
elem for elem in a if all_overlap.intersection(elem._cloned_set)
)
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(
elem for elem in a if not all_overlap.intersection(elem._cloned_set)
)
class _DialectArgView(util.collections_abc.MutableMapping):
"""A dictionary view of dialect-level arguments in the form
<dialectname>_<argument_name>.
"""
def __init__(self, obj):
self.obj = obj
def _key(self, key):
try:
dialect, value_key = key.split("_", 1)
except ValueError:
raise KeyError(key)
else:
return dialect, value_key
def __getitem__(self, key):
dialect, value_key = self._key(key)
try:
opt = self.obj.dialect_options[dialect]
except exc.NoSuchModuleError:
raise KeyError(key)
else:
return opt[value_key]
def __setitem__(self, key, value):
try:
dialect, value_key = self._key(key)
except KeyError:
raise exc.ArgumentError(
"Keys must be of the form <dialectname>_<argname>"
)
else:
self.obj.dialect_options[dialect][value_key] = value
def __delitem__(self, key):
dialect, value_key = self._key(key)
del self.obj.dialect_options[dialect][value_key]
def __len__(self):
return sum(
len(args._non_defaults)
for args in self.obj.dialect_options.values()
)
def __iter__(self):
return (
util.safe_kwarg("%s_%s" % (dialect_name, value_name))
for dialect_name in self.obj.dialect_options
for value_name in self.obj.dialect_options[
dialect_name
]._non_defaults
)
class _DialectArgDict(util.collections_abc.MutableMapping):
"""A dictionary view of dialect-level arguments for a specific
dialect.
Maintains a separate collection of user-specified arguments
and dialect-specified default arguments.
"""
def __init__(self):
self._non_defaults = {}
self._defaults = {}
def __len__(self):
return len(set(self._non_defaults).union(self._defaults))
def __iter__(self):
return iter(set(self._non_defaults).union(self._defaults))
def __getitem__(self, key):
if key in self._non_defaults:
return self._non_defaults[key]
else:
return self._defaults[key]
def __setitem__(self, key, value):
self._non_defaults[key] = value
def __delitem__(self, key):
del self._non_defaults[key]
class DialectKWArgs(object):
"""Establish the ability for a class to have dialect-specific arguments
with defaults and constructor validation.
The :class:`.DialectKWArgs` interacts with the
:attr:`.DefaultDialect.construct_arguments` present on a dialect.
.. seealso::
:attr:`.DefaultDialect.construct_arguments`
"""
@classmethod
def argument_for(cls, dialect_name, argument_name, default):
"""Add a new kind of dialect-specific keyword argument for this class.
E.g.::
Index.argument_for("mydialect", "length", None)
some_index = Index('a', 'b', mydialect_length=5)
The :meth:`.DialectKWArgs.argument_for` method is a per-argument
way adding extra arguments to the
:attr:`.DefaultDialect.construct_arguments` dictionary. This
dictionary provides a list of argument names accepted by various
schema-level constructs on behalf of a dialect.
New dialects should typically specify this dictionary all at once as a
data member of the dialect class. The use case for ad-hoc addition of
argument names is typically for end-user code that is also using
a custom compilation scheme which consumes the additional arguments.
:param dialect_name: name of a dialect. The dialect must be
locatable, else a :class:`.NoSuchModuleError` is raised. The
dialect must also include an existing
:attr:`.DefaultDialect.construct_arguments` collection, indicating
that it participates in the keyword-argument validation and default
system, else :class:`.ArgumentError` is raised. If the dialect does
not include this collection, then any keyword argument can be
specified on behalf of this dialect already. All dialects packaged
within SQLAlchemy include this collection, however for third party
dialects, support may vary.
:param argument_name: name of the parameter.
:param default: default value of the parameter.
.. versionadded:: 0.9.4
"""
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
if construct_arg_dictionary is None:
raise exc.ArgumentError(
"Dialect '%s' does have keyword-argument "
"validation and defaults enabled configured" % dialect_name
)
if cls not in construct_arg_dictionary:
construct_arg_dictionary[cls] = {}
construct_arg_dictionary[cls][argument_name] = default
@util.memoized_property
def dialect_kwargs(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
The arguments are present here in their original ``<dialect>_<kwarg>``
format. Only arguments that were actually passed are included;
unlike the :attr:`.DialectKWArgs.dialect_options` collection, which
contains all options known by this dialect including defaults.
The collection is also writable; keys are accepted of the
form ``<dialect>_<kwarg>`` where the value will be assembled
into the list of options.
.. versionadded:: 0.9.2
.. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
collection is now writable.
.. seealso::
:attr:`.DialectKWArgs.dialect_options` - nested dictionary form
"""
return _DialectArgView(self)
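# Illustrative sketch (not part of the original module): the flat
# ``<dialect>_<kwarg>`` view returned above can be read directly, and is
# writable as described in the docstring. Assuming an Index created with
# a PostgreSQL-specific option:
#
#     idx = Index("my_idx", table.c.data, postgresql_using="gin")
#     idx.dialect_kwargs["postgresql_using"]      # -> "gin"
#     "postgresql_using" in idx.dialect_kwargs    # -> True
#
# ``idx.kwargs`` below is simply a synonym for the same view.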
@property
def kwargs(self):
"""A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
return self.dialect_kwargs
@util.dependencies("sqlalchemy.dialects")
def _kw_reg_for_dialect(dialects, dialect_name):
dialect_cls = dialects.registry.load(dialect_name)
if dialect_cls.construct_arguments is None:
return None
return dict(dialect_cls.construct_arguments)
_kw_registry = util.PopulateDict(_kw_reg_for_dialect)
def _kw_reg_for_dialect_cls(self, dialect_name):
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
d = _DialectArgDict()
if construct_arg_dictionary is None:
d._defaults.update({"*": None})
else:
for cls in reversed(self.__class__.__mro__):
if cls in construct_arg_dictionary:
d._defaults.update(construct_arg_dictionary[cls])
return d
@util.memoized_property
def dialect_options(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
This is a two-level nested registry, keyed to ``<dialect_name>``
and ``<argument_name>``. For example, the ``postgresql_where``
argument would be locatable as::
arg = my_object.dialect_options['postgresql']['where']
.. versionadded:: 0.9.2
.. seealso::
:attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form
"""
return util.PopulateDict(
util.portable_instancemethod(self._kw_reg_for_dialect_cls)
)
def _validate_dialect_kwargs(self, kwargs):
# validate remaining kwargs that they all specify DB prefixes
if not kwargs:
return
for k in kwargs:
m = re.match("^(.+?)_(.+)$", k)
if not m:
raise TypeError(
"Additional arguments should be "
"named <dialectname>_<argument>, got '%s'" % k
)
dialect_name, arg_name = m.group(1, 2)
try:
construct_arg_dictionary = self.dialect_options[dialect_name]
except exc.NoSuchModuleError:
util.warn(
"Can't validate argument %r; can't "
"locate any SQLAlchemy dialect named %r"
% (k, dialect_name)
)
self.dialect_options[dialect_name] = d = _DialectArgDict()
d._defaults.update({"*": None})
d._non_defaults[arg_name] = kwargs[k]
else:
if (
"*" not in construct_arg_dictionary
and arg_name not in construct_arg_dictionary
):
raise exc.ArgumentError(
"Argument %r is not accepted by "
"dialect %r on behalf of %r"
% (k, dialect_name, self.__class__)
)
else:
construct_arg_dictionary[arg_name] = kwargs[k]
class Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(Generative):
"""Mark a ClauseElement as supporting execution.
:class:`.Executable` is a superclass for all "statement" types
of objects, including :func:`select`, :func:`delete`, :func:`update`,
:func:`insert`, :func:`text`.
"""
supports_execution = True
_execution_options = util.immutabledict()
_bind = None
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during
execution.
Execution options can be set on a per-statement or
per :class:`.Connection` basis. Additionally, the
:class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide
access to execution options which they in turn configure upon
connections.
The :meth:`execution_options` method is generative. A new
instance of this statement is returned that contains the options::
statement = select([table.c.x, table.c.y])
statement = statement.execution_options(autocommit=True)
Note that only a subset of possible execution options can be applied
to a statement - these include "autocommit" and "stream_results",
but not "isolation_level" or "compiled_cache".
See :meth:`.Connection.execution_options` for a full list of
possible options.
.. seealso::
:meth:`.Connection.execution_options`
:meth:`.Query.execution_options`
:meth:`.Executable.get_execution_options`
"""
if "isolation_level" in kw:
raise exc.ArgumentError(
"'isolation_level' execution option may only be specified "
"on Connection.execution_options(), or "
"per-engine using the isolation_level "
"argument to create_engine()."
)
if "compiled_cache" in kw:
raise exc.ArgumentError(
"'compiled_cache' execution option may only be specified "
"on Connection.execution_options(), not per statement."
)
self._execution_options = self._execution_options.union(kw)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Executable.execution_options`
"""
return self._execution_options
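# Illustrative sketch (not part of the original module): because
# ``execution_options`` is generative, options accumulate on the returned
# copy and can be read back here. Assuming a Table ``table``:
#
#     stmt = select([table.c.x]).execution_options(stream_results=True)
#     stmt.get_execution_options()   # contains {'stream_results': True}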
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`."""
e = self.bind
if e is None:
label = getattr(self, "description", self.__class__.__name__)
msg = (
"This %s is not directly bound to a Connection or Engine. "
"Use the .execute() method of a Connection or Engine "
"to execute this construct." % label
)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`, returning the
result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
@property
def bind(self):
"""Returns the :class:`.Engine` or :class:`.Connection` to
which this :class:`.Executable` is bound, or None if none found.
This is a traversal which checks locally, then
checks among the "from" clauses of associated objects
until a bound engine or connection is found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
class SchemaEventTarget(object):
"""Base class for elements that are the targets of :class:`.DDLEvents`
events.
This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
"""
def _set_parent(self, parent):
"""Associate with this SchemaEvent's parent object."""
def _set_parent_with_dispatch(self, parent):
self.dispatch.before_parent_attach(self, parent)
self._set_parent(parent)
self.dispatch.after_parent_attach(self, parent)
class SchemaVisitor(ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {"schema_visitor": True}
class ColumnCollection(object):
"""Collection of :class:`.ColumnElement` instances, typically for
selectables.
The :class:`.ColumnCollection` has both mapping- and sequence- like
behaviors. A :class:`.ColumnCollection` usually stores :class:`.Column`
objects, which are then accessible both via mapping style access as well
as attribute access style. The name for which a :class:`.Column` would
be present is normally that of the :paramref:`.Column.key` parameter,
however depending on the context, it may be stored under a special label
name::
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.sql import ColumnCollection
>>> x, y = Column('x', Integer), Column('y', Integer)
>>> cc = ColumnCollection(columns=[x, y])
>>> cc.x
Column('x', Integer(), table=None)
>>> cc.y
Column('y', Integer(), table=None)
>>> cc['x']
Column('x', Integer(), table=None)
>>> cc['y']
Column('y', Integer(), table=None)
:class:`.ColumnCollection` also indexes the columns in order and allows
them to be accessible by their integer position::
>>> cc[0]
Column('x', Integer(), table=None)
>>> cc[1]
Column('y', Integer(), table=None)
.. versionadded:: 1.4 :class:`.ColumnCollection` allows integer-based
index access to the collection.
Iterating the collection yields the column expressions in order::
>>> list(cc)
[Column('x', Integer(), table=None),
Column('y', Integer(), table=None)]
The base :class:`.ColumnCollection` object can store duplicates, which can
mean either two columns with the same key, in which case the column
returned by key access is **arbitrary**::
>>> x1, x2 = Column('x', Integer), Column('x', Integer)
>>> cc = ColumnCollection(columns=[x1, x2])
>>> list(cc)
[Column('x', Integer(), table=None),
Column('x', Integer(), table=None)]
>>> cc['x'] is x1
False
>>> cc['x'] is x2
True
Or it can also mean the same column multiple times. These cases are
supported as :class:`.ColumnCollection` is used to represent the columns in
a SELECT statement which may include duplicates.
A special subclass :class:`.DedupeColumnCollection` exists which instead
maintains SQLAlchemy's older behavior of not allowing duplicates; this
collection is used for schema level objects like :class:`.Table` and
:class:`.PrimaryKeyConstraint` where this deduping is helpful. The
:class:`.DedupeColumnCollection` class also has additional mutation methods
as the schema constructs have more use cases that require removal and
replacement of columns.
.. versionchanged:: 1.4 :class:`.ColumnCollection` now stores duplicate
column keys as well as the same column in multiple positions. The
:class:`.DedupeColumnCollection` class is added to maintain the
former behavior in those cases where deduplication as well as
additional replace/remove operations are needed.
"""
__slots__ = "_collection", "_index", "_colset"
def __init__(self, columns=None):
object.__setattr__(self, "_colset", set())
object.__setattr__(self, "_index", {})
object.__setattr__(self, "_collection", [])
if columns:
self._initial_populate(columns)
def _initial_populate(self, iter_):
self._populate_separate_keys(iter_)
@property
def _all_columns(self):
return [col for (k, col) in self._collection]
def keys(self):
return [k for (k, col) in self._collection]
def __len__(self):
return len(self._collection)
def __iter__(self):
# turn to a list first to maintain over a course of changes
return iter([col for k, col in self._collection])
def __getitem__(self, key):
try:
return self._index[key]
except KeyError:
if isinstance(key, util.int_types):
raise IndexError(key)
else:
raise
def __getattr__(self, key):
try:
return self._index[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
if key not in self._index:
if not isinstance(key, util.string_types):
raise exc.ArgumentError(
"__contains__ requires a string argument"
)
return False
else:
return True
def compare(self, other):
for l, r in util.zip_longest(self, other):
if l is not r:
return False
else:
return True
def __eq__(self, other):
return self.compare(other)
def get(self, key, default=None):
if key in self._index:
return self._index[key]
else:
return default
def __str__(self):
return repr([str(c) for c in self])
def __setitem__(self, key, value):
raise NotImplementedError()
def __delitem__(self, key):
raise NotImplementedError()
def __setattr__(self, key, obj):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def remove(self, column):
raise NotImplementedError()
def update(self, iter_):
raise NotImplementedError()
__hash__ = None
def _populate_separate_keys(self, iter_):
"""populate from an iterator of (key, column)"""
cols = list(iter_)
self._collection[:] = cols
self._colset.update(c for k, c in self._collection)
self._index.update(
(idx, c) for idx, (k, c) in enumerate(self._collection)
)
self._index.update({k: col for k, col in reversed(self._collection)})
def add(self, column, key=None):
if key is None:
key = column.key
l = len(self._collection)
self._collection.append((key, column))
self._colset.add(column)
self._index[l] = column
if key not in self._index:
self._index[key] = column
def __getstate__(self):
return {"_collection": self._collection, "_index": self._index}
def __setstate__(self, state):
object.__setattr__(self, "_index", state["_index"])
object.__setattr__(self, "_collection", state["_collection"])
object.__setattr__(
self, "_colset", {col for k, col in self._collection}
)
def contains_column(self, col):
return col in self._colset
def as_immutable(self):
return ImmutableColumnCollection(self)
def corresponding_column(self, column, require_embedded=False):
"""Given a :class:`.ColumnElement`, return the exported
:class:`.ColumnElement` object from this :class:`.ColumnCollection`
which corresponds to that original :class:`.ColumnElement` via a common
ancestor column.
:param column: the target :class:`.ColumnElement` to be matched
:param require_embedded: only return corresponding columns for
the given :class:`.ColumnElement`, if the given
:class:`.ColumnElement` is actually present within a sub-element
of this :class:`.Selectable`. Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`.Selectable`.
.. seealso::
:meth:`.Selectable.corresponding_column` - invokes this method
against the collection returned by
:attr:`.Selectable.exported_columns`.
.. versionchanged:: 1.4 the implementation for ``corresponding_column``
was moved onto the :class:`.ColumnCollection` itself.
"""
def embedded(expanded_proxy_set, target_set):
for t in target_set.difference(expanded_proxy_set):
if not set(_expand_cloned([t])).intersection(
expanded_proxy_set
):
return False
return True
# don't dig around if the column is locally present
if column in self._colset:
return column
col, intersect = None, None
target_set = column.proxy_set
cols = [c for (k, c) in self._collection]
for c in cols:
expanded_proxy_set = set(_expand_cloned(c.proxy_set))
i = target_set.intersection(expanded_proxy_set)
if i and (
not require_embedded
or embedded(expanded_proxy_set, target_set)
):
if col is None:
# no corresponding column yet, pick this one.
col, intersect = c, i
elif len(i) > len(intersect):
# 'c' has a larger field of correspondence than
# 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
# matches a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
col, intersect = c, i
elif i == intersect:
# they have the same field of correspondence. see
# which proxy_set has fewer columns in it, which
# indicates a closer relationship with the root
# column. Also take into account the "weight"
# attribute which CompoundSelect() uses to give
# higher precedence to columns based on vertical
# position in the compound statement, and discard
# columns that have no reference to the target
# column (also occurs with CompoundSelect)
col_distance = util.reduce(
operator.add,
[
sc._annotations.get("weight", 1)
for sc in col._uncached_proxy_set()
if sc.shares_lineage(column)
],
)
c_distance = util.reduce(
operator.add,
[
sc._annotations.get("weight", 1)
for sc in c._uncached_proxy_set()
if sc.shares_lineage(column)
],
)
if c_distance < col_distance:
col, intersect = c, i
return col
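# Illustrative sketch (not part of the original module): assuming a
# Table ``t``, the column collection of a subquery derived from it can
# map the original table column back to its proxy via this method:
#
#     sub = select([t]).subquery()
#     sub.c.corresponding_column(t.c.x) is sub.c.x   # -> True
#     sub.c.x.shares_lineage(t.c.x)                  # -> True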
class DedupeColumnCollection(ColumnCollection):
"""A :class:`.ColumnCollection that maintains deduplicating behavior.
This is useful by schema level objects such as :class:`.Table` and
:class:`.PrimaryKeyConstraint`. The collection includes more
sophisticated mutator methods as well to suit schema objects which
require mutable column collections.
.. versionadded:: 1.4
"""
def add(self, column, key=None):
if key is not None and column.key != key:
raise exc.ArgumentError(
"DedupeColumnCollection requires columns be under "
"the same key as their .key"
)
key = column.key
if key is None:
raise exc.ArgumentError(
"Can't add unnamed column to column collection"
)
if key in self._index:
existing = self._index[key]
if existing is column:
return
self.replace(column)
# pop out memoized proxy_set as this
# operation may very well be occurring
# in a _make_proxy operation
util.memoized_property.reset(column, "proxy_set")
else:
l = len(self._collection)
self._collection.append((key, column))
self._colset.add(column)
self._index[l] = column
self._index[key] = column
def _populate_separate_keys(self, iter_):
"""populate from an iterator of (key, column)"""
cols = list(iter_)
replace_col = []
for k, col in cols:
if col.key != k:
raise exc.ArgumentError(
"DedupeColumnCollection requires columns be under "
"the same key as their .key"
)
if col.name in self._index and col.key != col.name:
replace_col.append(col)
elif col.key in self._index:
replace_col.append(col)
else:
self._index[k] = col
self._collection.append((k, col))
self._colset.update(c for (k, c) in self._collection)
self._index.update(
(idx, c) for idx, (k, c) in enumerate(self._collection)
)
for col in replace_col:
self.replace(col)
def extend(self, iter_):
self._populate_separate_keys((col.key, col) for col in iter_)
def remove(self, column):
if column not in self._colset:
raise ValueError(
"Can't remove column %r; column is not in this collection"
% column
)
del self._index[column.key]
self._colset.remove(column)
self._collection[:] = [
(k, c) for (k, c) in self._collection if c is not column
]
self._index.update(
{idx: col for idx, (k, col) in enumerate(self._collection)}
)
# delete higher index
del self._index[len(self._collection)]
def replace(self, column):
"""add the given column to this collection, removing unaliased
versions of this column as well as existing columns with the
same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
will remove the original 'col1' from the collection, and add
the new column under the name 'columnone'.
Used by schema.Column to override columns during table reflection.
"""
remove_col = set()
# remove up to two columns based on matches of name as well as key
if column.name in self._index and column.key != column.name:
other = self._index[column.name]
if other.name == other.key:
remove_col.add(other)
if column.key in self._index:
remove_col.add(self._index[column.key])
new_cols = []
replaced = False
for k, col in self._collection:
if col in remove_col:
if not replaced:
replaced = True
new_cols.append((column.key, column))
else:
new_cols.append((k, col))
if remove_col:
self._colset.difference_update(remove_col)
if not replaced:
new_cols.append((column.key, column))
self._colset.add(column)
self._collection[:] = new_cols
self._index.clear()
self._index.update(
{idx: col for idx, (k, col) in enumerate(self._collection)}
)
self._index.update(self._collection)
class ImmutableColumnCollection(util.ImmutableContainer, ColumnCollection):
__slots__ = ("_parent",)
def __init__(self, collection):
object.__setattr__(self, "_parent", collection)
object.__setattr__(self, "_colset", collection._colset)
object.__setattr__(self, "_index", collection._index)
object.__setattr__(self, "_collection", collection._collection)
def __getstate__(self):
return {"_parent": self._parent}
def __setstate__(self, state):
parent = state["_parent"]
self.__init__(parent)
add = extend = remove = util.ImmutableContainer._immutable
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(
schemaitem, "fullname", getattr(schemaitem, "name", None)
)
if label:
item = "%s object %r" % (name, label)
else:
item = "%s object" % name
if msg is None:
msg = (
"%s is not bound to an Engine or Connection. "
"Execution can not proceed without a database to execute "
"against." % item
)
raise exc.UnboundExecutionError(msg)
return bind
| {
"content_hash": "71d723683dffa3ea8d979d19d80a0cb9",
"timestamp": "",
"source": "github",
"line_count": 993,
"max_line_length": 79,
"avg_line_length": 33.152064451158104,
"alnum_prop": 0.5808930741190765,
"repo_name": "wujuguang/sqlalchemy",
"id": "da384bdabcb34703efd87c183cfb8fb5fd9603aa",
"size": "33152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/sql/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45930"
},
{
"name": "Python",
"bytes": "11287383"
}
],
"symlink_target": ""
} |
"""
>>> pingpong = thriftpy.load("pingpong.thrift")
>>>
>>> class Dispatcher(object):
>>> def ping(self):
>>> return "pong"
>>> server = make_server(pingpong.PingPong, Dispatcher())
>>> server.listen(6000)
>>> client = ioloop.IOLoop.current().run_sync(
lambda: make_client(pingpong.PingPong, '127.0.0.1', 6000))
>>> ioloop.IOLoop.current().run_sync(client.ping)
'pong'
"""
from __future__ import absolute_import
from contextlib import contextmanager
from tornado import tcpserver, ioloop, iostream, gen
from io import BytesIO
from datetime import timedelta
from .transport import TTransportException, TTransportBase
from .transport.memory import TMemoryBuffer
from .thrift import TApplicationException, TProcessor, TClient
# TODO need TCyTornadoStreamTransport to work with cython binary protocol
from .protocol.binary import TBinaryProtocolFactory
import logging
import socket
import struct
import toro
class TTornadoStreamTransport(TTransportBase):
"""a framed, buffered transport over a Tornado stream"""
DEFAULT_CONNECT_TIMEOUT = timedelta(seconds=1)
DEFAULT_READ_TIMEOUT = timedelta(seconds=1)
def __init__(self, host, port, stream=None, io_loop=None,
read_timeout=DEFAULT_READ_TIMEOUT):
self.host = host
self.port = port
self.io_loop = io_loop or ioloop.IOLoop.current()
self.read_timeout = read_timeout
self.is_queuing_reads = False
self.read_queue = []
self.__wbuf = BytesIO()
self._read_lock = toro.Lock()
# servers provide a ready-to-go stream
self.stream = stream
if self.stream is not None:
self._set_close_callback()
def with_timeout(self, timeout, future):
return gen.with_timeout(timeout, future, self.io_loop)
@gen.coroutine
def open(self, timeout=DEFAULT_CONNECT_TIMEOUT):
logging.debug('socket connecting')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = iostream.IOStream(sock)
try:
yield self.with_timeout(timeout, self.stream.connect(
(self.host, self.port)))
except (socket.error, OSError, IOError):
message = 'could not connect to {}:{}'.format(self.host, self.port)
raise TTransportException(
type=TTransportException.NOT_OPEN,
message=message)
self._set_close_callback()
raise gen.Return(self)
def _set_close_callback(self):
self.stream.set_close_callback(self.close)
def close(self):
# don't raise if we intend to close
self.stream.set_close_callback(None)
self.stream.close()
def read(self, _):
# The generated code for Tornado shouldn't do individual reads -- only
# frames at a time
assert False, "you're doing it wrong"
@contextmanager
def io_exception_context(self):
try:
yield
except (socket.error, OSError, IOError) as e:
raise TTransportException(
type=TTransportException.END_OF_FILE,
message=str(e))
except iostream.StreamBufferFullError as e:
raise TTransportException(
type=TTransportException.UNKNOWN,
message=str(e))
except gen.TimeoutError as e:
raise TTransportException(
type=TTransportException.TIMED_OUT,
message=str(e))
@gen.coroutine
def read_frame(self):
# IOStream processes reads one at a time
with (yield self._read_lock.acquire()):
with self.io_exception_context():
frame_header = yield self._read_bytes(4)
if len(frame_header) == 0:
raise iostream.StreamClosedError(
'Read zero bytes from stream')
frame_length, = struct.unpack('!i', frame_header)
logging.debug('received frame header, frame length = %d',
frame_length)
frame = yield self._read_bytes(frame_length)
logging.debug('received frame payload: %r', frame)
raise gen.Return(frame)
def _read_bytes(self, n):
return self.with_timeout(self.read_timeout, self.stream.read_bytes(n))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
frame = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
frame_length = struct.pack('!i', len(frame))
self.__wbuf = BytesIO()
with self.io_exception_context():
return self.stream.write(frame_length + frame)
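# Wire-format note (illustrative, not from the original module): each call
# to flush() above emits one framed message, i.e. a 4-byte big-endian signed
# length prefix followed by the buffered payload:
#
#     import struct
#     payload = b'hello'
#     frame = struct.pack('!i', len(payload)) + payload
#     # frame == b'\x00\x00\x00\x05hello'
#
# read_frame() on the receiving side unpacks the same '!i' prefix before
# reading exactly that many payload bytes.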
class TTornadoServer(tcpserver.TCPServer):
def __init__(self, processor, iprot_factory, oprot_factory=None,
transport_read_timeout=TTornadoStreamTransport.DEFAULT_READ_TIMEOUT, # noqa
*args, **kwargs):
super(TTornadoServer, self).__init__(*args, **kwargs)
self._processor = processor
self._iprot_factory = iprot_factory
self._oprot_factory = (oprot_factory if oprot_factory is not None
else iprot_factory)
self.transport_read_timeout = transport_read_timeout
@gen.coroutine
def handle_stream(self, stream, address):
host, port = address
trans = TTornadoStreamTransport(
host=host, port=port, stream=stream,
io_loop=self.io_loop, read_timeout=self.transport_read_timeout)
try:
oprot = self._oprot_factory.get_protocol(trans)
iprot = self._iprot_factory.get_protocol(TMemoryBuffer())
while not trans.stream.closed():
# TODO: maybe read multiple frames in advance for concurrency
try:
frame = yield trans.read_frame()
except TTransportException as e:
if e.type == TTransportException.END_OF_FILE:
break
else:
raise
iprot.trans.setvalue(frame)
api, seqid, result, call = self._processor.process_in(iprot)
if isinstance(result, TApplicationException):
self._processor.send_exception(oprot, api, result, seqid)
else:
try:
result.success = yield gen.maybe_future(call())
except Exception as e:
# raise if api don't have throws
self._processor.handle_exception(e, result)
self._processor.send_result(oprot, api, result, seqid)
except Exception:
logging.exception('thrift exception in handle_stream')
trans.close()
logging.info('client disconnected %s:%d', host, port)
class TTornadoClient(TClient):
@gen.coroutine
def _recv(self, api):
frame = yield self._oprot.trans.read_frame()
self._iprot.trans.setvalue(frame)
result = super(TTornadoClient, self)._recv(api)
raise gen.Return(result)
def close(self):
self._oprot.trans.close()
def make_server(
service, handler, proto_factory=TBinaryProtocolFactory(),
io_loop=None,
transport_read_timeout=TTornadoStreamTransport.DEFAULT_READ_TIMEOUT):
processor = TProcessor(service, handler)
server = TTornadoServer(processor, iprot_factory=proto_factory,
transport_read_timeout=transport_read_timeout,
io_loop=io_loop)
return server
@gen.coroutine
def make_client(
service, host, port, proto_factory=TBinaryProtocolFactory(),
io_loop=None,
connect_timeout=TTornadoStreamTransport.DEFAULT_CONNECT_TIMEOUT,
read_timeout=TTornadoStreamTransport.DEFAULT_READ_TIMEOUT):
transport = TTornadoStreamTransport(host, port, io_loop=io_loop,
read_timeout=read_timeout)
iprot = proto_factory.get_protocol(TMemoryBuffer())
oprot = proto_factory.get_protocol(transport)
yield transport.open(connect_timeout)
client = TTornadoClient(service, iprot, oprot)
raise gen.Return(client)
| {
"content_hash": "7d43dff99f6b05eb3c59060b6ffc9307",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 93,
"avg_line_length": 36.80888888888889,
"alnum_prop": 0.6108427915962328,
"repo_name": "importcjj/thriftpy",
"id": "2006823ae30b6ece48853c1f2e70f48ee9877c80",
"size": "8307",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "thriftpy/tornado.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "705"
},
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "214653"
},
{
"name": "Thrift",
"bytes": "22032"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
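# Usage sketch (illustrative, not part of the original app): creating and
# publishing a post from the Django shell, assuming at least one User exists:
#
#     from django.contrib.auth.models import User
#     from blog.models import Post
#
#     me = User.objects.first()
#     post = Post.objects.create(author=me, title='Hello', text='First post')
#     post.published_date is None   # True until publish() is called
#     post.publish()                # stamps published_date and saves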
| {
"content_hash": "3e647a67bf8ceabb79b193d18ee227ed",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 44,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6574585635359116,
"repo_name": "django-girls/best-blog-in-the-world",
"id": "454a89829dcf659184ad6d1c86d40bd8e34fc005",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "2076"
},
{
"name": "Python",
"bytes": "10202"
}
],
"symlink_target": ""
} |
import random
import math
import subprocess
import codecs
import numbers
import base64, StringIO
### maybe someday convert to cElementTree output rather than string concatenation
# try:
# import xml.etree.cElementTree as ElementTree
# except ImportError:
# import xml.etree.ElementTree as ElementTree
# Special dependencies
import PIL.Image # sudo apt-get install python-imaging
# Cassius interdependencies
import mathtools
import utilities
import color
import containers
try:
import _svgview
except ImportError:
_svgview = None
# these can be re-set by the user on a per-session basis
defaults = {
"width": 1000,
"height": 1000,
"background": True,
}
default_frameargs = {
"leftmargin": 0.12,
"rightmargin": 0.05,
"topmargin": 0.05,
"bottommargin": 0.08,
"textscale": 1.,
"xlabel": None,
"ylabel": None,
"rightlabel": None,
"toplabel": None,
"xlabeloffset": 0.08,
"ylabeloffset": -0.10,
"rightlabeloffset": 0.,
"toplabeloffset": 0.,
"xlog": False,
"ylog": False,
"xticks": containers.Auto,
"yticks": containers.Auto,
"rightticks": containers.Auto,
"topticks": containers.Auto,
"show_topticklabels": containers.Auto,
"show_rightticklabels": containers.Auto,
"xmin": containers.Auto,
"ymin": containers.Auto,
"xmax": containers.Auto,
"ymax": containers.Auto,
"xmargin": 0.1,
"ymargin": 0.1,
}
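# Usage sketch (illustrative, not part of the original module): both
# dictionaries above act as session-wide fallbacks. Assuming this backend is
# importable as ``backends.svg`` (the name used in its error messages), a
# script could override them once, or pass the same keys per call:
#
#     from backends import svg        # assumed import path
#     svg.defaults["width"] = 800
#     svg.defaults["height"] = 600
#     svg.default_frameargs["xlabel"] = "time [s]"
#
# Per-call keyword arguments to draw()/view() still take precedence over
# both dictionaries (see _get_frameargs below).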
# represents an SVG document filled by drawing commands
class SVG:
def __init__(self, width, height, background):
self.width, self.height = width, height
self.header = """<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg style="stroke-linejoin:miter; stroke:black; stroke-width:2.5; text-anchor:middle; fill:none" xmlns="http://www.w3.org/2000/svg" font-family="Helvetica, Arial, FreeSans, Sans, sans, sans-serif" width="%(width)dpx" height="%(height)dpx" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 %(width)d %(height)d">
""" % vars()
self.footer = "</svg>\n"
self.names = {}
self.defs = {}
self.body = []
if background:
self.body.append("""<rect id="background" x="0" y="0" width="%(width)g" height="%(height)g" stroke="none" fill="white" />""" % vars())
def uniquename(self, base):
if base not in self.names:
self.names[base] = 0
else:
self.names[base] += 1
return "%s_%d" % (base, self.names[base])
def write(self, fileName):
f = codecs.open(fileName, "w", "utf-8")
f.write(self.header)
if len(self.defs) > 0:
f.write("<defs>\n")
keys = self.defs.keys()
keys.sort() # for readability
for key in keys:
f.write(self.defs[key]); f.write("\n")
f.write("</defs>\n")
f.write("<g id=\"whole_document\">\n")
for line in self.body:
f.write(line); f.write("\n")
f.write("</g>\n")
f.write(self.footer)
def tostring(self):
f = StringIO.StringIO()
f.write(self.header)
if len(self.defs) > 0:
f.write("<defs>")
for value in self.defs.values():
f.write(value)
f.write("</defs>")
for line in self.body:
f.write(line)
f.write(self.footer)
return f.getvalue()
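# Usage sketch (illustrative, not part of the original module): the SVG
# container is normally filled by the _draw_* helpers below, but it can be
# driven directly:
#
#     doc = SVG(400, 300, background=True)
#     doc.body.append(u'<circle cx="200" cy="150" r="50" fill="red" />')
#     doc.write("example.svg")     # or doc.tostring() for an in-memory string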
# this is what the user calls
def view(obj, **kwds):
if _svgview is None:
raise RuntimeError, "The '_svgview' extension module has not been compiled; use \"draw(object, fileName='...')\" instead."
svg = kwds.get("svg", None)
# actual drawing is done in internal subcommands
try:
subcommand = eval("_draw_%s" % obj.__class__.__name__)
except NameError:
raise NotImplementedError, "A '_draw_%s' function has not been implemented in backends.svg" % obj.__class__.__name__
if svg is None:
# set the defaults (if not already overridden with explicit keyword arguments)
for arg, value in defaults.items():
if arg not in kwds:
kwds[arg] = value
# the following are derived arguments
svg = SVG(kwds["width"], kwds["height"], kwds["background"])
kwds["svg"] = svg
kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"] = 0., 0., float(kwds["width"]), float(kwds["height"])
# run the command and view the SVG
subcommand(obj, **kwds)
_svgview.str(svg.tostring())
else:
# not working on a new SVG; add to the existing one
subcommand(obj, **kwds)
# this is what the user calls
def draw(obj, **kwds):
svg = kwds.get("svg", None)
# actual drawing is done in internal subcommands
try:
subcommand = eval("_draw_%s" % obj.__class__.__name__)
except NameError:
raise NotImplementedError, "A '_draw_%s' function has not been implemented in backends.svg" % obj.__class__.__name__
if svg is None:
try:
fileName = kwds["fileName"]
except KeyError:
raise TypeError, "The 'svgdraw.draw' function requires fileName='...'"
# set the defaults (if not already overridden with explicit keyword arguments)
for arg, value in defaults.items():
if arg not in kwds:
kwds[arg] = value
# the following are derived arguments
svg = SVG(kwds["width"], kwds["height"], kwds["background"])
kwds["svg"] = svg
kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"] = 0., 0., float(kwds["width"]), float(kwds["height"])
# run the command and write the SVG
subcommand(obj, **kwds)
svg.write(fileName)
else:
# not working on a new SVG; add to the existing one
subcommand(obj, **kwds)
# this draws a PDF by invoking inkscape on an intermediary SVG file
def drawpdf(obj, fileName, tmpFileName="/tmp/tmp.svg", **kwds):
kwds["fileName"] = tmpFileName
draw(obj, **kwds)
proc = subprocess.Popen(["inkscape", tmpFileName, "--export-pdf=" + fileName])
proc.wait()
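# Usage sketch (illustrative, not part of the original module): assuming a
# plottable container ``hist`` (e.g. a Histogram), the three entry points
# above are used like this:
#
#     draw(hist, fileName="hist.svg", width=800, height=600)
#     drawpdf(hist, "hist.pdf")    # renders via an intermediate SVG + inkscape
#     view(hist)                   # only if the _svgview extension is compiled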
###################################################### utilities
def _svgopacity(obj):
if isinstance(obj, color.AbstractColor):
return obj.opacity
else:
return 1.
def _svgcolor(obj):
if obj is None:
return "none"
elif isinstance(obj, basestring):
return obj
else:
return str(obj)
def _svglinestyle(obj, linewidth=1.):
if obj is None or obj == "solid":
return ""
elif obj == "dashed":
return _svglinestyle((15.*linewidth, 15.*linewidth))
elif obj == "dotted":
return _svglinestyle((3.*linewidth, 3.*linewidth))
elif isinstance(obj, (list, tuple)):
allnumbers = True
for i in obj:
if not isinstance(i, numbers.Number):
allnumbers = False
break
if allnumbers:
return " ".join(map(str, obj))
else:
return obj
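# Worked example (illustrative): the named styles above expand to SVG
# stroke-dasharray strings scaled by the line width, e.g.
#
#     _svglinestyle("dashed")          # -> "15.0 15.0"
#     _svglinestyle("dotted", 2.)      # -> "6.0 6.0"
#     _svglinestyle((10, 5))           # -> "10 5"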
def _svglinewidth(obj):
return obj*3.
def _svgmarkersize(obj):
return obj*7.5
def _transformX(x, wx1, wx2, xmin, xmax, xlog):
if xlog:
return wx1 + (math.log10(x) - math.log10(xmin))*(wx2 - wx1)/(math.log10(xmax) - math.log10(xmin))
else:
return wx1 + (x - xmin)*(wx2 - wx1)/(xmax - xmin)
def _transformY(y, wy1, wy2, ymin, ymax, ylog):
if ylog:
return wy2 - (math.log10(y) - math.log10(ymin))*(wy2 - wy1)/(math.log10(ymax) - math.log10(ymin))
else:
return wy2 - (y - ymin)*(wy2 - wy1)/(ymax - ymin)
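# Worked example (illustrative): with a window spanning wx1=100..wx2=900 and
# a linear data range xmin=0..xmax=10, the point x=5 lands at
# 100 + (5 - 0)*(900 - 100)/(10 - 0) = 500. The Y transform is flipped
# because SVG y coordinates grow downward: with wy1=100..wy2=900 and
# ymin=0..ymax=10, y=5 also maps to 900 - (5 - 0)*(900 - 100)/(10 - 0) = 500,
# while y=10 maps to 100 (the top of the window).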
###################################################### draw_frame
def _get_frameargs(obj, **kwds):
output = obj._frameargs()
try:
subcommand = eval("_frameargs_prehook_%s" % obj.__class__.__name__)
except NameError:
subcommand = None
if subcommand is not None:
output = subcommand(obj, output, **kwds)
# framearg precedence:
# 1. draw(..., framearg=something)
# 2. draw(obj(..., framearg=something))
# 3. default_frameargs[framearg] = something
for i in default_frameargs:
if i in kwds:
output[i] = kwds[i] #1
else:
if i in output: pass #2 (it's already in there)
else:
output[i] = default_frameargs[i] #3
if output["leftmargin"] is None: output["leftmargin"] = 0.
if output["rightmargin"] is None: output["rightmargin"] = 0.
if output["topmargin"] is None: output["topmargin"] = 0.
if output["bottommargin"] is None: output["bottommargin"] = 0.
if output["xmargin"] is None: output["xmargin"] = 0.
if output["ymargin"] is None: output["ymargin"] = 0.
if output["xmin"] is containers.Auto or output["ymin"] is containers.Auto or output["xmax"] is containers.Auto or output["ymax"] is containers.Auto:
def goget(attrib, default):
out = output.get(attrib, default)
if isinstance(out, numbers.Number):
return out
else:
return default
xmin, ymin, xmax, ymax = obj.ranges(output["xlog"], output["ylog"])
xmargin = output["xmargin"]*(goget("xmax", xmax) - goget("xmin", xmin))
ymargin = output["ymargin"]*(goget("ymax", ymax) - goget("ymin", ymin))
xmin = xmin - xmargin
xmax = xmax + xmargin
ymin = ymin - ymargin
ymax = ymax + ymargin
if output["xmin"] is containers.Auto: output["xmin"] = xmin
if output["ymin"] is containers.Auto: output["ymin"] = ymin
if output["xmax"] is containers.Auto: output["xmax"] = xmax
if output["ymax"] is containers.Auto: output["ymax"] = ymax
if output["xticks"] is None:
output["xticks"] = {}
elif callable(output["xticks"]):
output["xticks"] = output["xticks"](output["xmin"], output["xmax"])
elif output["xticks"] is containers.Auto:
if output["xlog"]:
output["xticks"] = utilities.tickmarks(logbase=10)(output["xmin"], output["xmax"])
else:
output["xticks"] = utilities.tickmarks()(output["xmin"], output["xmax"])
elif callable(output["xticks"]):
vals = output["xticks"](output["xmin"], output["xmax"])
output["xticks"] = dict(map(lambda x: (x, unumber(x)), vals))
elif isinstance(output["xticks"], (tuple, list)) and len(output["xticks"]) == 2 and callable(output["xticks"][0]) and callable(output["xticks"][1]):
if output["xticks"][0].func_name == "timeticks" and output["xticks"][1].func_name == "timeminiticks":
major = output["xticks"][0](output["xmin"], output["xmax"])
minor = output["xticks"][1](output["xmin"], output["xmax"])
else:
major = dict(map(lambda x: (x, unumber(x)), output["xticks"][0](output["xmin"], output["xmax"])))
minor = dict(map(lambda x: (x, None), output["xticks"][1](output["xmin"], output["xmax"])))
minor.update(major)
output["xticks"] = minor
if output["yticks"] is None:
output["yticks"] = {}
elif callable(output["yticks"]):
output["yticks"] = output["yticks"](output["ymin"], output["ymax"])
elif output["yticks"] is containers.Auto:
if output["ylog"]:
output["yticks"] = utilities.tickmarks(logbase=10)(output["ymin"], output["ymax"])
else:
output["yticks"] = utilities.tickmarks()(output["ymin"], output["ymax"])
elif callable(output["yticks"]):
vals = output["yticks"](output["ymin"], output["ymax"])
output["yticks"] = dict(map(lambda x: (x, unumber(x)), vals))
elif isinstance(output["yticks"], (tuple, list)) and len(output["yticks"]) == 2 and callable(output["yticks"][0]) and callable(output["yticks"][1]):
major = dict(map(lambda x: (x, unumber(x)), output["yticks"][0](output["ymin"], output["ymax"])))
minor = dict(map(lambda x: (x, None), output["yticks"][1](output["ymin"], output["ymax"])))
minor.update(major)
output["yticks"] = minor
if output["topticks"] is None:
output["topticks"] = {}
elif output["topticks"] is containers.Auto:
output["topticks"] = output["xticks"]
if output["show_topticklabels"] is containers.Auto:
output["show_topticklabels"] = False
else:
if output["show_topticklabels"] is containers.Auto:
output["show_topticklabels"] = True
if output["rightticks"] is None:
output["rightticks"] = {}
elif output["rightticks"] is containers.Auto:
output["rightticks"] = output["yticks"]
if output["show_rightticklabels"] is containers.Auto:
output["show_rightticklabels"] = False
else:
if output["show_rightticklabels"] is containers.Auto:
output["show_rightticklabels"] = True
try:
subcommand = eval("_frameargs_posthook_%s" % obj.__class__.__name__)
except NameError:
subcommand = None
if subcommand is not None:
output = subcommand(obj, output, **kwds)
return output
def _get_window(**kwds):
x1, y1, x2, y2, f = kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"], kwds["frameargs"]
wx1, wx2 = x1 + f["leftmargin"]*(x2 - x1), x2 - f["rightmargin"]*(x2 - x1)
wy1, wy2 = y1 + f["topmargin"]*(y2 - y1), y2 - f["bottommargin"]*(y2 - y1)
return wx1, wy1, wx2, wy2
def _draw_frame(**kwds):
svg, x1, y1, x2, y2 = kwds["svg"], kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"]
f = kwds["frameargs"]
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
font_size = f["textscale"]*30.
xmin, ymin, xmax, ymax = f["xmin"], f["ymin"], f["xmax"], f["ymax"]
xlog, ylog = f["xlog"], f["ylog"]
framename = svg.uniquename("frame")
svg.body.append(u"""<g id="%(framename)s">""" % vars())
svg.body.append(u""" <rect id="%(framename)s_border" x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />""" % vars())
# bottom-axis label and ticks
if f["xlabel"] is not None:
tx, ty, text = wx1 + (wx2 - wx1)/2., wy2 + f["xlabeloffset"]*windowheight, f["xlabel"]
svg.body.append(u""" <text id="%(framename)s_bottomlabel" font-size="%(font_size)g" transform="translate(%(tx)g, %(ty)g)" text-anchor="middle" dominant-baseline="middle" stroke="none" fill="black">%(text)s</text>""" % vars())
svg.body.append(u""" <g id="%(framename)s_bottomticks">""" % vars())
tickend, minitickend, textmid = wy2 - 20., wy2 - 10., wy2 + 30.
for x, label in f["xticks"].items():
hpos = _transformX(x, wx1, wx2, xmin, xmax, xlog)
if label is not None:
svg.body.append(u""" <path d="M %(hpos)g %(wy2)g L %(hpos)g %(tickend)g" />""" % vars())
svg.body.append(u""" <text font-size="%(font_size)g" transform="translate(%(hpos)g, %(textmid)g)" text-anchor="middle" dominant-baseline="middle" stroke="none" fill="black">%(label)s</text>""" % vars())
else:
svg.body.append(u""" <path d="M %(hpos)g %(wy2)g L %(hpos)g %(minitickend)g" />""" % vars())
svg.body.append(u""" </g>""")
# left-axis label and ticks
if f["ylabel"] is not None:
tx, ty, text = wx1 + f["ylabeloffset"]*windowwidth, wy1 + (wy2 - wy1)/2., f["ylabel"]
svg.body.append(u""" <text id="%(framename)s_leftlabel" font-size="%(font_size)g" transform="translate(%(tx)g, %(ty)g) rotate(-90)" text-anchor="middle" dominant-baseline="middle" stroke="none" fill="black">%(text)s</text>""" % vars())
svg.body.append(u""" <g id="%(framename)s_leftticks">""" % vars())
tickend, minitickend, textmid = wx1 + 20., wx1 + 10., wx1 - 10.
for y, label in f["yticks"].items():
vpos = _transformY(y, wy1, wy2, ymin, ymax, ylog)
vpostext = vpos + 10.
if label is not None:
svg.body.append(u""" <path d="M %(wx1)g %(vpos)g L %(tickend)g %(vpos)g" />""" % vars())
svg.body.append(u""" <text font-size="%(font_size)g" transform="translate(%(textmid)g, %(vpostext)g)" text-anchor="end" dominant-baseline="middle" stroke="none" fill="black">%(label)s</text>""" % vars())
else:
svg.body.append(u""" <path d="M %(wx1)g %(vpos)g L %(minitickend)g %(vpos)g" />""" % vars())
svg.body.append(u""" </g>""")
# top-axis label and ticks
if f["toplabel"] is not None:
tx, ty, text = wx1 + (wx2 - wx1)/2., wy1 + f["toplabeloffset"]*windowheight, f["toplabel"]
svg.body.append(u""" <text id="%(framename)s_toplabel" font-size="%(font_size)g" transform="translate(%(tx)g, %(ty)g)" text-anchor="middle" dominant-baseline="middle" stroke="none" fill="black">%(text)s</text>""" % vars())
svg.body.append(u""" <g id="%(framename)s_topticks">""" % vars())
tickend, minitickend, textmid = wy1 + 20., wy1 + 10., wy1 - 30.
for x, label in f["topticks"].items():
hpos = _transformX(x, wx1, wx2, xmin, xmax, xlog)
if label is not None:
svg.body.append(u""" <path d="M %(hpos)g %(wy1)g L %(hpos)g %(tickend)g" />""" % vars())
if f["show_topticklabels"]:
svg.body.append(u""" <text font-size="%(font_size)g" transform="translate(%(hpos)g, %(textmid)g)" text-anchor="middle" dominant-baseline="middle" stroke="none" fill="black">%(label)s</text>""" % vars())
else:
svg.body.append(u""" <path d="M %(hpos)g %(wy1)g L %(hpos)g %(minitickend)g" />""" % vars())
svg.body.append(u""" </g>""")
# right-axis label and ticks
if f["rightlabel"] is not None:
tx, ty, text = wx2 + f["rightlabeloffset"]*windowwidth, wy1 + (wy2 - wy1)/2., f["rightlabel"]
svg.body.append(u""" <text id="%(framename)s_rightlabel" font-size="%(font_size)g" transform="translate(%(tx)g, %(ty)g) rotate(90)" text-anchor="middle" dominant-baseline="middle" stroke="none" fill="black">%(text)s</text>""" % vars())
svg.body.append(u""" <g id="%(framename)s_rightticks">""" % vars())
tickend, minitickend, textmid = wx2 - 20., wx2 - 10., wx2 + 10.
for y, label in f["rightticks"].items():
vpos = _transformY(y, wy1, wy2, ymin, ymax, ylog)
vpostext = vpos + 10.
if label is not None:
svg.body.append(u""" <path d="M %(wx2)g %(vpos)g L %(tickend)g %(vpos)g" />""" % vars())
if f["show_rightticklabels"]:
svg.body.append(u""" <text font-size="%(font_size)g" transform="translate(%(textmid)g, %(vpostext)g)" text-anchor="start" dominant-baseline="middle" stroke="none" fill="black">%(label)s</text>""" % vars())
else:
svg.body.append(u""" <path d="M %(wx2)g %(vpos)g L %(minitickend)g %(vpos)g" />""" % vars())
svg.body.append(u""" </g>""")
svg.body.append(u"""</g>""")
###################################################### actions for particular classes
def _draw_NoneType(obj, **kwds):
### debugging code
# svg, x1, y1, x2, y2 = kwds["svg"], kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"]
# width = x2 - x1
# height = y2 - y1
# color = rgbcolor(random.gauss(0.5, 0.3), random.gauss(0.5, 0.3), random.gauss(0.5, 0.3))
# svg.body.append(u"""<rect x="%(x1)g" y="%(y1)g" width="%(width)g" height="%(height)g" stroke="none" fill="%(color)s" />""" % vars())
pass
def _draw_Layout(obj, **kwds):
svg, x1, y1, x2, y2 = kwds["svg"], kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"]
# TODO: possibly need to change the margins for different layouts
# by passing down a multiplier?
width = (x2 - x1)/float(obj.ncols)
height = (y2 - y1)/float(obj.nrows)
for i in xrange(obj.nrows):
kwds["y1"], kwds["y2"] = (y1 + i*height), (y1 + (i+1)*height)
for j in xrange(obj.ncols):
kwds["x1"], kwds["x2"] = (x1 + j*width), (x1 + (j+1)*width)
draw(obj[i,j], **kwds)
def _draw_Overlay(obj, **kwds):
svg, x1, y1, x2, y2 = kwds["svg"], kwds["x1"], kwds["y1"], kwds["x2"], kwds["y2"]
drawframe = kwds.get("drawframe", True)
def findframe(obj):
if isinstance(obj, containers.Stack):
obj._prepare()
return findframe(obj._overlay)
elif isinstance(obj, containers.Overlay):
if "frame" in obj.__dict__ and obj.frame is not None:
if obj.frame >= len(obj.plots):
raise containers.ContainerException, "Overlay.frame points to a non-existent plot (%d <= %d)" % (obj.frame, len(obj.plots))
return findframe(obj.plots[obj.frame])
else:
return _get_frameargs(obj, **kwds)
else:
return _get_frameargs(obj, **kwds)
foundframe = findframe(obj) # to evaluate all Stacks
if drawframe:
kwds["frameargs"] = foundframe
kwds["drawframe"] = False # for the contained objects
# flatten any Overlay nesting and draw Legends _above_ the frame
def recurse(plotsin, nonlegends, legends):
for plot in plotsin:
if isinstance(plot, containers.Stack):
recurse(plot._overlay.plots, nonlegends, legends)
elif isinstance(plot, containers.Overlay):
recurse(plot.plots, nonlegends, legends)
elif isinstance(plot, containers.Legend):
legends.append(plot)
else:
nonlegends.append(plot)
nonlegends = []
legends = []
recurse(obj.plots, nonlegends, legends)
for plot in nonlegends:
draw(plot, **kwds)
if drawframe: _draw_frame(**kwds)
for plot in legends:
draw(plot, **kwds)
def _draw_Stack(obj, **kwds):
obj._prepare()
draw(obj._overlay, **kwds)
def _frameargs_prehook_Histogram(obj, output, **kwds):
if "ymin" not in output or output["ymin"] is None or output["ymin"] is containers.Auto:
if "ylog" not in output or not output["ylog"]:
output["ymin"] = 0.
if "xmargin" not in output:
output["xmargin"] = 0.
return output
def _frameargs_prehook_HistogramAbstract(obj, output, **kwds):
return _frameargs_prehook_Histogram(obj, output, **kwds)
def _frameargs_prehook_HistogramNonUniform(obj, output, **kwds):
return _frameargs_prehook_Histogram(obj, output, **kwds)
def _frameargs_prehook_HistogramCategorical(obj, output, **kwds):
return _frameargs_prehook_Histogram(obj, output, **kwds)
def _draw_HistogramAbstract(obj, **kwds):
_draw_Histogram(obj, **kwds)
def _draw_Histogram(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
linewidth = _svglinewidth(obj.linewidth)
linestyle = _svglinestyle(obj.linestyle, obj.linewidth)
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
fillopacity = _svgopacity(obj.fillcolor)
fillcolor = _svgcolor(obj.fillcolor)
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
def t(x, y):
return _transformX(x, wx1, wx2, xmin, xmax, xlog), _transformY(y, wy1, wy2, ymin, ymax, ylog)
xepsilon = mathtools.epsilon * (xmax - xmin)
yepsilon = mathtools.epsilon * (ymax - ymin)
bins = obj.binedges()
gap = obj.gap*(_transformX(xmax, wx1, wx2, xmin, xmax, xlog) - _transformX(xmin, wx1, wx2, xmin, xmax, xlog))/len(obj.bins)
line = [] # in data coordinates
pathdata = [] # in SVG coordinates with gaps
for (binlow, binhigh), value in zip(bins, obj.values):
if len(line) == 0:
line.append((binlow, 0.))
pathdata.append("M %g %g" % t(*line[-1]))
if gap > mathtools.epsilon:
line.append((binlow, 0.))
x, y = t(*line[-1])
x += gap/2.
pathdata.append("L %g %g" % (x, y))
elif abs(line[-1][0] - binlow) > xepsilon or gap > mathtools.epsilon:
line.append((line[-1][0], 0.))
line.append((binlow, 0.))
if gap > mathtools.epsilon:
x, y = t(*line[-2])
x -= gap/2.
pathdata.append("L %g %g" % (x, y))
x, y = t(*line[-1])
x += gap/2.
pathdata.append("L %g %g" % (x, y))
else:
pathdata.append("L %g %g" % t(*line[-2]))
pathdata.append("L %g %g" % t(*line[-1]))
line.append((binlow, value))
line.append((binhigh, value))
if gap > mathtools.epsilon:
x, y = t(*line[-2])
x += gap/2.
pathdata.append("L %g %g" % (x, y))
x, y = t(*line[-1])
x -= gap/2.
pathdata.append("L %g %g" % (x, y))
else:
pathdata.append("L %g %g" % t(*line[-2]))
pathdata.append("L %g %g" % t(*line[-1]))
if gap > mathtools.epsilon:
line.append((line[-1][0], 0.))
x, y = t(*line[-1])
x -= gap/2.
pathdata.append("L %g %g" % (x, y))
line.append((line[-1][0], 0.))
pathdata.append("L %g %g" % t(*line[-1]))
pathdata = " ".join(pathdata)
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
svg.body.append(u""" <path d="%(pathdata)s" stroke-width="%(linewidth)g" stroke-dasharray="%(linestyle)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="%(fillcolor)s" fill-opacity="%(fillopacity)g" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _draw_HistogramNonUniform(obj, **kwds):
_draw_Histogram(obj, **kwds)
def _draw_HistogramCategorical(obj, **kwds):
_draw_Histogram(obj, **kwds)
def _frameargs_posthook_HistogramCategorical(obj, output, **kwds):
f = obj._frameargs()
if f.get("xticks", containers.Auto) is containers.Auto:
output["xticks"] = dict(enumerate(obj.bins))
return output
def _draw_Scatter(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
obj._prepare(f["xmin"], f["ymin"], f["xmax"], f["ymax"])
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
def t(x, y):
return _transformX(x, wx1, wx2, xmin, xmax, xlog), _transformY(y, wy1, wy2, ymin, ymax, ylog)
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
plotmarkname = "%s_mark" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
markeropacity = _svgopacity(obj.markercolor)
markercolor = _svgcolor(obj.markercolor)
markeroutlineopacity = _svgopacity(obj.markeroutline)
markeroutline = _svgcolor(obj.markeroutline)
# TODO: handle shapes other than circles (in a centralized way)
if obj.marker == "circle":
radius = _svgmarkersize(obj.markersize)
svg.defs[plotmarkname] = u""" <circle id="%(plotmarkname)s" cx="0" cy="0" r="%(radius)g" stroke="%(markeroutline)s" stroke-opacity="%(markeroutlineopacity)g" fill="%(markercolor)s" fill-opacity="%(markeropacity)g" />""" % vars()
else:
pass
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
xindex = obj.index()["x"]
yindex = obj.index()["y"]
if obj.linecolor is not None:
linewidth = _svglinewidth(obj.linewidth)
linestyle = _svglinestyle(obj.linestyle, obj.linewidth)
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
pathdata = []
for value in obj._xlimited_values:
if len(pathdata) == 0:
pathdata.append("M %g %g" % t(value[xindex], value[yindex]))
else:
pathdata.append("L %g %g" % t(value[xindex], value[yindex]))
pathdata = " ".join(pathdata)
svg.body.append(u""" <path d="%(pathdata)s" stroke-width="%(linewidth)g" stroke-dasharray="%(linestyle)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="none" />""" % vars())
if "ex" in obj.sig:
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
if "exl" in obj.sig: exlindex = obj.index()["exl"]
else: exlindex = obj.index()["ex"]
exindex = obj.index()["ex"]
def down(x, y): return x, y - 5.
def up(x, y): return x, y + 5.
for value in obj._limited_values:
x, y, exl, ex = value[xindex], value[yindex], abs(value[exlindex]), abs(value[exindex])
pathdata = ["M %g %g" % t(x - exl, y),
"L %g %g" % t(x + ex, y),
"M %g %g" % down(*t(x - exl, y)),
"L %g %g" % up(*t(x - exl, y)),
"M %g %g" % down(*t(x + ex, y)),
"L %g %g" % up(*t(x + ex, y))]
pathdata = " ".join(pathdata)
svg.body.append(u""" <path d="%(pathdata)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="none" />""" % vars())
if "ey" in obj.sig:
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
if "eyl" in obj.sig: eylindex = obj.index()["eyl"]
else: eylindex = obj.index()["ey"]
eyindex = obj.index()["ey"]
def down(x, y): return x - 5., y
def up(x, y): return x + 5., y
for value in obj._limited_values:
x, y, eyl, ey = value[xindex], value[yindex], abs(value[eylindex]), abs(value[eyindex])
pathdata = ["M %g %g" % t(x, y - eyl),
"L %g %g" % t(x, y + ey),
"M %g %g" % down(*t(x, y - eyl)),
"L %g %g" % up(*t(x, y - eyl)),
"M %g %g" % down(*t(x, y + ey)),
"L %g %g" % up(*t(x, y + ey))]
pathdata = " ".join(pathdata)
svg.body.append(u""" <path d="%(pathdata)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="none" />""" % vars())
if obj.marker is not None:
for value in obj._limited_values:
x, y = t(value[xindex], value[yindex])
svg.body.append(u""" <use x="%(x)g" y="%(y)g" xlink:href="%(h)s%(plotmarkname)s" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _frameargs_posthook_Scatter(obj, output, **kwds):
f = obj._frameargs()
if f.get("xticks", containers.Auto) is containers.Auto and getattr(obj, "_xticks", None) is not None:
output["xticks"] = obj._xticks
if f.get("yticks", containers.Auto) is containers.Auto and getattr(obj, "_yticks", None) is not None:
output["yticks"] = obj._yticks
return output
def _draw_TimeSeries(obj, **kwds):
_draw_Scatter(obj, **kwds)
def _frameargs_prehook_TimeSeries(obj, output, **kwds):
if "xmin" in output and output["xmin"] is not None and output["xmin"] is not containers.Auto and isinstance(output["xmin"], basestring):
output["xmin"] = obj.fromtimestring(output["xmin"])
if "xmax" in output and output["xmax"] is not None and output["xmax"] is not containers.Auto and isinstance(output["xmax"], basestring):
output["xmax"] = obj.fromtimestring(output["xmax"])
return output
def _frameargs_posthook_TimeSeries(obj, output, **kwds):
f = obj._frameargs()
if "xticks" not in f or f["xticks"] is containers.Auto:
xticks = output["xticks"]
for value, name in xticks.items():
if name is not None:
xticks[value] = obj.totimestring(value)
output["xticks"] = xticks
return output
def _draw_ColorField(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
xbins, ybins = obj.xbins(), obj.ybins()
zmin, zmax = obj.zranges()
if obj.zmin is not containers.Auto:
zmin = obj.zmin
if obj.zmax is not containers.Auto:
zmax = obj.zmax
image = PIL.Image.new("RGBA", (xbins, ybins), (0, 0, 0, 255))
for i in xrange(xbins):
for j in xrange(ybins):
col = obj.tocolor(obj.values[i,j], zmin, zmax)
if isinstance(col, color.RGB):
col = col.ints()
elif isinstance(col, (color.AbstractColor, basestring)):
col = color.RGB(col).ints()
image.putpixel((i, ybins-j-1), col)
buff = StringIO.StringIO()
image.save(buff, "PNG")
encoded = base64.b64encode(buff.getvalue())
if obj.smooth:
smooth = "optimizeQuality"
else:
smooth = "optimizeSpeed"
xpos = _transformX(obj.xmin, wx1, wx2, xmin, xmax, xlog)
xpos2 = _transformX(obj.xmax, wx1, wx2, xmin, xmax, xlog)
ypos = _transformY(obj.ymin, wy1, wy2, ymin, ymax, ylog)
ypos2 = _transformY(obj.ymax, wy1, wy2, ymin, ymax, ylog)
width = xpos2 - xpos
height = ypos - ypos2
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
svg.body.append(u""" <image xlink:href="data:image/png;base64,%(encoded)s" x="%(xpos)g" y="%(ypos2)g" width="%(width)g" height="%(height)g" image-rendering="%(smooth)s" preserveAspectRatio="none" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _draw_Region(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
fillopacity = _svgopacity(obj.fillcolor)
fillcolor = _svgcolor(obj.fillcolor)
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
pathdata = []
for command in obj.commands:
if not isinstance(command, containers.RegionCommand):
raise containers.ContainerException, "Commands passed to Region must all be RegionCommands (MoveTo, EdgeTo, ClosePolygon)"
if isinstance(command, (containers.MoveTo, containers.EdgeTo)):
x, y = command.x, command.y
if isinstance(x, mathtools.InfiniteType):
x = (wx1 + wx2)/2. + windowwidth/mathtools.epsilon * x._multiplier
else:
x = _transformX(x, wx1, wx2, xmin, xmax, xlog)
if isinstance(y, mathtools.InfiniteType):
y = (wy1 + wy2)/2. - windowwidth/mathtools.epsilon * y._multiplier
else:
y = _transformY(y, wy1, wy2, ymin, ymax, ylog)
if isinstance(command, containers.MoveTo):
pathdata.append("M %g %g" % (x, y))
if isinstance(command, containers.EdgeTo):
pathdata.append("L %g %g" % (x, y))
elif isinstance(command, containers.ClosePolygon):
pathdata.append("Z")
pathdata = " ".join(pathdata)
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
svg.body.append(u""" <path d="%(pathdata)s" stroke-width="5." stroke="%(fillcolor)s" fill="%(fillcolor)s" fill-opacity="%(fillopacity)g" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _draw_RegionMap(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
obj._prepare()
image = PIL.Image.new("RGBA", (obj.xbins, obj.ybins), (0, 0, 0, 255))
for i in xrange(obj.xbins):
for j in xrange(obj.ybins):
image.putpixel((i, obj.ybins-j-1), obj._values[i][j])
buff = StringIO.StringIO()
image.save(buff, "PNG")
encoded = base64.b64encode(buff.getvalue())
xpos = _transformX(obj.xmin, wx1, wx2, xmin, xmax, xlog)
xpos2 = _transformX(obj.xmax, wx1, wx2, xmin, xmax, xlog)
ypos = _transformY(obj.ymin, wy1, wy2, ymin, ymax, ylog)
ypos2 = _transformY(obj.ymax, wy1, wy2, ymin, ymax, ylog)
width = xpos2 - xpos
height = ypos - ypos2
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
svg.body.append(u""" <image xlink:href="data:image/png;base64,%(encoded)s" x="%(xpos)g" y="%(ypos2)g" width="%(width)g" height="%(height)g" image-rendering="optimizeQuality" preserveAspectRatio="none" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _draw_ConsumerRegionMap(obj, **kwds):
_draw_RegionMap(obj, **kwds)
def _frameargs_prehook_Curve(obj, output, **kwds):
obj._prepare(output.get("xlog", False))
output = obj._scatter._frameargs()
return output
def _draw_Curve(obj, **kwds):
if "_scatter" not in obj.__dict__ or obj._scatter is None:
if not kwds.get("drawframe", True):
xmin = kwds["frameargs"]["xmin"]
xmax = kwds["frameargs"]["xmax"]
xlog = kwds["frameargs"]["xlog"]
else:
if "xlog" in kwds:
xlog = kwds["xlog"]
elif "xlog" in obj.__dict__:
xlog = obj.xlog
else:
xlog = default_frameargs["xlog"]
if "xmin" in kwds:
xmin = kwds["xmin"]
elif "xmin" in obj.__dict__:
xmin = obj.xmin
else:
if xlog: xmin = 0.1
else: xmin = 0.
if "xmax" in kwds:
xmax = kwds["xmax"]
elif "xmax" in obj.__dict__:
xmax = obj.xmax
else:
xmax = 1.
obj._prepare(xmin=xmin, xmax=xmax, xlog=xlog)
_draw_Scatter(obj._scatter, **kwds)
obj._scatter = None
def _draw_Line(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
linewidth = _svglinewidth(obj.linewidth)
linestyle = _svglinestyle(obj.linestyle, obj.linewidth)
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
def t(x, y):
return _transformX(x, wx1, wx2, xmin, xmax, xlog), _transformY(y, wy1, wy2, ymin, ymax, ylog)
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
pathdata = []
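    # Three cases: both endpoints infinite is an error; a single infinite
    # endpoint is projected far outside the window along its multiplier
    # direction; two finite endpoints are transformed directly.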
if (isinstance(obj.x1, mathtools.InfiniteType) or isinstance(obj.y1, mathtools.InfiniteType)) and \
(isinstance(obj.x2, mathtools.InfiniteType) or isinstance(obj.y2, mathtools.InfiniteType)):
raise containers.ContainerException, "Only one of the two points can be at Infinity"
elif isinstance(obj.x1, mathtools.InfiniteType) or isinstance(obj.y1, mathtools.InfiniteType):
pathdata.append("M %g %g" % t(obj.x2, obj.y2))
if isinstance(obj.x1, mathtools.InfiniteType):
x = (wx1 + wx2)/2. + windowwidth/mathtools.epsilon * obj.x1._multiplier
else:
x = _transformX(obj.x1, wx1, wx2, xmin, xmax, xlog)
if isinstance(obj.y1, mathtools.InfiniteType):
y = (wy1 + wy2)/2. - windowwidth/mathtools.epsilon * obj.y1._multiplier
else:
y = _transformY(obj.y1, wy1, wy2, ymin, ymax, ylog)
pathdata.append("L %g %g" % (x, y))
elif isinstance(obj.x2, mathtools.InfiniteType) or isinstance(obj.y2, mathtools.InfiniteType):
pathdata.append("M %g %g" % t(obj.x1, obj.y1))
if isinstance(obj.x2, mathtools.InfiniteType):
x = (wx1 + wx2)/2. + windowwidth/mathtools.epsilon * obj.x2._multiplier
else:
x = _transformX(obj.x2, wx1, wx2, xmin, xmax, xlog)
if isinstance(obj.y2, mathtools.InfiniteType):
y = (wy1 + wy2)/2. - windowwidth/mathtools.epsilon * obj.y2._multiplier
else:
y = _transformY(obj.y2, wy1, wy2, ymin, ymax, ylog)
pathdata.append("L %g %g" % (x, y))
else:
pathdata.append("M %g %g L %g %g" % tuple(list(t(obj.x1, obj.y1)) + list(t(obj.x2, obj.y2))))
pathdata = " ".join(pathdata)
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
svg.body.append(u""" <path d="%(pathdata)s" stroke-width="%(linewidth)g" stroke-dasharray="%(linestyle)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="none" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _draw_Grid(obj, **kwds):
svg = kwds["svg"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
linewidth = _svglinewidth(obj.linewidth)
linestyle = _svglinestyle(obj.linestyle, obj.linewidth)
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
xmin, ymin, xmax, ymax, xlog, ylog = f["xmin"], f["ymin"], f["xmax"], f["ymax"], f["xlog"], f["ylog"]
def t(x, y):
return _transformX(x, wx1, wx2, xmin, xmax, xlog), _transformY(y, wy1, wy2, ymin, ymax, ylog)
obj._prepare(xmin, ymin, xmax, ymax)
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(wx1)g" y="%(wy1)g" width="%(windowwidth)g" height="%(windowheight)g" />
</clipPath>""" % vars()
pathdata = []
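    # One full-height segment per vertical grid position and one full-width
    # segment per horizontal grid position.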
for x in obj._vert:
pathdata.append("M %g %g L %g %g" % tuple(list(t(x, ymin)) + list(t(x, ymax))))
for y in obj._horiz:
pathdata.append("M %g %g L %g %g" % tuple(list(t(xmin, y)) + list(t(xmax, y))))
pathdata = " ".join(pathdata)
h = "#"
svg.body.append(u"""<g id="%(plotname)s" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
svg.body.append(u""" <path d="%(pathdata)s" stroke-width="%(linewidth)g" stroke-dasharray="%(linestyle)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="none" />""" % vars())
svg.body.append(u"""</g>""")
if kwds.get("drawframe", True): _draw_frame(**kwds)
def _draw_Legend(obj, **kwds):
svg, svgwidth, svgheight = kwds["svg"], kwds["width"], kwds["height"]
if kwds.get("drawframe", True): kwds["frameargs"] = _get_frameargs(obj, **kwds)
f = kwds["frameargs"]
linewidth = _svglinewidth(obj.linewidth)
linestyle = _svglinestyle(obj.linestyle, obj.linewidth)
lineopacity = _svgopacity(obj.linecolor)
linecolor = _svgcolor(obj.linecolor)
fillopacity = _svgopacity(obj.fillcolor)
fillcolor = _svgcolor(obj.fillcolor)
wx1, wy1, wx2, wy2 = _get_window(**kwds)
windowwidth, windowheight = wx2 - wx1, wy2 - wy1
obj._prepare()
if obj.height is containers.Auto:
# no top-padding
# objheight = (2.*obj.padding + obj._rows*obj.baselineskip)*svgheight / windowheight
objheight = (obj.padding + obj._rows*obj.baselineskip)*svgheight / windowheight
else:
objheight = obj.height
width = obj.width * windowwidth
height = objheight * windowheight
x = wx1 + obj.x*windowwidth
y = wy2 - obj.y*windowheight
if obj._anchor[1] == "m": x -= width/2.
elif obj._anchor[1] == "r": x -= width
if obj._anchor[0] == "m": y -= height/2.
elif obj._anchor[0] == "b": y -= height
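    # The anchor string (top/middle/bottom plus left/middle/right) shifts the
    # box so that (x, y) refers to the requested corner or midpoint.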
plotname = svg.uniquename(obj.__class__.__name__)
plotclipname = "%s_clip" % plotname
svg.defs[plotclipname] = u""" <clipPath id="%(plotclipname)s">
<rect x="%(x)g" y="%(y)g" width="%(width)g" height="%(height)g" />
</clipPath>""" % vars()
if kwds.get("drawframe", True): _draw_frame(**kwds)
h = "#"
svg.body.append(u"""<g id="%(plotname)s">""" % vars())
svg.body.append(u""" <rect x="%(x)g" y="%(y)g" width="%(width)g" height="%(height)g" stroke-width="%(linewidth)g" stroke-dasharray="%(linestyle)s" stroke="%(linecolor)s" stroke-opacity="%(lineopacity)g" fill="%(fillcolor)s" fill-opacity="%(fillopacity)g" />""" % vars())
svg.body.append(u""" <g id="%(plotname)s_content" clip-path="url(%(h)s%(plotclipname)s)">""" % vars())
# no top-padding
# penx, peny = x + obj.padding * svgwidth, y + obj.padding * svgheight
penx, peny = x + obj.padding * svgwidth, y
width -= 2. * obj.padding * svgwidth
for i in range(len(obj._fields)):
peny += obj.baselineskip * svgheight
penxstart = penx
for j in range(len(obj._fields[i])):
drawable = obj._fields[i][j]
drawn = False
might_have_style = False
try:
drawable.__dict__
might_have_style = True
except AttributeError: pass
if might_have_style:
if "linecolor" in drawable.__dict__ and drawable.linecolor is not None:
lstyle = u" stroke-width=\"%g\" stroke-dasharray=\"%s\" stroke=\"%s\" stroke-opacity=\"%g\"" % (_svglinewidth(drawable.linewidth), _svglinestyle(drawable.linestyle, drawable.linewidth), _svgcolor(drawable.linecolor), _svgopacity(drawable.linecolor))
else:
lstyle = u" stroke=\"none\""
if "fillcolor" in drawable.__dict__ and drawable.fillcolor is not None:
drawable_fillopacity = _svgopacity(drawable.fillcolor)
drawable_fillcolor = _svgcolor(drawable.fillcolor)
rectwidth, rectheight = 1.5 * obj.baselineskip * svgheight, 0.75 * obj.baselineskip * svgheight
rectx, recty = penx, peny - rectheight
if obj._justify[j] == "l":
rectx += obj.padding * svgwidth
elif obj._justify[j] in ("m", "c"):
rectx += (obj._colwid[j] * width - rectwidth - obj.padding * svgwidth)/2.
elif obj._justify[j] == "r":
rectx += obj._colwid[j] * width - rectwidth - obj.padding * svgwidth
svg.body.append(u""" <rect x="%(rectx)g" y="%(recty)g" width="%(rectwidth)g" height="%(rectheight)g"%(lstyle)s fill="%(drawable_fillcolor)s" fill-opacity="%(drawable_fillopacity)g" />""" % vars())
drawn = True
elif "linecolor" in drawable.__dict__ and drawable.linecolor is not None:
linelength = 1.5 * obj.baselineskip * svgheight
linex1, liney1 = penx, peny - 0.3 * obj.baselineskip * svgheight
if obj._justify[j] == "l":
linex1 += obj.padding * svgwidth
elif obj._justify[j] in ("m", "c"):
linex1 += (obj._colwid[j] * width - linelength - obj.padding * svgwidth)/2.
elif obj._justify[j] == "r":
linex1 += obj._colwid[j] * width - linelength - obj.padding * svgwidth
linex2, liney2 = linex1 + linelength, liney1
svg.body.append(u""" <line x1="%(linex1)g" y1="%(liney1)g" x2="%(linex2)g" y2="%(liney2)g"%(lstyle)s />""" % vars())
drawn = True
if "marker" in drawable.__dict__ and drawable.marker is not None:
# TODO: handle shapes other than circles (in a centralized way)
plotmarkname = svg.uniquename("%s_mark" % plotname)
radius, markeroutlineopacity, markeroutline, markeropacity, markercolor = _svgmarkersize(drawable.markersize), _svgopacity(drawable.markeroutline), _svgcolor(drawable.markeroutline), _svgopacity(drawable.markercolor), _svgcolor(drawable.markercolor)
svg.defs[plotmarkname] = u""" <circle id="%(plotmarkname)s" cx="0" cy="0" r="%(radius)g" stroke="%(markeroutline)s" stroke-opacity="%(markeroutlineopacity)g" fill="%(markercolor)s" fill-opacity="%(markeropacity)g" />""" % vars()
linelength = 1.5 * obj.baselineskip * svgheight
withline = (("fillcolor" in drawable.__dict__ and drawable.fillcolor is not None) or
("linecolor" in drawable.__dict__ and drawable.linecolor is not None))
markx, marky = penx, peny - 0.375 * obj.baselineskip * svgheight
if obj._justify[j] == "l":
markx += obj.padding * svgwidth
if withline: markx += linelength / 2.
elif obj._justify[j] in ("m", "c"):
markx += (obj._colwid[j] * width - obj.padding * svgwidth)/2.
elif obj._justify[j] == "r":
markx += obj._colwid[j] * width - obj.padding * svgwidth
if withline: markx -= linelength / 2.
svg.body.append(u""" <use x="%(markx)g" y="%(marky)g" xlink:href="%(h)s%(plotmarkname)s" />""" % vars())
drawn = True
if not drawn and drawable is not None:
astext = unicode(drawable)
font_size = obj.textscale*30.
if obj._justify[j] == "l":
placement = penx
text_anchor = "start"
elif obj._justify[j] in ("m", "c"):
placement = penx + 0.5 * obj._colwid[j] * width
text_anchor = "middle"
elif obj._justify[j] == "r":
placement = penx + obj._colwid[j] * width
text_anchor = "end"
svg.body.append(u""" <text font-size="%(font_size)g" transform="translate(%(placement)g, %(peny)g)" text-anchor="%(text_anchor)s" dominant-baseline="middle" stroke="none" fill="black">%(astext)s</text>""" % vars())
penx += obj._colwid[j] * width
penx = penxstart
svg.body.append(u""" </g>""")
svg.body.append(u"""</g>""")
| {
"content_hash": "515832c98bda09ba8689ca2b3e9477eb",
"timestamp": "",
"source": "github",
"line_count": 1246,
"max_line_length": 332,
"avg_line_length": 42.170144462279296,
"alnum_prop": 0.5648599269183922,
"repo_name": "opendatagroup/cassius",
"id": "1fb34f5abd6b6d6fafef0450f71d55a5a05a7387",
"size": "52571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tags/cassius-0_1_0_0/cassius/svgdraw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15656"
},
{
"name": "JavaScript",
"bytes": "12775"
},
{
"name": "Python",
"bytes": "1187698"
}
],
"symlink_target": ""
} |
"""SOAP client."""
import os.path
from pkg_resources import get_distribution, DistributionNotFound
RINSE_DIR = os.path.dirname(__file__)
ENVELOPE_XSD = 'soap-1.1_envelope.xsd'
NS_SOAPENV = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_MAP = {
'soapenv': NS_SOAPENV,
}
try:
_dist = get_distribution('rinse')
if not __file__.startswith(os.path.join(_dist.location, 'rinse', '')):
# not installed, but there is another version that *is*
raise DistributionNotFound
except DistributionNotFound:
__version__ = 'development'
else:
__version__ = _dist.version
| {
"content_hash": "e7367d68cbd029d0f2bb6fac6153f025",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 25.82608695652174,
"alnum_prop": 0.6750841750841751,
"repo_name": "tysonclugg/rinse",
"id": "748c3198018fee66139d0e8cdab4aaabb4928940",
"size": "594",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "rinse/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "743"
},
{
"name": "Python",
"bytes": "24343"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
import sys
import os
import glob
import tempfile
import shutil
import errno
from gerapy import get_logger
from gerapy.cmd.init import PROJECTS_FOLDER
from gerapy.server.core.config import config
from os.path import join
from subprocess import check_call
logger = get_logger(__name__)
def build_project(project):
"""
    build a project into an egg file
    :param project: project folder name under PROJECTS_FOLDER
    :return: path of the built egg file
"""
egg = build_egg(project)
logger.info('successfully build project %s to egg file %s', project, egg)
return egg
_SETUP_PY_TEMPLATE = \
'''# Automatically created by: gerapy
from setuptools import setup, find_packages
setup(
name='%(project)s',
version='1.0',
packages=find_packages(),
entry_points={'scrapy':['settings=%(settings)s']},
)'''
def retry_on_eintr(function, *args, **kw):
"""Run a function and retry it while getting EINTR errors"""
while True:
try:
return function(*args, **kw)
except IOError as e:
if e.errno != errno.EINTR:
raise
# build Egg
def build_egg(project):
'''
build project to egg file
    :param project: project folder name under PROJECTS_FOLDER
    :return: path of the built egg file, or None if the build failed
'''
work_path = os.getcwd()
try:
path = os.path.abspath(join(os.getcwd(), PROJECTS_FOLDER))
project_path = join(path, project)
os.chdir(project_path)
settings = config(project_path, 'settings', 'default')
setup_file_path = join(project_path, 'setup.py')
create_default_setup_py(
setup_file_path, settings=settings, project=project)
d = tempfile.mkdtemp(prefix='gerapy-')
o = open(os.path.join(d, 'stdout'), 'wb')
e = open(os.path.join(d, 'stderr'), 'wb')
retry_on_eintr(check_call, [sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d],
stdout=o, stderr=e)
o.close()
e.close()
egg = glob.glob(os.path.join(d, '*.egg'))[0]
        # Delete any existing egg file before moving in the new build
if find_egg(project_path):
os.remove(join(project_path, find_egg(project_path)))
shutil.move(egg, project_path)
return join(project_path, find_egg(project_path))
except Exception as e:
logger.error('error occurred %s', e.args)
finally:
os.chdir(work_path)
def find_egg(path):
"""
find egg from path
    :param path: directory to search
    :return: file name of the first .egg file found, or None
"""
items = os.listdir(path)
for name in items:
if name.endswith('.egg'):
return name
def create_default_setup_py(path, **kwargs):
"""
    create a setup.py file at path unless one already exists
    :param path: target path of the setup.py file
    :param kwargs: template parameters (project, settings)
    :return:
"""
if os.path.exists(path):
logger.debug('setup.py file already exists at %s', path)
else:
with open(path, 'w', encoding='utf-8') as f:
            content = _SETUP_PY_TEMPLATE % kwargs
            f.write(content)
logger.debug('successfully created setup.py file at %s', path)
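

# --- Illustrative usage (added sketch; the project name is hypothetical) ---
if __name__ == '__main__':
    # Assuming a Scrapy project folder named 'quotes' exists under
    # PROJECTS_FOLDER, build it and print the resulting egg path.
    print(build_project('quotes'))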
| {
"content_hash": "629e90c231e22befcae3173d4cbc9510",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 101,
"avg_line_length": 26.96330275229358,
"alnum_prop": 0.5923783599863899,
"repo_name": "Gerapy/Gerapy",
"id": "986a83a34c5e9480d9b518db304e0e45f773b484",
"size": "2939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gerapy/server/core/build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "198"
},
{
"name": "HTML",
"bytes": "3111"
},
{
"name": "JavaScript",
"bytes": "27475"
},
{
"name": "Python",
"bytes": "1723424"
},
{
"name": "SCSS",
"bytes": "10276"
},
{
"name": "Shell",
"bytes": "257"
},
{
"name": "Vue",
"bytes": "185910"
}
],
"symlink_target": ""
} |
from pexdoc.ptypes import real_num, positive_real_num, offset_range, function
from peng.ptypes import real_numpy_vector, increasing_real_numpy_vector
# Intra-package imports
from .basic_source import BasicSource
from .csv_source import CsvSource
from .series import Series
from .panel import Panel
from .figure import Figure
from .functions import parameterized_color_space, DataSource
from pplot.ptypes import interpolation_option, line_style_option, color_space_option
from .constants import (
AXIS_LABEL_FONT_SIZE,
AXIS_TICKS_FONT_SIZE,
LEGEND_SCALE,
LINE_WIDTH,
MARKER_SIZE,
MIN_TICKS,
PRECISION,
SUGGESTED_MAX_TICKS,
TITLE_FONT_SIZE,
)
| {
"content_hash": "fb104ac729f602c15cd3b4aaff58f888",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 84,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.7654867256637168,
"repo_name": "pmacosta/pplot",
"id": "d24483c15100646920d51aad62839a22ef4e9e3e",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pplot/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3515"
},
{
"name": "Python",
"bytes": "407899"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
} |
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'home',
'search',
'wagtail.wagtailforms',
'wagtail.wagtailredirects',
'wagtail.wagtailembeds',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailimages',
'wagtail.wagtailsearch',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Wagtail settings
WAGTAIL_SITE_NAME = "{{ project_name }}"
| {
"content_hash": "1c9674ed85100253eacd9125c7c63542",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 86,
"avg_line_length": 26.043478260869566,
"alnum_prop": 0.6822481914301614,
"repo_name": "hamsterbacke23/wagtail",
"id": "c9756393734591525d24d7c9cd641d171825484b",
"size": "3594",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/project_template/project_name/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "172736"
},
{
"name": "HTML",
"bytes": "291553"
},
{
"name": "JavaScript",
"bytes": "116387"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2243460"
},
{
"name": "Shell",
"bytes": "7387"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
from sklearn.ensemble._hist_gradient_boosting.binning import (
_BinMapper,
_find_binning_thresholds,
_map_to_bins,
)
from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import X_BINNED_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import ALMOST_INF
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
n_threads = _openmp_effective_n_threads()
DATA = (
np.random.RandomState(42)
.normal(loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2))
.astype(X_DTYPE)
)
def test_find_binning_thresholds_regular_data():
data = np.linspace(0, 10, 1001)
bin_thresholds = _find_binning_thresholds(data, max_bins=10)
assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
bin_thresholds = _find_binning_thresholds(data, max_bins=5)
assert_allclose(bin_thresholds, [2, 4, 6, 8])
def test_find_binning_thresholds_small_regular_data():
data = np.linspace(0, 10, 11)
bin_thresholds = _find_binning_thresholds(data, max_bins=5)
assert_allclose(bin_thresholds, [2, 4, 6, 8])
bin_thresholds = _find_binning_thresholds(data, max_bins=10)
assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
bin_thresholds = _find_binning_thresholds(data, max_bins=11)
assert_allclose(bin_thresholds, np.arange(10) + 0.5)
bin_thresholds = _find_binning_thresholds(data, max_bins=255)
assert_allclose(bin_thresholds, np.arange(10) + 0.5)
def test_find_binning_thresholds_random_data():
bin_thresholds = [
_find_binning_thresholds(DATA[:, i], max_bins=255) for i in range(2)
]
for i in range(len(bin_thresholds)):
assert bin_thresholds[i].shape == (254,) # 255 - 1
assert bin_thresholds[i].dtype == DATA.dtype
assert_allclose(
bin_thresholds[0][[64, 128, 192]], np.array([-0.7, 0.0, 0.7]), atol=1e-1
)
assert_allclose(
bin_thresholds[1][[64, 128, 192]], np.array([9.99, 10.00, 10.01]), atol=1e-2
)
def test_find_binning_thresholds_low_n_bins():
bin_thresholds = [
_find_binning_thresholds(DATA[:, i], max_bins=128) for i in range(2)
]
for i in range(len(bin_thresholds)):
assert bin_thresholds[i].shape == (127,) # 128 - 1
assert bin_thresholds[i].dtype == DATA.dtype
@pytest.mark.parametrize("n_bins", (2, 257))
def test_invalid_n_bins(n_bins):
err_msg = "n_bins={} should be no smaller than 3 and no larger than 256".format(
n_bins
)
with pytest.raises(ValueError, match=err_msg):
_BinMapper(n_bins=n_bins).fit(DATA)
def test_bin_mapper_n_features_transform():
mapper = _BinMapper(n_bins=42, random_state=42).fit(DATA)
err_msg = "This estimator was fitted with 2 features but 4 got passed"
with pytest.raises(ValueError, match=err_msg):
mapper.transform(np.repeat(DATA, 2, axis=1))
@pytest.mark.parametrize("max_bins", [16, 128, 255])
def test_map_to_bins(max_bins):
bin_thresholds = [
_find_binning_thresholds(DATA[:, i], max_bins=max_bins) for i in range(2)
]
binned = np.zeros_like(DATA, dtype=X_BINNED_DTYPE, order="F")
last_bin_idx = max_bins
_map_to_bins(DATA, bin_thresholds, last_bin_idx, n_threads, binned)
assert binned.shape == DATA.shape
assert binned.dtype == np.uint8
assert binned.flags.f_contiguous
min_indices = DATA.argmin(axis=0)
max_indices = DATA.argmax(axis=0)
for feature_idx, min_idx in enumerate(min_indices):
assert binned[min_idx, feature_idx] == 0
for feature_idx, max_idx in enumerate(max_indices):
assert binned[max_idx, feature_idx] == max_bins - 1
@pytest.mark.parametrize("max_bins", [5, 10, 42])
def test_bin_mapper_random_data(max_bins):
n_samples, n_features = DATA.shape
expected_count_per_bin = n_samples // max_bins
tol = int(0.05 * expected_count_per_bin)
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
mapper = _BinMapper(n_bins=n_bins, random_state=42).fit(DATA)
binned = mapper.transform(DATA)
assert binned.shape == (n_samples, n_features)
assert binned.dtype == np.uint8
assert_array_equal(binned.min(axis=0), np.array([0, 0]))
assert_array_equal(binned.max(axis=0), np.array([max_bins - 1, max_bins - 1]))
assert len(mapper.bin_thresholds_) == n_features
for bin_thresholds_feature in mapper.bin_thresholds_:
assert bin_thresholds_feature.shape == (max_bins - 1,)
assert bin_thresholds_feature.dtype == DATA.dtype
assert np.all(mapper.n_bins_non_missing_ == max_bins)
# Check that the binned data is approximately balanced across bins.
for feature_idx in range(n_features):
for bin_idx in range(max_bins):
count = (binned[:, feature_idx] == bin_idx).sum()
assert abs(count - expected_count_per_bin) < tol
@pytest.mark.parametrize("n_samples, max_bins", [(5, 5), (5, 10), (5, 11), (42, 255)])
def test_bin_mapper_small_random_data(n_samples, max_bins):
data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)
assert len(np.unique(data)) == n_samples
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
mapper = _BinMapper(n_bins=n_bins, random_state=42)
binned = mapper.fit_transform(data)
assert binned.shape == data.shape
assert binned.dtype == np.uint8
assert_array_equal(binned.ravel()[np.argsort(data.ravel())], np.arange(n_samples))
@pytest.mark.parametrize(
"max_bins, n_distinct, multiplier",
[
(5, 5, 1),
(5, 5, 3),
(255, 12, 42),
],
)
def test_bin_mapper_identity_repeated_values(max_bins, n_distinct, multiplier):
data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1)
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
binned = _BinMapper(n_bins=n_bins).fit_transform(data)
assert_array_equal(data, binned)
@pytest.mark.parametrize("n_distinct", [2, 7, 42])
def test_bin_mapper_repeated_values_invariance(n_distinct):
rng = np.random.RandomState(42)
distinct_values = rng.normal(size=n_distinct)
assert len(np.unique(distinct_values)) == n_distinct
repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)
data = distinct_values[repeated_indices]
rng.shuffle(data)
assert_array_equal(np.unique(data), np.sort(distinct_values))
data = data.reshape(-1, 1)
mapper_1 = _BinMapper(n_bins=n_distinct + 1)
binned_1 = mapper_1.fit_transform(data)
assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))
# Adding more bins to the mapper yields the same results (same thresholds)
mapper_2 = _BinMapper(n_bins=min(256, n_distinct * 3) + 1)
binned_2 = mapper_2.fit_transform(data)
assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])
assert_array_equal(binned_1, binned_2)
@pytest.mark.parametrize(
"max_bins, scale, offset",
[
(3, 2, -1),
(42, 1, 0),
(255, 0.3, 42),
],
)
def test_bin_mapper_identity_small(max_bins, scale, offset):
data = np.arange(max_bins).reshape(-1, 1) * scale + offset
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
binned = _BinMapper(n_bins=n_bins).fit_transform(data)
assert_array_equal(binned, np.arange(max_bins).reshape(-1, 1))
@pytest.mark.parametrize(
"max_bins_small, max_bins_large",
[
(2, 2),
(3, 3),
(4, 4),
(42, 42),
(255, 255),
(5, 17),
(42, 255),
],
)
def test_bin_mapper_idempotence(max_bins_small, max_bins_large):
assert max_bins_large >= max_bins_small
data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)
mapper_small = _BinMapper(n_bins=max_bins_small + 1)
mapper_large = _BinMapper(n_bins=max_bins_small + 1)
binned_small = mapper_small.fit_transform(data)
binned_large = mapper_large.fit_transform(binned_small)
assert_array_equal(binned_small, binned_large)
@pytest.mark.parametrize("n_bins", [10, 100, 256])
@pytest.mark.parametrize("diff", [-5, 0, 5])
def test_n_bins_non_missing(n_bins, diff):
# Check that n_bins_non_missing is n_unique_values when
# there are not a lot of unique values, else n_bins - 1.
n_unique_values = n_bins + diff
X = list(range(n_unique_values)) * 2
X = np.array(X).reshape(-1, 1)
mapper = _BinMapper(n_bins=n_bins).fit(X)
assert np.all(mapper.n_bins_non_missing_ == min(n_bins - 1, n_unique_values))
def test_subsample():
# Make sure bin thresholds are different when applying subsampling
mapper_no_subsample = _BinMapper(subsample=None, random_state=0).fit(DATA)
mapper_subsample = _BinMapper(subsample=256, random_state=0).fit(DATA)
for feature in range(DATA.shape[1]):
assert not np.allclose(
mapper_no_subsample.bin_thresholds_[feature],
mapper_subsample.bin_thresholds_[feature],
rtol=1e-4,
)
@pytest.mark.parametrize(
"n_bins, n_bins_non_missing, X_trans_expected",
[
(
256,
[4, 2, 2],
[
[0, 0, 0], # 255 <=> missing value
[255, 255, 0],
[1, 0, 0],
[255, 1, 1],
[2, 1, 1],
[3, 0, 0],
],
),
(
3,
[2, 2, 2],
[
[0, 0, 0], # 2 <=> missing value
[2, 2, 0],
[0, 0, 0],
[2, 1, 1],
[1, 1, 1],
[1, 0, 0],
],
),
],
)
def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):
# check for missing values: make sure nans are mapped to the last bin
# and that the _BinMapper attributes are correct
X = [
[1, 1, 0],
[np.NaN, np.NaN, 0],
[2, 1, 0],
[np.NaN, 2, 1],
[3, 2, 1],
[4, 1, 0],
]
X = np.array(X)
mapper = _BinMapper(n_bins=n_bins)
mapper.fit(X)
assert_array_equal(mapper.n_bins_non_missing_, n_bins_non_missing)
for feature_idx in range(X.shape[1]):
assert (
len(mapper.bin_thresholds_[feature_idx])
== n_bins_non_missing[feature_idx] - 1
)
assert mapper.missing_values_bin_idx_ == n_bins - 1
X_trans = mapper.transform(X)
assert_array_equal(X_trans, X_trans_expected)
def test_infinite_values():
# Make sure infinite values are properly handled.
bin_mapper = _BinMapper()
X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
bin_mapper.fit(X)
assert_allclose(bin_mapper.bin_thresholds_[0], [-np.inf, 0.5, ALMOST_INF])
assert bin_mapper.n_bins_non_missing_ == [4]
expected_binned_X = np.array([0, 1, 2, 3]).reshape(-1, 1)
assert_array_equal(bin_mapper.transform(X), expected_binned_X)
@pytest.mark.parametrize("n_bins", [15, 256])
def test_categorical_feature(n_bins):
# Basic test for categorical features
# we make sure that categories are mapped into [0, n_categories - 1] and
# that nans are mapped to the last bin
X = np.array(
[[4] * 500 + [1] * 3 + [10] * 4 + [0] * 4 + [13] + [7] * 5 + [np.nan] * 2],
dtype=X_DTYPE,
).T
known_categories = [np.unique(X[~np.isnan(X)])]
bin_mapper = _BinMapper(
n_bins=n_bins,
is_categorical=np.array([True]),
known_categories=known_categories,
).fit(X)
assert bin_mapper.n_bins_non_missing_ == [6]
assert_array_equal(bin_mapper.bin_thresholds_[0], [0, 1, 4, 7, 10, 13])
X = np.array([[0, 1, 4, np.nan, 7, 10, 13]], dtype=X_DTYPE).T
expected_trans = np.array([[0, 1, 2, n_bins - 1, 3, 4, 5]]).T
assert_array_equal(bin_mapper.transform(X), expected_trans)
# For unknown categories, the mapping is incorrect / undefined. This never
    # happens in practice. This check is only for illustration purposes.
X = np.array([[-1, 100]], dtype=X_DTYPE).T
expected_trans = np.array([[0, 6]]).T
assert_array_equal(bin_mapper.transform(X), expected_trans)
@pytest.mark.parametrize("n_bins", (128, 256))
def test_categorical_with_numerical_features(n_bins):
# basic check for binmapper with mixed data
X1 = np.arange(10, 20).reshape(-1, 1) # numerical
X2 = np.arange(10, 15).reshape(-1, 1) # categorical
X2 = np.r_[X2, X2]
X = np.c_[X1, X2]
known_categories = [None, np.unique(X2).astype(X_DTYPE)]
bin_mapper = _BinMapper(
n_bins=n_bins,
is_categorical=np.array([False, True]),
known_categories=known_categories,
).fit(X)
assert_array_equal(bin_mapper.n_bins_non_missing_, [10, 5])
bin_thresholds = bin_mapper.bin_thresholds_
assert len(bin_thresholds) == 2
assert_array_equal(bin_thresholds[1], np.arange(10, 15))
expected_X_trans = [
[0, 0],
[1, 1],
[2, 2],
[3, 3],
[4, 4],
[5, 0],
[6, 1],
[7, 2],
[8, 3],
[9, 4],
]
assert_array_equal(bin_mapper.transform(X), expected_X_trans)
def test_make_known_categories_bitsets():
# Check the output of make_known_categories_bitsets
X = np.array(
[[14, 2, 30], [30, 4, 70], [40, 10, 180], [40, 240, 180]], dtype=X_DTYPE
)
bin_mapper = _BinMapper(
n_bins=256,
is_categorical=np.array([False, True, True]),
known_categories=[None, X[:, 1], X[:, 2]],
)
bin_mapper.fit(X)
known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
# Note that for non-categorical features, values are left to 0
expected_f_idx_map = np.array([0, 0, 1], dtype=np.uint8)
assert_allclose(expected_f_idx_map, f_idx_map)
expected_cat_bitset = np.zeros((2, 8), dtype=np.uint32)
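    # Each bitset row holds 8 uint32 words (256 bits); a category value c sets
    # bit (c % 32) of word (c // 32).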
# first categorical feature: [2, 4, 10, 240]
f_idx = 1
mapped_f_idx = f_idx_map[f_idx]
expected_cat_bitset[mapped_f_idx, 0] = 2 ** 2 + 2 ** 4 + 2 ** 10
    # 240 = 32 * 7 + 16, so bit 16 of word 7 is set.
expected_cat_bitset[mapped_f_idx, 7] = 2 ** 16
# second categorical feature [30, 70, 180]
f_idx = 2
mapped_f_idx = f_idx_map[f_idx]
expected_cat_bitset[mapped_f_idx, 0] = 2 ** 30
expected_cat_bitset[mapped_f_idx, 2] = 2 ** 6
expected_cat_bitset[mapped_f_idx, 5] = 2 ** 20
assert_allclose(expected_cat_bitset, known_cat_bitsets)
@pytest.mark.parametrize(
"is_categorical, known_categories, match",
[
(np.array([True]), [None], "Known categories for feature 0 must be provided"),
(
np.array([False]),
np.array([1, 2, 3]),
"isn't marked as a categorical feature, but categories were passed",
),
],
)
def test_categorical_parameters(is_categorical, known_categories, match):
# test the validation of the is_categorical and known_categories parameters
X = np.array([[1, 2, 3]], dtype=X_DTYPE)
bin_mapper = _BinMapper(
is_categorical=is_categorical, known_categories=known_categories
)
with pytest.raises(ValueError, match=match):
bin_mapper.fit(X)
| {
"content_hash": "ef6ac476738017467bbff731d38b47ca",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 86,
"avg_line_length": 33.24945770065076,
"alnum_prop": 0.6143658663883089,
"repo_name": "huzq/scikit-learn",
"id": "7cbc6603ee01f37702ea139bc3ca3dbc401d0f10",
"size": "15328",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
} |
"""Statistical metrics for Calibrator routines
"""
import numpy as np
from sklearn.metrics import r2_score
def filter_nan(s, o):
"""
    this function removes entries from the simulated and observed data
    wherever the observed data contain nan;
    it is used by all other functions, which would otherwise produce nan as
    output
"""
data = np.array([s.flatten(), o.flatten()])
data = np.transpose(data)
data = data[~np.isnan(data).any(1)]
return data[:, 0], data[:, 1]
def percentage_deviation(s, o):
"""
Percent deviation
input:
s: simulated
o: observed
output:
percent deviation
"""
s, o = filter_nan(s, o)
return sum(sum(abs(s - o) / abs(o)))
def pc_bias(s, o):
"""
Percent Bias
input:
s: simulated
o: observed
output:
pc_bias: percent bias
"""
s, o = filter_nan(s, o)
return 100.0 * sum(s - o) / sum(o)
def apb(s, o):
"""
Absolute Percent Bias
input:
s: simulated
o: observed
output:
        apb: absolute percent bias
"""
s, o = filter_nan(s, o)
return 100.0 * sum(abs(s - o)) / sum(o)
def rmse(s, o):
"""
Root Mean Squared Error
input:
s: simulated
o: observed
output:
rmse: root mean squared error
"""
s, o = filter_nan(s, o)
return np.sqrt(np.mean((s - o)**2))
def norm_rmse(s, o):
"""
    Root Mean Squared Error normalized to the standard deviation of the observations
input:
s: simulated
o: observed
output:
        nrmse: normalized root mean squared error (RMSE divided by the standard deviation of o)
"""
s, o = filter_nan(s, o)
return rmse(s, o) / np.std(o)
def mae(s, o):
"""
Mean Absolute Error
input:
s: simulated
o: observed
output:
        mae: mean absolute error
"""
s, o = filter_nan(s, o)
return np.mean(abs(s - o))
def bias(s, o):
"""
Bias
input:
s: simulated
o: observed
output:
bias: bias
"""
s, o = filter_nan(s, o)
return np.mean(s - o)
def NS(s, o):
"""
Nash Sutcliffe efficiency coefficient (the same as r^2)
input:
s: simulated
o: observed
output:
        ns: Nash Sutcliffe efficiency coefficient
"""
s, o = filter_nan(s, o)
return 1 - sum((s - o)**2) / sum((o - np.mean(o))**2)
def likelihood(s, o, N=5):
"""
Likelihood
input:
s: simulated
        o: observed
        N: weighting factor in the exponential (default 5)
output:
L: likelihood
"""
s, o = filter_nan(s, o)
return np.exp(-N * sum((s - o)**2) / sum((o - np.mean(o))**2))
def correlation(s, o):
"""
correlation coefficient
input:
s: simulated
o: observed
output:
corr: correlation coefficient
"""
s, o = filter_nan(s, o)
if s.size == 0:
corr = np.NaN
else:
corr = np.corrcoef(o, s)[0, 1]
return corr
def index_agreement(s, o):
"""
index of agreement
input:
s: simulated
o: observed
output:
ia: index of agreement
"""
s, o = filter_nan(s, o)
ia = 1 - (np.sum((o - s)**2)) / (
np.sum((np.abs(s - np.mean(o)) + np.abs(o - np.mean(o)))**2))
return ia
def squared_error(s, o):
"""
squared error
input:
s: simulated
o: observed
output:
se: squared error
"""
s, o = filter_nan(s, o)
return sum((s - o)**2)
def coefficient_of_determination(s, o):
"""
coefficient of determination (r-squared)
input:
s: simulated
o: observed
output:
r2: coefficient of determination
"""
s, o = filter_nan(s, o)
o_mean = np.mean(o)
se = squared_error(s, o)
    se_mean = sum((o - o_mean)**2)  # o_mean is a scalar, so squared_error/filter_nan cannot be used here
r2 = 1 - (se / se_mean)
return r2
def rsquared(s, o):
"""
coefficient of determination (r-squared)
    using the python sklearn module
input:
s: simulated
o: observed
output:
r2: coefficient of determination
"""
s, o = filter_nan(s, o)
return r2_score(o, s)
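

# --- Illustrative usage (added sketch with hypothetical toy data) ---
if __name__ == '__main__':
    # The nan pair is dropped by filter_nan, so each metric below is
    # computed over the remaining three points.
    obs = np.array([1.0, 2.0, np.nan, 4.0])
    sim = np.array([1.1, 1.9, 2.5, 4.2])
    assert abs(rmse(sim, obs) - np.sqrt(0.02)) < 1e-9
    assert abs(bias(sim, obs) - 0.2 / 3) < 1e-9
    assert NS(sim, obs) > 0.98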
| {
"content_hash": "21b6d0d03b8cfa8c26f043570b14593c",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 75,
"avg_line_length": 19.35377358490566,
"alnum_prop": 0.5303436509870826,
"repo_name": "biogeochemistry/PorousMediaLab",
"id": "10932ff3509f35456a50b8503dd54fdbe0777bd4",
"size": "4103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "porousmedialab/metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101253"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
} |
from test import *
MONGO_HOST = 'localhost'
| {
"content_hash": "0319dbb2b354c0ac3cbeae679d7d559d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 24,
"avg_line_length": 15,
"alnum_prop": 0.7111111111111111,
"repo_name": "raccoongang/socraticqs2",
"id": "3d7ca2efd5a056c4596c4bee1a07be682669af7a",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/mysite/settings/test_localhost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189600"
},
{
"name": "Dockerfile",
"bytes": "580"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "342788"
},
{
"name": "JavaScript",
"bytes": "133425"
},
{
"name": "Makefile",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "1504025"
},
{
"name": "Shell",
"bytes": "1521"
}
],
"symlink_target": ""
} |
from src.base.test_cases import TestCases
class SortCharByFreqTestCases(TestCases):
def __init__(self):
super(SortCharByFreqTestCases, self).__init__()
self.__add_test_case__('Test 1', 'tree', ['eetr', 'eert'])
self.__add_test_case__('Test 2', 'cccaaa', ['cccaaa', 'aaaccc'])
self.__add_test_case__('Test 3', 'Aabb', ['bbAa', 'bbaA'])
self.__add_test_case__('Test 4', 'eeeee', ['eeeee'])
| {
"content_hash": "8e15238db396ed95c2bad5496fae3286",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 39.54545454545455,
"alnum_prop": 0.5770114942528736,
"repo_name": "hychrisli/PyAlgorithms",
"id": "a300c5e6eed2eea909643e47a031632441615955",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/part2/q451_test_sort_char_by_freq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "201747"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
from . import py3k_compat as pickle
import numpy as np
def pickle_results(filename=None, verbose=True):
"""Generator for decorator which allows pickling the results of a funcion
Pickle is python's built-in object serialization. This decorator, when
used on a function, saves the results of the computation in the function
to a pickle file. If the function is called a second time with the
same inputs, then the computation will not be repeated and the previous
results will be used.
This functionality is useful for computations which take a long time,
but will need to be repeated (such as the first step of a data analysis).
Parameters
----------
filename : string (optional)
pickle file to which results will be saved.
If not specified, then the file is '<funcname>_output.pkl'
where '<funcname>' is replaced by the name of the decorated function.
verbose : boolean (optional)
if True, then print a message to standard out specifying when the
pickle file is written or read.
Examples
--------
>>> @pickle_results('tmp.pkl', verbose=True)
... def f(x):
... return x * x
>>> f(4)
@pickle_results: computing results and saving to 'tmp.pkl'
16
>>> f(4)
@pickle_results: using precomputed results from 'tmp.pkl'
16
>>> f(6)
@pickle_results: computing results and saving to 'tmp.pkl'
36
>>> import os; os.remove('tmp.pkl')
"""
def pickle_func(f, filename=filename, verbose=verbose):
if filename is None:
filename = '%s_output.pkl' % f.__name__
def new_f(*args, **kwargs):
try:
D = pickle.load(open(filename, 'rb'))
cache_exists = True
except:
D = {}
cache_exists = False
# simple comparison doesn't work in the case of numpy arrays
Dargs = D.get('args')
Dkwargs = D.get('kwargs')
try:
args_match = (args == Dargs)
except:
args_match = np.all([np.all(a1 == a2)
for (a1, a2) in zip(Dargs, args)])
try:
kwargs_match = (kwargs == Dkwargs)
except:
kwargs_match = ((sorted(Dkwargs.keys())
== sorted(kwargs.keys()))
and (np.all([np.all(Dkwargs[key]
== kwargs[key])
for key in kwargs])))
if (type(D) == dict and D.get('funcname') == f.__name__
and args_match and kwargs_match):
if verbose:
print("@pickle_results: using precomputed "
"results from '%s'" % filename)
retval = D['retval']
else:
if verbose:
print("@pickle_results: computing results "
"and saving to '%s'" % filename)
if cache_exists:
print(" warning: cache file '%s' exists" % filename)
print(" - args match: %s" % args_match)
print(" - kwargs match: %s" % kwargs_match)
retval = f(*args, **kwargs)
funcdict = dict(funcname=f.__name__, retval=retval,
args=args, kwargs=kwargs)
with open(filename, 'wb') as outfile:
pickle.dump(funcdict, outfile)
return retval
return new_f
return pickle_func
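

if __name__ == '__main__':
    # Minimal, hypothetical demonstration with a numpy-array argument, which
    # exercises the element-wise comparison branches above; run via
    # `python -m astroML.decorators` so the relative import resolves.
    @pickle_results('tmp_array_cache.pkl', verbose=True)
    def _total(x):
        return x.sum()

    arr = np.arange(5)
    print(_total(arr))  # computes the result and saves it to the pickle file
    print(_total(arr))  # reuses the precomputed result from the pickle file
    os.remove('tmp_array_cache.pkl')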
| {
"content_hash": "8afd6c4a65d0ad2afd33d6ed0c508a2a",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 77,
"avg_line_length": 37.05940594059406,
"alnum_prop": 0.5153620090836227,
"repo_name": "kcavagnolo/astroML",
"id": "7dc0a239a6eeb818130d51aedb2f3ad7c2034908",
"size": "3743",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "astroML/decorators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "696"
},
{
"name": "Python",
"bytes": "1087103"
}
],
"symlink_target": ""
} |
import collections
from .. import scalartypes as scalars
from .. import ir
def allocateRequiredConstants(pool, long_irs):
# see comments in writebytecode.finishCodeAttrs
# We allocate the constants pretty much greedily. This is far from optimal,
# but it shouldn't be a big deal since this code is almost never required
# in the first place. In fact, there are no known real world classes that
# even come close to exhausting the constant pool.
narrow_pairs = collections.Counter()
wide_pairs = collections.Counter()
alt_lens = {}
for _ir in long_irs:
for ins in _ir.flat_instructions:
if isinstance(ins, ir.PrimConstant):
key = ins.cpool_key()
alt_lens[key] = len(ins.bytecode)
if scalars.iswide(ins.st):
if len(ins.bytecode) > 3:
wide_pairs[key] += 1
else:
if len(ins.bytecode) > 2:
narrow_pairs[key] += 1
# see if already in the constant pool
for x in pool.vals:
del narrow_pairs[x]
del wide_pairs[x]
# if we have enough space for all required constants, preferentially allocate
# most commonly used constants to first 255 slots
if pool.space() >= len(narrow_pairs) + 2*len(wide_pairs) and pool.lowspace() > 0:
        # We can't use Counter.most_common here because it is nondeterministic in
# the case of ties.
most_common = sorted(narrow_pairs, key=lambda p:(-narrow_pairs[p], p))
for key in most_common[:pool.lowspace()]:
pool.insertDirectly(key, True)
del narrow_pairs[key]
scores = {}
for p, count in narrow_pairs.items():
scores[p] = (alt_lens[p] - 3) * count
for p, count in wide_pairs.items():
scores[p] = (alt_lens[p] - 3) * count
# sort by score
narrowq = sorted(narrow_pairs, key=lambda p:(scores[p], p))
wideq = sorted(wide_pairs, key=lambda p:(scores[p], p))
while pool.space() >= 1 and (narrowq or wideq):
if not narrowq and pool.space() < 2:
break
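        # A wide constant occupies two constant pool slots, so the single best
        # wide candidate must beat the combined savings of the two best narrow
        # candidates to be chosen.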
wscore = sum(scores[p] for p in wideq[-1:])
nscore = sum(scores[p] for p in narrowq[-2:])
if pool.space() >= 2 and wscore > nscore and wscore > 0:
pool.insertDirectly(wideq.pop(), False)
elif nscore > 0:
pool.insertDirectly(narrowq.pop(), True)
else:
break
| {
"content_hash": "eeab938778b87f2b586c361f657397f6",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 85,
"avg_line_length": 39.70967741935484,
"alnum_prop": 0.5934199837530463,
"repo_name": "tp7309/AndroidOneKeyDecompiler",
"id": "60f77e73a6cf2d6eace32d6124a394e63d7a0477",
"size": "3059",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libs/enjarify/enjarify/jvm/optimization/consts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "17032"
},
{
"name": "Python",
"bytes": "6214"
},
{
"name": "Shell",
"bytes": "28752"
}
],
"symlink_target": ""
} |
import ctypes
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero, no_error_check
class User32Proxy(ApiProxy):
APIDLL = "user32"
default_error_check = staticmethod(fail_on_zero)
# Window
@User32Proxy()
def EnumWindows(lpEnumFunc, lParam):
return EnumWindows.ctypes_function(lpEnumFunc, lParam)
@User32Proxy()
def GetParent(hWnd):
return GetParent.ctypes_function(hWnd)
@User32Proxy(error_check=no_error_check)
def GetWindowTextA(hWnd, lpString, nMaxCount):
return GetWindowTextA.ctypes_function(hWnd, lpString, nMaxCount)
@User32Proxy()
def GetWindowTextW(hWnd, lpString, nMaxCount):
return GetWindowTextW.ctypes_function(hWnd, lpString, nMaxCount)
@User32Proxy()
def FindWindowA(lpClassName, lpWindowName):
return FindWindowA.ctypes_function(lpClassName, lpWindowName)
@User32Proxy()
def FindWindowW(lpClassName, lpWindowName):
return FindWindowW.ctypes_function(lpClassName, lpWindowName)
@User32Proxy()
def GetWindowModuleFileNameA(hwnd, pszFileName, cchFileNameMax):
return GetWindowModuleFileNameA.ctypes_function(hwnd, pszFileName, cchFileNameMax)
@User32Proxy()
def GetWindowModuleFileNameW(hwnd, pszFileName, cchFileNameMax):
return GetWindowModuleFileNameW.ctypes_function(hwnd, pszFileName, cchFileNameMax)
@User32Proxy()
def EnumChildWindows(hWndParent, lpEnumFunc, lParam):
return EnumChildWindows.ctypes_function(hWndParent, lpEnumFunc, lParam)
@User32Proxy()
def GetClassInfoExA(hinst, lpszClass, lpwcx):
return GetClassInfoExA.ctypes_function(hinst, lpszClass, lpwcx)
@User32Proxy()
def GetClassInfoExW(hinst, lpszClass, lpwcx):
return GetClassInfoExW.ctypes_function(hinst, lpszClass, lpwcx)
@User32Proxy()
def GetWindowThreadProcessId(hWnd, lpdwProcessId):
return GetWindowThreadProcessId.ctypes_function(hWnd, lpdwProcessId)
@User32Proxy()
def WindowFromPoint(Point):
return WindowFromPoint.ctypes_function(Point)
@User32Proxy()
def GetWindowRect(hWnd, lpRect):
return GetWindowRect.ctypes_function(hWnd, lpRect)
@User32Proxy("RealGetWindowClassA")
def RealGetWindowClassA(hwnd, pszType, cchType=None):
if cchType is None:
cchType = len(pszType)
return RealGetWindowClassA.ctypes_function(hwnd, pszType, cchType)
@User32Proxy("RealGetWindowClassW")
def RealGetWindowClassW(hwnd, pszType, cchType=None):
if cchType is None:
cchType = len(pszType)
return RealGetWindowClassW.ctypes_function(hwnd, pszType, cchType)
@User32Proxy("GetClassNameA")
def GetClassNameA(hwnd, pszType, cchType=None):
    if cchType is None:
        cchType = len(pszType)
    return GetClassNameA.ctypes_function(hwnd, pszType, cchType)
@User32Proxy("GetClassNameW")
def GetClassNameW(hwnd, pszType, cchType=None):
    if cchType is None:
        cchType = len(pszType)
    return GetClassNameW.ctypes_function(hwnd, pszType, cchType)
## Windows Message
@User32Proxy()
def MessageBoxA(hWnd=0, lpText=NeededParameter, lpCaption=None, uType=0):
return MessageBoxA.ctypes_function(hWnd, lpText, lpCaption, uType)
@User32Proxy()
def MessageBoxW(hWnd=0, lpText=NeededParameter, lpCaption=None, uType=0):
return MessageBoxW.ctypes_function(hWnd, lpText, lpCaption, uType)
# Cursor
@User32Proxy()
def GetCursorPos(lpPoint):
return GetCursorPos.ctypes_function(lpPoint)
# System
# If the function succeeds, the return value is the requested system metric or configuration setting.
# If the function fails, the return value is 0. GetLastError does not provide extended error information.
# And 0 is also a valid return value, so a failure cannot be told apart from a legitimate result; hence no error check.
@User32Proxy(error_check=no_error_check)
def GetSystemMetrics(nIndex):
return GetSystemMetrics.ctypes_function(nIndex)
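

# --- Illustrative usage (added sketch; Windows only, assumes gdef.POINT is available) ---
if __name__ == '__main__':
    # Query the cursor position through the proxied API, then show it in a
    # message box using the default arguments defined above.
    pt = gdef.POINT()
    GetCursorPos(ctypes.byref(pt))
    MessageBoxW(lpText=u"Cursor at ({0}, {1})".format(pt.x, pt.y),
                lpCaption=u"user32 proxy demo")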
| {
"content_hash": "a9d87bae10ed15063317c9c845be8aec",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 105,
"avg_line_length": 31.57983193277311,
"alnum_prop": 0.7783395423097392,
"repo_name": "hakril/PythonForWindows",
"id": "98b16bc037d03c527f48a224565465096739ed6e",
"size": "3758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windows/winproxy/apis/user32.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4087889"
}
],
"symlink_target": ""
} |
"""
core.configs
"""
__author__ = 'Rnd495'
__all__ = ['Configs', 'ConfigsError']
import os
import json
# check "config/now.conf"
# if not exists, create by copying "config/default.conf" to "config/now.conf"
ROOT_PATH = os.path.split(os.path.split(__file__)[0])[0]
CONFIG_PATH_NOW = os.path.join(ROOT_PATH, "config/now.conf")
CONFIG_PATH_DEFAULT = os.path.join(ROOT_PATH, "config/default.conf")
CONFIG_NOTNULL = [
'database_url',
'init_admin_username',
'init_admin_password'
]
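
# Example of the expected "config/now.conf" format (illustrative values only;
# anything to the right of '=' must be a valid JSON literal):
#
# # lines starting with '#' are comments
# database_url = "sqlite:///tenhou.db"
# init_admin_username = "admin"
# init_admin_password = "change-me"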
if not os.path.exists(CONFIG_PATH_NOW):
# try to copy from default
import shutil
shutil.copy(CONFIG_PATH_DEFAULT, CONFIG_PATH_NOW)
del shutil
class ConfigsError(Exception):
"""
ConfigsError
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.error_line = kwargs.get('line', None)
class Configs(object):
"""
Configs
"""
def __init__(self, config_file_name):
with open(config_file_name, 'rb') as file_handle:
for line in file_handle:
line = line.strip()
                # lines starting with '#' are comments
if not line or line.startswith('#'):
continue
separator_index = line.find('=')
if separator_index < 0:
raise ConfigsError('ConfigsError: config line syntax error with "%s"' % line)
name = line[:separator_index].strip()
value = line[separator_index + 1:].strip()
# accept upper case
if value.lower() in ('true', 'false'):
value = value.lower()
# param type parse
try:
data = json.loads(value)
self.__dict__[name] = data
except ValueError:
raise ConfigsError('ConfigsError: unknown data format "%s"' % value)
for name in CONFIG_NOTNULL:
if self.__dict__.get(name, None) is None:
raise ConfigsError('ConfigsError: property "%s" is not set' % name)
@staticmethod
def instance(config_file_name=None):
if not hasattr(Configs, "_instance"):
Configs._instance = Configs(config_file_name or CONFIG_PATH_NOW)
return Configs._instance
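# A minimal sketch of the expected config syntax (illustrative only; right-hand
# sides must be valid JSON, and 'True'/'False' are accepted in any letter case):
#
#   # a comment line
#   database_url = "sqlite:///data.db"
#   init_admin_username = "admin"
#   init_admin_password = "password"
#   debug = True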
if __name__ == '__main__':
print "NOW_CONFIGS: "
print "path:", CONFIG_PATH_NOW
for k, v in Configs.instance().__dict__.iteritems():
print k, "=", v | {
"content_hash": "3ddd8fcd283098c9368306493dc797be",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 97,
"avg_line_length": 32.60526315789474,
"alnum_prop": 0.5585149313962873,
"repo_name": "SakuraSa/TenhouLoggerX",
"id": "fb8d38a9d4279160b1b528e866dd9747833490dc",
"size": "2516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/configs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1412"
},
{
"name": "HTML",
"bytes": "19738"
},
{
"name": "JavaScript",
"bytes": "11684"
},
{
"name": "Python",
"bytes": "57478"
}
],
"symlink_target": ""
} |
import os.path
import sys
import logging
from typing import List
import pygame
from .scenes.loginscene import LoginScene
from .config import config
class Game:
def __init__(self):
self.logger = logging.getLogger(__name__)
pygame.init()
self._clock = pygame.time.Clock()
# TODO: window icon doesn't show
icon_path = os.path.abspath(os.path.join(config.get(section="hacc", option="asset_dir"), "icon.png"))
icon = pygame.image.load(icon_path)
pygame.display.set_icon(icon)
self._width = config.getint(section="hacc", option="width")
self._height = config.getint(section="hacc", option="height")
self._display = pygame.display.set_mode((self._width, self._height),
pygame.DOUBLEBUF,
config.getint(section="hacc", option="depth"))
self._fps = config.getint(section="hacc", option="fps")
pygame.display.set_caption("hacc")
self._running = True
loginmenu = LoginScene(width=self._width, height=self._height)
self._scene = loginmenu
def _handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self._quit()
elif event.type == pygame.KEYDOWN:
# if event.key == K_LEFT:
# if self.snake.direction != 1:
# self.snake.direction = 0
# elif event.key == K_RIGHT:
# if self.snake.direction != 0:
# self.snake.direction = 1
# elif event.key == K_UP:
# if self.snake.direction != 3:
# self.snake.direction = 2
# elif event.key == K_DOWN:
# if self.snake.direction != 2:
# self.snake.direction = 3
# elif event.key == K_ESCAPE:
# pygame.event.post(pygame.event.Event(QUIT))
pass
def _update(self):
self._scene.update()
def _render(self):
self._display.fill((0, 0, 0))
scene_surface = self._scene.render()
self._display.blit(scene_surface, (0, 0))
def mainloop(self):
while self._running:
self._handle_events()
self._update()
self._render()
self._clock.tick(self._fps)
def _quit(self, errors: List = None):
"""
        Shut down the game and pygame nicely.
:param errors: List of errors.
:return:
"""
pygame.quit()
if errors is None:
sys.exit(0)
else:
# TODO: handle errors
sys.exit(1)
| {
"content_hash": "0558f1cde2e644919a73a6264788249d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 109,
"avg_line_length": 30.81111111111111,
"alnum_prop": 0.5106382978723404,
"repo_name": "XenGi/hacc",
"id": "df0793c7ab21c90dcbab1bf106f2de3783bd54a7",
"size": "2773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacc/game.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7469"
}
],
"symlink_target": ""
} |
from autobahn.wamp import Api
# NOTE: `coroutine` is used below but never imported in the original file; under
# Twisted it is commonly an alias for inlineCallbacks, which is assumed here.
from twisted.internet.defer import inlineCallbacks as coroutine
# create an API object to use the decorator style
# register/subscribe WAMP actions
api = Api()
@api.register(u'com.example.add2')
def add2(a, b):
return a + b
@api.subscribe(u'com.example.on-hello', details=True)
def on_hello(msg, details=None):
print(u'Someone said: {}'.format(msg))
details.session.leave()
@coroutine
def component1(reactor, session):
"""
A first component, which gets called "setup-like". When
it returns, this signals that the component is ready for work.
"""
# expose the API on the session
yield session.expose(api)
@coroutine
def component2(reactor, session):
"""
A second component, which gets called "main-like".
When it returns, this will automatically close the session.
"""
result = yield session.call(u'com.example.add2', 2, 3)
session.publish(u'com.example.on-hello', u'result={}'.format(result))
if __name__ == '__main__':
from autobahn.twisted.component import Component, run
# Components wrap either a setup or main function and
# can be configured with transports, authentication and so on.
components = [
Component(setup=component1),
Component(main=component2)
]
# a convenience runner is provided which takes a list of
# components and runs all of them
run(components)
| {
"content_hash": "b92be2d79a6d556d6235091f5b2f81ee",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 28.229166666666668,
"alnum_prop": 0.6856088560885609,
"repo_name": "RyanHope/AutobahnPython",
"id": "4ecd24732d4c9ef2d07c84334e0791a4e0354265",
"size": "1355",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/work/newapi/test_newapi13.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3648"
},
{
"name": "Python",
"bytes": "983364"
}
],
"symlink_target": ""
} |
import unittest
import time
from app import create_app, db
from app.models import User, Permission, AnonymousUser, Role
class UserModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_setter(self):
u = User(password='cat')
self.assertTrue(u.password_hash is not None)
def test_no_password_getter(self):
u = User(password='cat')
with self.assertRaises(AttributeError):
u.password
def test_password_verification(self):
u = User(password='cat')
self.assertTrue(u.verify_password('cat'))
self.assertFalse(u.verify_password('dog'))
def test_password_salts_are_random(self):
u = User(password='cat')
u2 = User(password='dog')
self.assertTrue(u.password_hash != u2.password_hash)
def test_invalid_confirmation_token(self):
u1 = User(password='cat')
u2 = User(password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_confirmation_token()
self.assertFalse(u2.confirm(token))
def test_expired_confirmation_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token(1)
time.sleep(2)
self.assertFalse(u.confirm(token))
def test_valid_reset_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_reset_token()
self.assertTrue(u.reset_password(token, 'dog'))
self.assertTrue(u.verify_password('dog'))
def test_invalid_reset_token(self):
u1 = User(password='cat')
u2 = User(password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_reset_token()
self.assertFalse(u2.reset_password(token, 'horse'))
self.assertTrue(u2.verify_password('dog'))
def test_valid_email_change_token(self):
u = User(email='john@example.com', password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_email_change_token('susan@example.org')
self.assertTrue(u.change_email(token))
self.assertTrue(u.email == 'susan@example.org')
def test_invalid_email_change_token(self):
u1 = User(email='john@example.com', password='cat')
u2 = User(email='susan@example.org', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_email_change_token('david@example.net')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == 'susan@example.org')
def test_duplicate_email_change_token(self):
u1 = User(email='john@example.com', password='cat')
u2 = User(email='susan@example.org', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u2.generate_email_change_token('john@example.com')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == 'susan@example.org')
def test_roles_and_permissions(self):
u = User(email='john@example.com', password='cat')
self.assertTrue(u.can(Permission.WRITE_ARTICLES))
self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
def test_anonymous_user(self):
u = AnonymousUser()
self.assertFalse(u.can(Permission.FOLLOW))
def test_gravatar(self):
        # test the avatar (gravatar) URL generation function
u = User(email='dronly@example.com')
with self.app.test_request_context('/'):
gravatar = u.gravatar()
gravatar_size = u.gravatar(size=256)
gravatar_pg = u.gravatar(rating='pg')
gravatar_retro = u.gravatar(default='retro')
with self.app.test_request_context('/', base_url='https://example.com'):
gravatar_ssl = u.gravatar()
self.assertIn("http://www.gravatar.com/avatar/" +
"f59a5c7a91436fdcf6a2c9cae4472dbb", gravatar)
self.assertIn("s=256", gravatar_size)
self.assertIn("r=pg", gravatar_pg)
self.assertIn("d=retro", gravatar_retro)
self.assertIn("https://secure.gravatar.com/avatar/" +
"f59a5c7a91436fdcf6a2c9cae4472dbb", gravatar_ssl)
| {
"content_hash": "7039d60b116d8bb67629732b3d378600",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 80,
"avg_line_length": 35.8062015503876,
"alnum_prop": 0.6139857111928989,
"repo_name": "dronly/flask-blog",
"id": "3b5aceec9197106859c3e037a853636b7012f969",
"size": "4662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_user_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "564"
},
{
"name": "HTML",
"bytes": "9562"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "38849"
}
],
"symlink_target": ""
} |
from secret import twitter_instance
from json import dump
import sys
tw = twitter_instance()
# [1]
response = tw.users.report_spam(screen_name='showa_yojyo')
# [2]
dump(response, sys.stdout, ensure_ascii=False, indent=4, sort_keys=True)
| {
"content_hash": "e3d02ba6b16d69f8a1e81e55142373c5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7416666666666667,
"repo_name": "showa-yojyo/notebook",
"id": "997a70e5515af26e8e41186733e2b4b66e834afa",
"size": "371",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "doc/source/_sample/ptt/users-report_spam.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import unittest
from quantum.common import utils
from quantum.plugins.nec.common import config
from quantum.plugins.nec.db import api as ndb
from quantum.plugins.nec.db import models as nmodels
from quantum.plugins.nec import ofc_manager
class OFCManagerTest(unittest.TestCase):
    """Class consisting of OFCManager unit tests"""
def setUp(self):
driver = "quantum.tests.unit.nec.stub_ofc_driver.StubOFCDriver"
config.CONF.set_override('driver', driver, 'OFC')
ndb.initialize()
self.ofc = ofc_manager.OFCManager()
def tearDown(self):
ndb.clear_db()
def get_random_params(self):
"""create random parameters for portinfo test"""
tenant = utils.str_uuid()
network = utils.str_uuid()
port = utils.str_uuid()
_filter = utils.str_uuid()
none = utils.str_uuid()
return tenant, network, port, _filter, none
def testa_create_ofc_tenant(self):
"""test create ofc_tenant"""
t, n, p, f, none = self.get_random_params()
self.assertFalse(ndb.find_ofc_item(nmodels.OFCTenant, t))
self.ofc.create_ofc_tenant(t)
self.assertTrue(ndb.find_ofc_item(nmodels.OFCTenant, t))
tenant = ndb.find_ofc_item(nmodels.OFCTenant, t)
self.assertEqual(tenant.id, "ofc-" + t[:-4])
def testb_exists_ofc_tenant(self):
"""test exists_ofc_tenant"""
t, n, p, f, none = self.get_random_params()
self.assertFalse(self.ofc.exists_ofc_tenant(t))
self.ofc.create_ofc_tenant(t)
self.assertTrue(self.ofc.exists_ofc_tenant(t))
def testc_delete_ofc_tenant(self):
"""test delete ofc_tenant"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.assertTrue(ndb.find_ofc_item(nmodels.OFCTenant, t))
self.ofc.delete_ofc_tenant(t)
self.assertFalse(ndb.find_ofc_item(nmodels.OFCTenant, t))
def testd_create_ofc_network(self):
"""test create ofc_network"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.assertFalse(ndb.find_ofc_item(nmodels.OFCNetwork, n))
self.ofc.create_ofc_network(t, n)
self.assertTrue(ndb.find_ofc_item(nmodels.OFCNetwork, n))
network = ndb.find_ofc_item(nmodels.OFCNetwork, n)
self.assertEqual(network.id, "ofc-" + n[:-4])
def teste_exists_ofc_network(self):
"""test exists_ofc_network"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.assertFalse(self.ofc.exists_ofc_network(n))
self.ofc.create_ofc_network(t, n)
self.assertTrue(self.ofc.exists_ofc_network(n))
def testf_delete_ofc_network(self):
"""test delete ofc_network"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
self.assertTrue(ndb.find_ofc_item(nmodels.OFCNetwork, n))
self.ofc.delete_ofc_network(t, n)
self.assertFalse(ndb.find_ofc_item(nmodels.OFCNetwork, n))
def testg_create_ofc_port(self):
"""test create ofc_port"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
ndb.add_portinfo(p, "0xabc", 1, 65535, "00:11:22:33:44:55")
self.assertFalse(ndb.find_ofc_item(nmodels.OFCPort, p))
self.ofc.create_ofc_port(t, n, p)
self.assertTrue(ndb.find_ofc_item(nmodels.OFCPort, p))
port = ndb.find_ofc_item(nmodels.OFCPort, p)
self.assertEqual(port.id, "ofc-" + p[:-4])
def testh_exists_ofc_port(self):
"""test exists_ofc_port"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
ndb.add_portinfo(p, "0xabc", 2, 65535, "00:12:22:33:44:55")
self.assertFalse(self.ofc.exists_ofc_port(p))
self.ofc.create_ofc_port(t, n, p)
self.assertTrue(self.ofc.exists_ofc_port(p))
def testi_delete_ofc_port(self):
"""test delete ofc_port"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
ndb.add_portinfo(p, "0xabc", 3, 65535, "00:13:22:33:44:55")
self.ofc.create_ofc_port(t, n, p)
self.assertTrue(ndb.find_ofc_item(nmodels.OFCPort, p))
self.ofc.delete_ofc_port(t, n, p)
self.assertFalse(ndb.find_ofc_item(nmodels.OFCPort, p))
def testj_create_ofc_packet_filter(self):
"""test create ofc_filter"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
self.assertFalse(ndb.find_ofc_item(nmodels.OFCFilter, f))
self.ofc.create_ofc_packet_filter(t, n, f, {})
self.assertTrue(ndb.find_ofc_item(nmodels.OFCFilter, f))
_filter = ndb.find_ofc_item(nmodels.OFCFilter, f)
self.assertEqual(_filter.id, "ofc-" + f[:-4])
def testk_exists_ofc_packet_filter(self):
"""test exists_ofc_packet_filter"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
self.assertFalse(self.ofc.exists_ofc_packet_filter(f))
self.ofc.create_ofc_packet_filter(t, n, f, {})
self.assertTrue(self.ofc.exists_ofc_packet_filter(f))
def testl_delete_ofc_packet_filter(self):
"""test delete ofc_filter"""
t, n, p, f, none = self.get_random_params()
self.ofc.create_ofc_tenant(t)
self.ofc.create_ofc_network(t, n)
self.ofc.create_ofc_packet_filter(t, n, f, {})
self.assertTrue(ndb.find_ofc_item(nmodels.OFCFilter, f))
self.ofc.delete_ofc_packet_filter(t, n, f)
self.assertFalse(ndb.find_ofc_item(nmodels.OFCFilter, f))
| {
"content_hash": "2a055e40f2a78ad8c01921b276468127",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 71,
"avg_line_length": 41.42657342657343,
"alnum_prop": 0.6205266711681297,
"repo_name": "psiwczak/quantum",
"id": "05dde8e52a06a2646edcce4fe91a508ece92c3d8",
"size": "6623",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "quantum/tests/unit/nec/test_ofc_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "32974"
},
{
"name": "Python",
"bytes": "2446843"
},
{
"name": "Scala",
"bytes": "3005"
},
{
"name": "Shell",
"bytes": "7879"
}
],
"symlink_target": ""
} |
"""
This is the installation script for Satchmo. It will create the base Satchmo configuration.
Before running this script, you must have python and pip installed.
It is also recommended that you install Python Imaging using your distribution's
package method.
The simplest way to install Satchmo would be:
pip install -r http://bitbucket.org/chris1610/satchmo/raw/tip/scripts/requirements.txt
pip install -e hg+http://bitbucket.org/chris1610/satchmo/#egg=satchmo
Then run:
python clonesatchmo.py
"""
import os
import shutil
from random import choice
import re
from optparse import OptionParser
import string
__VERSION__ = "0.2"
def parse_command_line():
usage = 'usage: %prog [options]'
version = 'Version: %prog ' + '%s' % __VERSION__
parser = OptionParser(usage=usage, version=version)
parser.add_option('-s', '--site', action='store',type='string', default='store',
dest='site_name', help="Top level directory name for the site. [default: %default]")
parser.add_option('-l', '--localsite', action='store',type='string', default='localsite',
dest='local_site_name', help="Name for the local application stub. [default: %default]")
parser.add_option('--skel', action='store', type='string', default = None,
dest='skeleton_dir', help="Path to the skeleton directory")
opts, args = parser.parse_args()
return opts, args
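# Example invocation (hypothetical names/paths):
#   python clonesatchmo.py -s mystore -l mylocalsite --skel /path/to/satchmo/projects/skeleton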
def check_skeleton_dir(skel_dir):
"""
Verify that the skeleton directory exists and that it points
to a location with a localsite subdir.
"""
if skel_dir is None:
return (True, "")
if os.path.isdir(skel_dir):
check_dir = os.path.join(skel_dir, 'localsite')
if not os.path.isdir(check_dir):
return (False, "Skeleton directory does not contain localsite subdirectory. Path should be /path/to/satchmo/projects/skeleton")
else:
return (False, "Invalid skeleton directory. Path should be /path/to/satchmo/projects/skeleton")
return (True, "")
def install_pil():
os.system('pip install %s' % pil_requirements)
def create_satchmo_site(site_name, skeleton_dir):
"""
If we are passed a skeleton_dir, use it
If we aren't we assume the script is being run from the source tree so
we try to find it.
If this doesn't work, let the user know they need to specify it manually
"""
if skeleton_dir:
src_dir = os.path.abspath(skeleton_dir)
else:
clone_dir = os.path.dirname(__file__)
src_dir = os.path.abspath(os.path.join(clone_dir,'../satchmo/projects/skeleton'))
result,msg = check_skeleton_dir(src_dir)
if not result:
return (False, msg)
dest_dir = os.path.join('./',site_name)
shutil.copytree(src_dir, dest_dir)
return (True, "")
def customize_files(site_name, local_site_name):
"""
    We need to make a couple of changes to the files copied from the skeleton directory.
Set the SECRET_KEY to a random value
Set the ROOT_URLCONF
Set the DJANGO_PROJECT
Set the DJANGO_SETTINGS_MODULE
We also need to change the directory name to local_site_name
"""
dest_dir = os.path.join('./',site_name)
# Create a random SECRET_KEY hash, and put it in the main settings.
main_settings_file = os.path.join(dest_dir, 'settings.py')
settings_contents = open(main_settings_file, 'r').read()
fp = open(main_settings_file, 'w')
secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
# Configure the other variables that need to be modified
root_urlconf = site_name + '.urls'
settings_contents = re.sub(r"(?<=ROOT_URLCONF = ')'", root_urlconf + "'",settings_contents)
django_settings = site_name + '.settings'
settings_contents = re.sub(r"(?<=DJANGO_PROJECT = ')'", site_name + "'",settings_contents)
settings_contents = re.sub(r"(?<=DJANGO_SETTINGS_MODULE = ')'", django_settings + "'",settings_contents)
local_app = "%s.%s" % (site_name,local_site_name)
settings_contents = settings_contents.replace("simple.localsite",local_app)
fp.write(settings_contents)
fp.close()
# rename the local_app directory
os.rename(os.path.join(dest_dir,'localsite'), os.path.join(dest_dir,local_site_name))
def setup_satchmo(site_name, local_site_name):
"""
Do the final configs for satchmo
"""
os.system('cd %s && python manage.py satchmo_copy_static' % site_name)
os.system('cd %s && python manage.py syncdb' % site_name)
os.system('cd %s && python manage.py satchmo_load_l10n' % site_name)
os.system('cd %s && python manage.py satchmo_load_store' % site_name)
os.system('cd %s && python manage.py satchmo_rebuild_pricing' % site_name)
if __name__ == '__main__':
opts, args = parse_command_line()
errors = []
dest_dir = os.path.join('./',opts.site_name)
result, msg = check_skeleton_dir(opts.skeleton_dir)
if not result:
errors.append(msg)
if os.path.isdir(dest_dir):
errors.append("The destination directory already exists. This script can only be used to create new projects.")
try:
import PIL as Image
except ImportError:
errors.append("The Python Imaging Library is not installed. Install from your distribution binaries.")
if errors:
for error in errors:
print error
exit()
print "Creating the Satchmo Application"
result, msg = create_satchmo_site(opts.site_name, opts.skeleton_dir)
if not result:
print msg
exit()
print "Customizing the files"
customize_files(opts.site_name, opts.local_site_name)
print "Performing initial data synching"
setup_satchmo(opts.site_name, opts.local_site_name)
print "Store installation complete."
print "You may run the server by typying: \n cd %s \n python manage.py runserver" % opts.site_name
| {
"content_hash": "2ef2d29cdb3362f89a3401336ef46574",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 139,
"avg_line_length": 40.43046357615894,
"alnum_prop": 0.6545454545454545,
"repo_name": "dokterbob/satchmo",
"id": "08a5f7f7e3c6f2bf8f89d09d7d154423724ea9e3",
"size": "6127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/clonesatchmo.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "72496"
},
{
"name": "Python",
"bytes": "1690527"
}
],
"symlink_target": ""
} |
import flask, sqlite3
class Database:
def __init__(self, path):
self.path = path
self._connection = None
def connect(self):
if not self._connection:
self._connection = sqlite3.connect(self.path)
self._connection.row_factory = sqlite3.Row
return self._connection
def close(self):
if self._connection:
self._connection.close()
def query(self, query, args=(), one=False):
cur = self.connect().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def querymany(self, query, args):
cur = self.connect().executemany(query, args)
def commit(self):
self.connect().commit()
| {
"content_hash": "b910f0ca5c5c40bda00e234d059f041b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 48,
"avg_line_length": 22.586206896551722,
"alnum_prop": 0.6824427480916031,
"repo_name": "scizzorz/flaskplate",
"id": "27898d1e8d7104e99d1c96a03b8f35b404f0d9a8",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskplate/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "1773"
}
],
"symlink_target": ""
} |
"""
Problem 0002: Save the 200 activation codes (or coupons) generated in Problem 0001 into a MySQL relational database.
"""
import pymysql
__author__ = 'Chris5641'
def code2sql():
f = open('ActivationCode.txt', 'r')
conn = pymysql.connect(user='root', passwd='password')
cursor = conn.cursor()
cursor.execute('create database if not exists accode')
cursor.execute('use accode')
cursor.execute('create table accode(id int auto_increment primary key, code varchar(10))')
for line in f.readlines():
cursor.execute('insert into accode (code) values (%s)', [line.strip()])
conn.commit()
f.close()
cursor.close()
conn.close()
if __name__ == '__main__':
code2sql()
| {
"content_hash": "e27972ca4dc1bd732974e15d2faa80b6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 94,
"avg_line_length": 27.375,
"alnum_prop": 0.6377473363774734,
"repo_name": "Yrthgze/prueba-sourcetree2",
"id": "4f81226b971b45712a3ea0ba8f60ad144f46c9da",
"size": "741",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chris5641/0002/code2sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3948"
},
{
"name": "C++",
"bytes": "5518"
},
{
"name": "CSS",
"bytes": "3474"
},
{
"name": "HTML",
"bytes": "1101085"
},
{
"name": "Java",
"bytes": "141"
},
{
"name": "JavaScript",
"bytes": "5282"
},
{
"name": "Jupyter Notebook",
"bytes": "324817"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "535355"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='vrouter',
version='0.1dev',
packages=['vrouter',
'vrouter.vrouter',
'vrouter.vrouter.cpuinfo',
'vrouter.vrouter.process_info',
'vrouter.sandesh',
'vrouter.sandesh.virtual_machine',
'vrouter.sandesh.virtual_machine.port_bmap',
'vrouter.sandesh.virtual_network',
'vrouter.sandesh.flow'
],
package_data={'':['*.html', '*.css', '*.xml']},
zip_safe=False,
long_description="Vrouter Sandesh",
install_requires=[
'lxml',
'gevent',
'geventhttpclient',
'redis',
'xmltodict',
'prettytable',
'psutil==0.4.1'
]
)
| {
"content_hash": "79d013edeb9373634aad523fbccedea5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 27.071428571428573,
"alnum_prop": 0.5131926121372031,
"repo_name": "cloudwatt/contrail-controller",
"id": "2e18f69982fdbcc50f5a72115bc52e455201064f",
"size": "828",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/vnsw/agent/uve/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "80579"
},
{
"name": "C",
"bytes": "44989"
},
{
"name": "C++",
"bytes": "14908777"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "Lua",
"bytes": "8164"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Objective-C",
"bytes": "720"
},
{
"name": "Protocol Buffer",
"bytes": "1120"
},
{
"name": "Python",
"bytes": "3057429"
},
{
"name": "Shell",
"bytes": "54611"
},
{
"name": "Thrift",
"bytes": "40763"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from blog.models import Post
class PostAdmin(admin.ModelAdmin):
    # fields displayed on the change list
list_display = ['title', 'description']
# fields to filter the change list with
list_filter = ['published', 'created']
# fields to search in change list
search_fields = ['title', 'description', 'content']
# enable the date drill down on change list
date_hierarchy = 'created'
# enable the save buttons on top on change form
save_on_top = True
# prepopulate the slug from the title - big timesaver!
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Post, PostAdmin)
| {
"content_hash": "1d19724f958a0a1dc47d40a84d4cd11d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 37.526315789473685,
"alnum_prop": 0.6367461430575035,
"repo_name": "DanielleWingler/UnderstandingDjango",
"id": "f642fc4b80489d2eb0b15ef84e85d48ce11000b8",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestSite/blog/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5746"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PXLBoardModel_1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mlb', models.BooleanField()),
('nfl', models.BooleanField()),
('nhl', models.BooleanField()),
('headlines', models.BooleanField()),
('weather', models.BooleanField()),
],
),
migrations.CreateModel(
name='PXLBoardModel_2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mlb', models.BooleanField()),
('nfl', models.BooleanField()),
('nhl', models.BooleanField()),
('headlines', models.BooleanField()),
('weather', models.BooleanField()),
],
),
migrations.CreateModel(
name='PXLBoardModel_3',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mlb', models.BooleanField()),
('nfl', models.BooleanField()),
('nhl', models.BooleanField()),
('headlines', models.BooleanField()),
('weather', models.BooleanField()),
],
),
migrations.CreateModel(
name='UserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pxlboard_1', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='pxl.PXLBoardModel_1')),
('pxlboard_2', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='pxl.PXLBoardModel_2')),
('pxlboard_3', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='pxl.PXLBoardModel_3')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "725d82b5d0bd5c1324e29580ac08c2be",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 145,
"avg_line_length": 41.916666666666664,
"alnum_prop": 0.557455268389662,
"repo_name": "PXL-CF2016/pxl-master-server",
"id": "3d4d1b0d1b4a54de2bb1f6bcd1d79693f6d64e79",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pxl/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "193997"
},
{
"name": "Python",
"bytes": "111924"
}
],
"symlink_target": ""
} |
import os
import unittest
from mock import Mock, create_autospec, patch
from apache.aurora.executor.bin.thermos_executor_main import dump_runner_pex, initialize, proxy_main
from apache.aurora.executor.common.path_detector import MesosPathDetector
from apache.aurora.executor.thermos_task_runner import DefaultThermosTaskRunnerProvider
def test_thermos_executor_valid_import_dependencies():
assert proxy_main is not None
class ThermosExecutorMainTest(unittest.TestCase):
def test_checkpoint_path(self):
mock_runner_provider = create_autospec(spec=DefaultThermosTaskRunnerProvider)
mock_dump_runner_pex = create_autospec(spec=dump_runner_pex)
mock_dump_runner_pex.return_value = Mock()
mock_options = Mock()
mock_options.execute_as_user = False
mock_options.nosetuid = False
mock_options.announcer_ensemble = None
mock_options.stop_timeout_in_secs = 1
with patch(
'apache.aurora.executor.bin.thermos_executor_main.dump_runner_pex',
return_value=mock_dump_runner_pex):
with patch(
'apache.aurora.executor.bin.thermos_executor_main.DefaultThermosTaskRunnerProvider',
return_value=mock_runner_provider) as mock_provider:
expected_path = os.path.join(os.path.abspath('.'), MesosPathDetector.DEFAULT_SANDBOX_PATH)
thermos_executor = initialize(mock_options)
assert thermos_executor is not None
assert len(mock_provider.mock_calls) == 1
args = mock_provider.mock_calls[0][1]
assert len(args) == 2 and expected_path == args[1]
| {
"content_hash": "dfa1dc725f65703f1501106f81a10e2d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 100,
"avg_line_length": 40.94736842105263,
"alnum_prop": 0.7377892030848329,
"repo_name": "thinker0/aurora",
"id": "5ad2999d2fbb05bb44fc4801df605b95f8351538",
"size": "2104",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/test/python/apache/aurora/executor/bin/test_thermos_executor_entry_point.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24231"
},
{
"name": "Groovy",
"bytes": "7847"
},
{
"name": "HTML",
"bytes": "13576"
},
{
"name": "Java",
"bytes": "3551071"
},
{
"name": "JavaScript",
"bytes": "215643"
},
{
"name": "Python",
"bytes": "1620477"
},
{
"name": "Ruby",
"bytes": "4315"
},
{
"name": "Shell",
"bytes": "91969"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Smarty",
"bytes": "25233"
},
{
"name": "Thrift",
"bytes": "58310"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
import collections
from gatesym.gates import Switch
class BinaryIn(collections.Sequence):
def __init__(self, network, size, value=0):
self.switches = [Switch(network) for i in range(size)]
self.write(value)
def write(self, value):
for switch in self.switches:
switch.write(value % 2)
value //= 2
def read(self):
res = 0
idx = 1
for switch in self.switches:
if switch.read():
res += idx
idx *= 2
return res
def __iter__(self):
return iter(self.switches)
def __len__(self):
return len(self.switches)
def __getitem__(self, key):
return self.switches.__getitem__(key)
class BinaryOut(object):
def __init__(self, gates):
self.gates = gates
def read(self):
res = 0
idx = 1
for gate in self.gates:
if gate.read():
res += idx
idx *= 2
return res
| {
"content_hash": "94a84f54b4043cb208a9370ec92bb108",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 66,
"avg_line_length": 22.354166666666668,
"alnum_prop": 0.5340167753960857,
"repo_name": "babbageclunk/gatesym",
"id": "5fd3713bdb5a0dbdbf8fe2439348ff826a8ffa66",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gatesym/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53679"
}
],
"symlink_target": ""
} |
"""
ZFS Storage Appliance NFS Cinder Volume Driver
"""
import base64
import datetime as dt
import errno
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers import nfs
from cinder.volume.drivers.san import san
from cinder.volume.drivers.zfssa import zfssarest
ZFSSA_OPTS = [
cfg.StrOpt('zfssa_data_ip',
help='Data path IP address'),
cfg.StrOpt('zfssa_https_port', default='443',
help='HTTPS port number'),
cfg.StrOpt('zfssa_nfs_mount_options', default='',
help='Options to be passed while mounting share over nfs'),
cfg.StrOpt('zfssa_nfs_pool', default='',
help='Storage pool name.'),
cfg.StrOpt('zfssa_nfs_project', default='NFSProject',
help='Project name.'),
cfg.StrOpt('zfssa_nfs_share', default='nfs_share',
help='Share name.'),
cfg.StrOpt('zfssa_nfs_share_compression', default='off',
choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
help='Data compression.'),
cfg.StrOpt('zfssa_nfs_share_logbias', default='latency',
choices=['latency', 'throughput'],
help='Synchronous write bias-latency, throughput.'),
cfg.IntOpt('zfssa_rest_timeout',
help='REST connection timeout. (seconds)')
]
LOG = log.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(ZFSSA_OPTS)
def factory_zfssa():
return zfssarest.ZFSSANfsApi()
class ZFSSANFSDriver(nfs.NfsDriver):
VERSION = '1.0.0'
volume_backend_name = 'ZFSSA_NFS'
protocol = driver_prefix = driver_volume_type = 'nfs'
def __init__(self, *args, **kwargs):
super(ZFSSANFSDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(ZFSSA_OPTS)
self.configuration.append_config_values(san.san_opts)
self.zfssa = None
self._stats = None
def do_setup(self, context):
if not self.configuration.nfs_oversub_ratio > 0:
msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
"%s") % self.configuration.nfs_oversub_ratio
LOG.error(msg)
raise exception.NfsException(msg)
        if not (0 < self.configuration.nfs_used_ratio <= 1):
msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
"and <= 1.0: %s") % self.configuration.nfs_used_ratio
LOG.error(msg)
raise exception.NfsException(msg)
package = 'mount.nfs'
try:
self._execute(package, check_exit_code=False, run_as_root=True)
except OSError as exc:
if exc.errno == errno.ENOENT:
msg = _('%s is not installed') % package
raise exception.NfsException(msg)
else:
raise exc
lcfg = self.configuration
LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
host = lcfg.san_ip
user = lcfg.san_login
password = lcfg.san_password
https_port = lcfg.zfssa_https_port
credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip']
for cred in credentials:
if not getattr(lcfg, cred, None):
exception_msg = _('%s not set in cinder.conf') % cred
LOG.error(exception_msg)
raise exception.CinderException(exception_msg)
self.zfssa = factory_zfssa()
self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)
auth_str = base64.encodestring('%s:%s' % (user, password))[:-1]
self.zfssa.login(auth_str)
self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
compression=lcfg.zfssa_nfs_share_compression,
logbias=lcfg.zfssa_nfs_share_logbias)
share_args = {
'sharedav': 'rw',
'sharenfs': 'rw',
'root_permissions': '777',
'compression': lcfg.zfssa_nfs_share_compression,
'logbias': lcfg.zfssa_nfs_share_logbias
}
self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share, share_args)
share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share)
mountpoint = share_details['mountpoint']
self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint
https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
'/shares' + mountpoint
LOG.debug('NFS mount path: %s' % self.mount_path)
LOG.debug('WebDAV path to the share: %s' % https_path)
self.shares = {}
mnt_opts = self.configuration.zfssa_nfs_mount_options
self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None
# Initialize the WebDAV client
self.zfssa.set_webdav(https_path, auth_str)
# Edit http service so that WebDAV requests are always authenticated
args = {'https_port': https_port,
'require_login': True}
self.zfssa.modify_service('http', args)
self.zfssa.enable_service('http')
def _ensure_shares_mounted(self):
try:
self._ensure_share_mounted(self.mount_path)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s.') % exc)
self._mounted_shares = [self.mount_path]
LOG.debug('Available shares %s' % self._mounted_shares)
def check_for_setup_error(self):
"""Check that driver can login.
Check also for properly configured pool, project and share
Check that the http and nfs services are enabled
"""
lcfg = self.configuration
self.zfssa.verify_pool(lcfg.zfssa_nfs_pool)
self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project)
self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share)
self.zfssa.verify_service('http')
self.zfssa.verify_service('nfs')
def create_snapshot(self, snapshot):
"""Creates a snapshot of a volume."""
LOG.info(_LI('Creating snapshot: %s'), snapshot['name'])
lcfg = self.configuration
snap_name = self._create_snapshot_name()
self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share, snap_name)
src_file = snap_name + '/' + snapshot['volume_name']
try:
self.zfssa.create_snapshot_of_volume_file(src_file=src_file,
dst_file=
snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug('Error thrown during snapshot: %s creation' %
snapshot['name'])
finally:
self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share, snap_name)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.info(_LI('Deleting snapshot: %s'), snapshot['name'])
self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot, method='COPY'):
LOG.info(_LI('Creatng volume from snapshot. volume: %s'),
volume['name'])
LOG.info(_LI('Source Snapshot: %s'), snapshot['name'])
self._ensure_shares_mounted()
self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'],
dst_file=volume['name'],
method=method)
volume['provider_location'] = self.mount_path
if volume['size'] != snapshot['volume_size']:
try:
self.extend_volume(volume, volume['size'])
except Exception:
vol_path = self.local_path(volume)
exception_msg = (_('Error in extending volume size: '
'Volume: %(volume)s '
'Vol_Size: %(vol_size)d with '
'Snapshot: %(snapshot)s '
'Snap_Size: %(snap_size)d')
% {'volume': volume['name'],
'vol_size': volume['size'],
'snapshot': snapshot['name'],
'snap_size': snapshot['volume_size']})
with excutils.save_and_reraise_exception():
LOG.error(exception_msg)
self._execute('rm', '-f', vol_path, run_as_root=True)
return {'provider_location': volume['provider_location']}
def create_cloned_volume(self, volume, src_vref):
"""Creates a snapshot and then clones the snapshot into a volume."""
LOG.info(_LI('new cloned volume: %s'), volume['name'])
LOG.info(_LI('source volume for cloning: %s'), src_vref['name'])
snapshot = {'volume_name': src_vref['name'],
'volume_id': src_vref['id'],
'volume_size': src_vref['size'],
'name': self._create_snapshot_name()}
self.create_snapshot(snapshot)
return self.create_volume_from_snapshot(volume, snapshot,
method='MOVE')
def _create_snapshot_name(self):
"""Creates a snapshot name from the date and time."""
return ('cinder-zfssa-nfs-snapshot-%s' %
dt.datetime.utcnow().isoformat())
def _get_share_capacity_info(self):
"""Get available and used capacity info for the NFS share."""
lcfg = self.configuration
share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share)
free = share_details['space_available']
used = share_details['space_total']
return free, used
def _update_volume_stats(self):
"""Get volume stats from zfssa"""
self._ensure_shares_mounted()
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['vendor_name'] = 'Oracle'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.protocol
free, used = self._get_share_capacity_info()
capacity = float(free) + float(used)
ratio_used = used / capacity
data['QoS_support'] = False
data['reserved_percentage'] = 0
if ratio_used > self.configuration.nfs_used_ratio or \
ratio_used >= self.configuration.nfs_oversub_ratio:
data['reserved_percentage'] = 100
data['total_capacity_gb'] = float(capacity) / units.Gi
data['free_capacity_gb'] = float(free) / units.Gi
self._stats = data
| {
"content_hash": "5997075a9a224490209d804f61fc32fe",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 79,
"avg_line_length": 39.5958904109589,
"alnum_prop": 0.5533644698149109,
"repo_name": "yanheven/cinder",
"id": "721f11c0a507fdb6dd7b40a3e1abfd9bf239f857",
"size": "12177",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/zfssa/zfssanfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10655225"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
from ansible import errors
try:
import netaddr
except Exception, e:
raise errors.AnsibleFilterError('python-netaddr package is not installed')
# ---- IP address and network filters ----
def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_types = [ 'type', 'bool', 'int', 'version', 'size', 'address', 'ip', 'host', \
'network', 'subnet', 'prefix', 'broadcast', 'netmask', 'hostmask', \
'unicast', 'multicast', 'private', 'public', 'loopback', 'lo', \
'revdns', 'wrap', 'ipv6', 'v6', 'ipv4', 'v4', 'cidr', 'net', \
'hostnet', 'router', 'gateway', 'gw', 'host/prefix', 'address/prefix' ]
if not value:
return False
elif value == True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
v_ip = netaddr.IPAddress(str(v.ip))
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
if query and query not in query_types and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
None
# This code checks if value maches the IP version the user wants, ie. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if not query:
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif query == 'type':
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
elif query == 'bool':
if v:
return True
elif query == 'int':
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
elif query == 'version':
return v.version
elif query == 'size':
return v.size
elif query in [ 'address', 'ip' ]:
if v.size == 1:
return str(v.ip)
if v.size > 1:
if v.ip != v.network:
return str(v.ip)
elif query == 'host':
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
elif query == 'net':
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
elif query in [ 'hostnet', 'router', 'gateway', 'gw', 'host/prefix', 'address/prefix' ]:
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
elif query == 'network':
if v.size > 1:
return str(v.network)
elif query == 'subnet':
return str(v.cidr)
elif query == 'cidr':
return str(v)
elif query == 'prefix':
return int(v.prefixlen)
elif query == 'broadcast':
if v.size > 1:
return str(v.broadcast)
elif query == 'netmask':
if v.size > 1:
return str(v.netmask)
elif query == 'hostmask':
return str(v.hostmask)
elif query == 'unicast':
if v.is_unicast():
return value
elif query == 'multicast':
if v.is_multicast():
return value
elif query == 'link-local':
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
elif query == 'private':
if v.is_private():
return value
elif query == 'public':
if v_ip.is_unicast() and not v_ip.is_private() and \
not v_ip.is_loopback() and not v_ip.is_netmask() and \
not v_ip.is_hostmask():
return value
elif query in [ 'loopback', 'lo' ]:
if v_ip.is_loopback():
return value
elif query == 'revdns':
return v_ip.reverse_dns
elif query == 'wrap':
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
elif query in [ 'ipv6', 'v6' ]:
if v.version == 4:
return str(v.ipv6())
else:
return value
elif query in [ 'ipv4', 'v4' ]:
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
elif query == '6to4':
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
elif query == 'cidr_lookup':
try:
if v in iplist:
return value
except:
return False
else:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def ipwrap(value, query = ''):
try:
if isinstance(value, (list, tuple)):
_ret = []
for element in value:
if ipaddr(element, query, version = False, alias = 'ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version = False, alias = 'ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
def ipv4(value, query = ''):
return ipaddr(value, query, version = 4, alias = 'ipv4')
def ipv6(value, query = ''):
return ipaddr(value, query, version = 6, alias = 'ipv6')
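# Illustrative usage of the filters above (hypothetical values; results follow the code):
#   '192.168.1.1'           | ipaddr             -> '192.168.1.1'
#   '192.168.1.0/24'        | ipaddr('network')  -> '192.168.1.0'
#   ['fd00::1', '10.0.0.1'] | ipv4               -> ['10.0.0.1']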
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
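#
# For example (hypothetical values, assuming standard netaddr semantics):
#
# - '192.168.0.0/24' | ipsubnet returns '192.168.0.0/24'
# - '192.168.0.0/24' | ipsubnet(28) returns '16' (the number of /28 subnets)
# - '192.168.0.0/24' | ipsubnet(28, 1) returns '192.168.0.16/28'
# - '192.168.0.5' | ipsubnet(24) returns '192.168.0.0/24'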
def ipsubnet(value, query = '', index = 'x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query = '', alias = 'hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
try:
v = netaddr.EUI(value)
except:
if query and query not in [ 'bool' ]:
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
if not query:
if v:
return value
elif query == 'bool':
if v:
return True
elif query in [ 'win', 'eui48' ]:
v.dialect = netaddr.mac_eui48
return str(v)
elif query == 'unix':
v.dialect = netaddr.mac_unix
return str(v)
elif query in [ 'pgsql', 'postgresql', 'psql' ]:
v.dialect = netaddr.mac_pgsql
return str(v)
elif query == 'cisco':
v.dialect = netaddr.mac_cisco
return str(v)
elif query == 'bare':
v.dialect = netaddr.mac_bare
return str(v)
elif query == 'linux':
v.dialect = mac_linux
return str(v)
else:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
class mac_linux(netaddr.mac_unix): pass
mac_linux.word_fmt = '%.2x'
def macaddr(value, query = ''):
return hwaddr(value, query, alias = 'macaddr')
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
def filters(self):
return {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
| {
"content_hash": "d393b135f1a0d8e95f1c09e577de6819",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 92,
"avg_line_length": 27.381322957198442,
"alnum_prop": 0.48642887594145234,
"repo_name": "le9i0nx/ansible-home",
"id": "68e2fd778c4ce41d652ae2a9e435a8be455066b5",
"size": "14780",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "filter_plugins/ipaddr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68849"
}
],
"symlink_target": ""
} |
'''
This script helps you scrape stock data available on Bloomberg Finance
and store it locally.
Please obey applicable local and federal laws and the applicable API terms of use
when using this script. I, the creator of this script, will not be responsible
for any legal issues resulting from the use of this script.
@author Gan Tu
@version python 2 or python 3
[HOW TO CHANGE PYTHON VERSION]
This script by default should be run by Python 2.
To use this in Python 3, change the following:
1) change ALL occurrences of "urllib" to "urllib.request".
'''
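# Under Python 3, for example, the fetch inside scrap() would become (sketch only):
#   htmltext = urllib.request.urlopen(urlPrefix + stocks[i] + urlAffix + option)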
import urllib
import re
import json
import os
# Stock Symbols Initialization
# Feel free to modify the file source to contain the stock symbols you plan to scrape for
stocks = open("nasdaq_symbols.txt", "r").read().split("\n")
# URL Initialization
urlPrefix = "http://www.bloomberg.com/markets/api/bulk-time-series/price/"
urlAffix = "%3AUS?timeFrame="
# Only four of these are valid options for now
# 1_DAY will scrape minute-by-minute data for one day, while the others will be daily close prices
# Feel free to modify them for your own needs
options = ["1_DAY", "1_MONTH", "1_YEAR", "5_YEAR"]
def setup():
try:
os.mkdir("data")
except Exception as e:
pass
for option in options:
try:
os.mkdir("data/" + option + "/")
except Exception as e:
pass
def scrap():
i = 0
while i < len(stocks):
for option in options:
file = open("data/" + option + "/" + stocks[i] + ".txt", "w")
file.close()
htmltext = urllib.urlopen(urlPrefix + stocks[i] + urlAffix + option)
try:
data = json.load(htmltext)[0]["price"]
key = "date"
if option == "1_DAY":
key = "dateTime"
file = open("data/" + option + "/" + stocks[i] + ".txt", "a")
for price in data:
file.write(stocks[i] + "," + price[key] + "," + str(price["value"]) + "\n")
file.close()
except Exception as e:
pass
i += 1
if __name__ == "__main__":
setup()
scrap()
| {
"content_hash": "89f216fe9ed594268a09ac220bede5d6",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 95,
"avg_line_length": 29.31081081081081,
"alnum_prop": 0.5956662056247118,
"repo_name": "Michael-Tu/tools",
"id": "6740dbc5cd1d471f34b6ff0a348f444320239e03",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stock_scraping/stock_price_scraping_to_local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "15757"
},
{
"name": "JavaScript",
"bytes": "1408"
},
{
"name": "Makefile",
"bytes": "878"
},
{
"name": "Python",
"bytes": "35798"
},
{
"name": "Shell",
"bytes": "1875"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class gslbvserver_gslbservice_binding(base_resource) :
""" Binding class showing the gslbservice that can be bound to gslbvserver.
"""
def __init__(self) :
self._servicename = ""
self._weight = 0
self._cnameentry = ""
self._ipaddress = ""
self._port = 0
self._gslbboundsvctype = ""
self._curstate = ""
self._dynamicconfwt = 0
self._cumulativeweight = 0
self._svreffgslbstate = ""
self._gslbthreshold = 0
self._preferredlocation = ""
self._thresholdvalue = 0
self._iscname = ""
self._domainname = ""
self._sitepersistcookie = ""
self._svcsitepersistence = ""
self._name = ""
self.___count = 0
@property
def weight(self) :
ur"""Weight to assign to the GSLB service.<br/>Minimum value = 1<br/>Maximum value = 100.
"""
try :
return self._weight
except Exception as e:
raise e
@weight.setter
def weight(self, weight) :
ur"""Weight to assign to the GSLB service.<br/>Minimum value = 1<br/>Maximum value = 100
"""
try :
self._weight = weight
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the virtual server on which to perform the binding operation.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the virtual server on which to perform the binding operation.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def servicename(self) :
ur"""Name of the GSLB service for which to change the weight.<br/>Minimum length = 1.
"""
try :
return self._servicename
except Exception as e:
raise e
@servicename.setter
def servicename(self, servicename) :
ur"""Name of the GSLB service for which to change the weight.<br/>Minimum length = 1
"""
try :
self._servicename = servicename
except Exception as e:
raise e
@property
def domainname(self) :
ur"""Domain name for which to change the time to live (TTL) and/or backup service IP address.<br/>Minimum length = 1.
"""
try :
return self._domainname
except Exception as e:
raise e
@domainname.setter
def domainname(self, domainname) :
ur"""Domain name for which to change the time to live (TTL) and/or backup service IP address.<br/>Minimum length = 1
"""
try :
self._domainname = domainname
except Exception as e:
raise e
@property
def cnameentry(self) :
ur"""The cname of the gslb service.
"""
try :
return self._cnameentry
except Exception as e:
raise e
@property
def svcsitepersistence(self) :
ur"""Type of Site Persistence set on the bound service.<br/>Possible values = ConnectionProxy, HTTPRedirect, NONE.
"""
try :
return self._svcsitepersistence
except Exception as e:
raise e
@property
def gslbboundsvctype(self) :
		ur"""Protocol used by services bound to the GSLB virtual server.<br/>Possible values = HTTP, FTP, TCP, UDP, SSL, SSL_BRIDGE, SSL_TCP, NNTP, ANY, SIP_UDP, RADIUS, RDP, RTSP, MYSQL, MSSQL, ORACLE.
"""
try :
return self._gslbboundsvctype
except Exception as e:
raise e
@property
def preferredlocation(self) :
ur"""The target site to be returned in the DNS response when a policy is successfully evaluated against the incoming DNS request. Target site is specified in dotted notation with up to 6 qualifiers. Wildcard `*' is accepted as a valid qualifier token.
"""
try :
return self._preferredlocation
except Exception as e:
raise e
@property
def dynamicconfwt(self) :
ur"""Weight obtained by the virtue of bound service count or weight.
"""
try :
return self._dynamicconfwt
except Exception as e:
raise e
@property
def cumulativeweight(self) :
ur"""Cumulative weight is the weight of GSLB service considering both its configured weight and dynamic weight. It is equal to product of dynamic weight and configured weight of the gslb service .
"""
try :
return self._cumulativeweight
except Exception as e:
raise e
@property
def gslbthreshold(self) :
ur"""Indicates if gslb svc has reached threshold.
"""
try :
return self._gslbthreshold
except Exception as e:
raise e
@property
def sitepersistcookie(self) :
ur"""This field is introduced for displaying the cookie in cluster setup.<br/>Minimum length = 1.
"""
try :
return self._sitepersistcookie
except Exception as e:
raise e
@property
def port(self) :
ur"""Port number.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@property
def iscname(self) :
ur"""is cname feature set on vserver.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._iscname
except Exception as e:
raise e
@property
def curstate(self) :
ur"""State of the gslb vserver.<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
"""
try :
return self._curstate
except Exception as e:
raise e
@property
def svreffgslbstate(self) :
ur"""Effective state of the gslb svc.<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
"""
try :
return self._svreffgslbstate
except Exception as e:
raise e
@property
def thresholdvalue(self) :
ur"""Tells whether threshold exceeded for this service participating in CUSTOMLB.
"""
try :
return self._thresholdvalue
except Exception as e:
raise e
@property
def ipaddress(self) :
ur"""IP address.
"""
try :
return self._ipaddress
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(gslbvserver_gslbservice_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.gslbvserver_gslbservice_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = gslbvserver_gslbservice_binding()
updateresource.name = resource.name
updateresource.servicename = resource.servicename
updateresource.weight = resource.weight
updateresource.domainname = resource.domainname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [gslbvserver_gslbservice_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].servicename = resource[i].servicename
updateresources[i].weight = resource[i].weight
updateresources[i].domainname = resource[i].domainname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = gslbvserver_gslbservice_binding()
deleteresource.name = resource.name
deleteresource.servicename = resource.servicename
deleteresource.domainname = resource.domainname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [gslbvserver_gslbservice_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].servicename = resource[i].servicename
deleteresources[i].domainname = resource[i].domainname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch gslbvserver_gslbservice_binding resources.
"""
try :
obj = gslbvserver_gslbservice_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of gslbvserver_gslbservice_binding resources.
	Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = gslbvserver_gslbservice_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
		ur""" Use this API to count gslbvserver_gslbservice_binding resources configured on NetScaler.
"""
try :
obj = gslbvserver_gslbservice_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of gslbvserver_gslbservice_binding resources.
	Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = gslbvserver_gslbservice_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Svcsitepersistence:
ConnectionProxy = "ConnectionProxy"
HTTPRedirect = "HTTPRedirect"
NONE = "NONE"
class Svreffgslbstate:
UP = "UP"
DOWN = "DOWN"
UNKNOWN = "UNKNOWN"
BUSY = "BUSY"
OUT_OF_SERVICE = "OUT OF SERVICE"
GOING_OUT_OF_SERVICE = "GOING OUT OF SERVICE"
DOWN_WHEN_GOING_OUT_OF_SERVICE = "DOWN WHEN GOING OUT OF SERVICE"
NS_EMPTY_STR = "NS_EMPTY_STR"
Unknown = "Unknown"
DISABLED = "DISABLED"
class Type:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Gslbboundsvctype:
HTTP = "HTTP"
FTP = "FTP"
TCP = "TCP"
UDP = "UDP"
SSL = "SSL"
SSL_BRIDGE = "SSL_BRIDGE"
SSL_TCP = "SSL_TCP"
NNTP = "NNTP"
ANY = "ANY"
SIP_UDP = "SIP_UDP"
RADIUS = "RADIUS"
RDP = "RDP"
RTSP = "RTSP"
MYSQL = "MYSQL"
MSSQL = "MSSQL"
ORACLE = "ORACLE"
class Iscname:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Curstate:
UP = "UP"
DOWN = "DOWN"
UNKNOWN = "UNKNOWN"
BUSY = "BUSY"
OUT_OF_SERVICE = "OUT OF SERVICE"
GOING_OUT_OF_SERVICE = "GOING OUT OF SERVICE"
DOWN_WHEN_GOING_OUT_OF_SERVICE = "DOWN WHEN GOING OUT OF SERVICE"
NS_EMPTY_STR = "NS_EMPTY_STR"
Unknown = "Unknown"
DISABLED = "DISABLED"
class gslbvserver_gslbservice_binding_response(base_response) :
def __init__(self, length=1) :
self.gslbvserver_gslbservice_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.gslbvserver_gslbservice_binding = [gslbvserver_gslbservice_binding() for _ in range(length)]
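# Minimal usage sketch (hedged): the client below assumes the standard NITRO
# nitro_service session object, which this module does not import; the address,
# credentials and vserver name are placeholders.
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   bindings = gslbvserver_gslbservice_binding.get(client, "my_gslb_vserver")
#   total = gslbvserver_gslbservice_binding.count(client, "my_gslb_vserver")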
| {
"content_hash": "1296feb3287688ce9caf616ef34d6ec7",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 253,
"avg_line_length": 28.103529411764708,
"alnum_prop": 0.6940723375753517,
"repo_name": "benfinke/ns_python",
"id": "5298bb68dbbfebd46a1ba6d935526e5e6679cac9",
"size": "12558",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/gslb/gslbvserver_gslbservice_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
import numpy as np
import base64
import json
from galry import CompoundVisual
__all__ = ['SceneCreator',
'encode_data', 'decode_data', 'serialize', 'deserialize', ]
# Scene creator
# -------------
class SceneCreator(object):
"""Construct a scene with `add_*` methods."""
def __init__(self, constrain_ratio=False,):
"""Initialize the scene."""
# options
self.constrain_ratio = constrain_ratio
# create an empty scene
self.scene = {'visuals': [], 'renderer_options': {}}
self.visual_objects = {}
# Visual methods
# --------------
def get_visuals(self):
"""Return all visuals defined in the scene."""
return self.scene['visuals']
def get_visual_object(self, name):
"""Get a visual object from its name."""
return self.visual_objects[name]
def get_visual(self, name):
"""Get a visual dictionary from its name."""
visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
if not visuals:
return None
return visuals[0]
# Visual creation methods
# -----------------------
def add_visual(self, visual_class, *args, **kwargs):
"""Add a visual. This method should be called in `self.initialize`.
        A visual is an instantiation of a `Visual`. A Visual
defines a pattern for one, or a homogeneous set of plotting objects.
Example: a text string, a set of rectangles, a set of triangles,
a set of curves, a set of points. A set of points and rectangles
        does not define a visual since it is not a homogeneous set of
objects. The technical reason for this distinction is that OpenGL
allows for very fast rendering of homogeneous objects by calling
a single rendering command (even if several objects of the same type
need to be rendered, e.g. several rectangles). The lower the number
of rendering calls, the better the performance.
Hence, a visual is defined by a particular Visual, and by
specification of fields in this visual (positions of the points,
colors, text string for the example of the TextVisual, etc.). It
also comes with a number `N` which is the number of vertices contained
in the visual (N=4 for one rectangle, N=len(text) for a text since
every character is rendered independently, etc.)
Several visuals can be created in the PaintManager, but performance
decreases with the number of visuals, so that all homogeneous
objects to be rendered on the screen at the same time should be
grouped into a single visual (e.g. multiple line plots).
Arguments:
* visual_class=None: the visual class, deriving from
`Visual` (or directly from the base class `Visual`
if you don't want the navigation-related functionality).
* visible=True: whether this visual should be rendered. Useful
for showing/hiding a transient element. When hidden, the visual
does not go through the rendering pipeline at all.
* **kwargs: keyword arguments for the visual `initialize` method.
Returns:
* visual: a dictionary containing all the information about
the visual, and that can be used in `set_data`.
"""
if 'name' not in kwargs:
kwargs['name'] = 'visual%d' % (len(self.get_visuals()))
# handle compound visual, where we add all sub visuals
# as defined in CompoundVisual.initialize()
if issubclass(visual_class, CompoundVisual):
visual = visual_class(self.scene, *args, **kwargs)
for sub_cls, sub_args, sub_kwargs in visual.visuals:
self.add_visual(sub_cls, *sub_args, **sub_kwargs)
return visual
# get the name of the visual from kwargs
name = kwargs.pop('name')
if self.get_visual(name):
raise ValueError("Visual name '%s' already exists." % name)
# pass constrain_ratio to all visuals
if 'constrain_ratio' not in kwargs:
kwargs['constrain_ratio'] = self.constrain_ratio
# create the visual object
visual = visual_class(self.scene, *args, **kwargs)
# get the dictionary version
dic = visual.get_dic()
dic['name'] = name
# append the dic to the visuals list of the scene
self.get_visuals().append(dic)
# also, record the visual object
self.visual_objects[name] = visual
return visual
# Output methods
# --------------
def get_scene(self):
"""Return the scene dictionary."""
return self.scene
def serialize(self, **kwargs):
"""Return the JSON representation of the scene."""
self.scene.update(**kwargs)
return serialize(self.scene)
def from_json(self, scene_json):
"""Import the scene from a JSON string."""
self.scene = deserialize(scene_json)
# Scene serialization methods
# ---------------------------
def encode_data(data):
"""Return the Base64 encoding of a Numpy array."""
return base64.b64encode(data)
def decode_data(s, dtype=np.float32):
"""Return a Numpy array from its encoded Base64 string. The dtype
must be provided (float32 by default)."""
return np.fromstring(base64.b64decode(s), dtype=dtype)
class ArrayEncoder(json.JSONEncoder):
"""JSON encoder that handles Numpy arrays and serialize them with base64
encoding."""
def default(self, obj):
if isinstance(obj, np.ndarray):
return encode_data(obj)
return json.JSONEncoder.default(self, obj)
def is_str(obj):
tp = type(obj)
return tp == str or tp == unicode
def serialize(scene):
"""Serialize a scene."""
# HACK: force all attributes to float32
# for visual in scene.get('visuals', []):
# if isinstance(visual.get('bounds', None), np.ndarray):
# visual['bounds'] = encode_data(visual['bounds'])
# for variable in visual.get('variables', []):
# if isinstance(variable.get('data', None), np.ndarray):
# # vartype = variable.get('vartype', 'float')
# # if vartype == 'int':
# # dtype = np.int32
# # elif vartype == 'float':
# # dtype = np.float32
# variable['data'] = encode_data(np.array(variable['data'], dtype=np.float32))
scene_json = json.dumps(scene, cls=ArrayEncoder, ensure_ascii=True)
# scene_json = scene_json.replace('\\n', '\\\\n')
return scene_json
def deserialize(scene_json):
"""Deserialize a scene."""
scene = json.loads(scene_json)
for visual in scene.get('visuals', []):
if is_str(visual.get('bounds', None)):
visual['bounds'] = decode_data(visual['bounds'], np.int32)
for variable in visual.get('variables', []):
if is_str(variable.get('data', None)):
vartype = variable.get('vartype', 'float')
if vartype == 'int':
dtype = np.int32
elif vartype == 'float':
dtype = np.float32
variable['data'] = decode_data(variable['data'], dtype)
return scene
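# A minimal round-trip sketch (assumes only this module and NumPy): exercises
# the Base64 helpers and the scene (de)serializers without adding any visuals.
if __name__ == '__main__':
    _data = np.arange(4, dtype=np.float32)
    assert np.array_equal(decode_data(encode_data(_data)), _data)
    _creator = SceneCreator()
    _scene_json = _creator.serialize()   # JSON string for an empty scene
    _creator.from_json(_scene_json)      # parse it back into a dict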
| {
"content_hash": "72951eac31101ccabc20af52f0e33708",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 94,
"avg_line_length": 39.307291666666664,
"alnum_prop": 0.5877832251225653,
"repo_name": "rossant/galry",
"id": "859b4ec505bdc0cd2753f69bcb57fc77a17da20e",
"size": "7547",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "galry/scene.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "24569"
},
{
"name": "Python",
"bytes": "397431"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
} |
from flask import current_app as app
from flask.ext.zodb import Object, List, Dict, ZODB
from hashlib import sha256
class User():
def __init__(self, username, password, userid):
self.username = username
self.passwordHash = sha256(password).hexdigest()
self.id = userid
self.offset = 0
self.restid_to_good_count = {}
self.restid_to_bad_count = {}
def updateGood(self, restid):
if restid in self.restid_to_good_count:
self.restid_to_good_count[restid] += 1
else:
self.restid_to_good_count[restid] = 1
def updateBad(self, restid):
        if restid in self.restid_to_bad_count:
self.restid_to_bad_count[restid] += 1
else:
self.restid_to_bad_count[restid] = 1
def clearOffset(self):
self.offset = 0
def incrOffset(self, amount):
self.offset += amount
def getOffset(self):
return self.offset
@staticmethod
def getUserById(db, userid):
user = db['users'].get(userid, None)
return user
@staticmethod
def save(db, user):
print("updating user with id:"+str(user.id))
db['users'][user.id] = user
class Restaurant():
'''
# @name - string
# @pos - (lat, lon) tuple of floats (in degrees)
# @restid - int unique id for restaurant
# @categories - [str1, str2, str3, ...]
# @yelpCount - int
# @yelpRating - float
# @address - str
# @city - str
# @streets - str
# @zip_code - str
'''
def __init__(self, name, pos, restid, categories, yelp_count, yelp_rating, address, city, zip_code, img_url):
self.name = name
self.pos = pos
self.lat = pos[0]
self.lon = pos[1]
self.id = restid
self.good_count = 0
self.bad_count = 0
#yelp metadata
self.categories = categories
self.yelp_count = yelp_count
self.yelp_rating = yelp_rating
self.address = address
self.city = city
self.zip_code = zip_code
self.img_url = img_url
def updateGood(self):
self.good_count += 1
def updateBad(self):
self.bad_count += 1
#db - flask-ZODB instance
@staticmethod
def checkIfExists(db, pos):
if pos in db['restaurants']:
return True
else:
return False
@staticmethod
def getByPos(db, pos):
return db['restaurants'].get(pos, None)
@staticmethod
def save(db, restObj):
db['restaurants'][restObj.pos] = restObj
        print("added " + restObj.name + " to the db.")
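# A construction-only sketch (no ZODB writes; every value below is made up):
if __name__ == '__main__':
    demo_user = User("alice", b"secret", 1)
    demo_rest = Restaurant("Example Cafe", (37.87, -122.27), 42, ["cafes"],
                           120, 4.0, "123 Main St", "Berkeley", "94704",
                           "http://example.com/photo.jpg")
    demo_user.updateGood(demo_rest.id)
    demo_rest.updateGood()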
| {
"content_hash": "afcf5d8e996ead5572c9e451aebf6a11",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 110,
"avg_line_length": 21.682692307692307,
"alnum_prop": 0.6651884700665188,
"repo_name": "skxu/Vittles",
"id": "8da3c134305bf412f40116ca2c82571e041b3b05",
"size": "2255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1832"
},
{
"name": "Python",
"bytes": "9557"
}
],
"symlink_target": ""
} |
"""Ros node for playing audio chirps and recording the returns along
with data from a depth camera.
This requires that the depth camera and sound_play nodes have been
started (in separate terminals.):
source ./ros_config_account.sh
roslaunch openni2_launch openni2.launch
roslaunch sound_play soundplay_node.launch
"""
#import h5py
import sys
import numpy as np
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from sound_play.libsoundplay import SoundClient
from pyaudio_utils import AudioPlayer, AudioRecorder
CHIRP_FILE = '/home/hoangnt/echolocation/data/16000to8000.02s.wav'
class Recorder(object):
CHIRP_RATE = 6 # in hz
RECORD_DURATION = .06 # in seconds
CHANNELS = 2
def __init__(self, file_name):
rospy.init_node('ros_record')
self.file_name = file_name
self.bridge = CvBridge()
self.latest_depth = None
self.soundhandle = SoundClient(blocking=False)
self.audio_player = AudioPlayer(CHIRP_FILE)
self.audio_recorder = AudioRecorder(channels=self.CHANNELS)
rospy.Subscriber('/camera/depth/image_raw', Image, self.depth_callback)
while self.latest_depth is None and not rospy.is_shutdown():
rospy.loginfo("WAITING FOR CAMERA DATA.")
rospy.sleep(.1)
wavs = []
images = []
rate = rospy.Rate(self.CHIRP_RATE)
while not rospy.is_shutdown():
image = self.bridge.imgmsg_to_cv2(self.latest_depth)
images.append(image)
self.soundhandle.playWave(CHIRP_FILE)
#self.audio_player.play() # takes a while to actually play. (about .015 seconds)
rospy.sleep(.04)
self.audio_recorder.record(self.RECORD_DURATION)
while not self.audio_recorder.done_recording():
rospy.sleep(.005)
audio = self.audio_recorder.get_data()
wavs.append(audio[1])
rate.sleep()
audio = np.array(wavs)
depth = np.array(images)
rospy.loginfo("Saving data to disk...")
np.savez_compressed(self.file_name, audio=audio, depth=depth)
#with h5py.File(self.file_name, 'w') as h5:
# h5.create_dataset('audio', data=audio)
# h5.create_dataset('depth', data=depth)
#self.audio_player.shutdown()
self.audio_recorder.shutdown()
def depth_callback(self, depth_image):
self.latest_depth = depth_image
if __name__ == "__main__":
Recorder(sys.argv[1])
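# Reading a recording back later (sketch): np.savez_compressed stores the two
# arrays under the keys used above, and appends ".npz" to the file name if it
# is missing.
#   session = np.load("some_session.npz")
#   audio, depth = session["audio"], session["depth"]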
| {
"content_hash": "f81d808e38d5d7619857d7d7c89c46fd",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 92,
"avg_line_length": 27.67032967032967,
"alnum_prop": 0.6449563145353455,
"repo_name": "spragunr/echolocation",
"id": "7e2cf477d906637df8e77bf9f40a3d475e45bd8d",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_ros_record.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116040"
},
{
"name": "Shell",
"bytes": "1537"
}
],
"symlink_target": ""
} |
"""
Featurizes proposed binding pockets.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import numpy as np
from deepchem.utils.save import log
from deepchem.feat import Featurizer
class BindingPocketFeaturizer(Featurizer):
"""
Featurizes binding pockets with information about chemical environments.
"""
residues = [
"ALA", "ARG", "ASN", "ASP", "CYS", "GLN", "GLU", "GLY", "HIS", "ILE",
"LEU", "LYS", "MET", "PHE", "PRO", "PYL", "SER", "SEC", "THR", "TRP",
"TYR", "VAL", "ASX", "GLX"
]
n_features = len(residues)
def featurize(self,
protein_file,
pockets,
pocket_atoms_map,
pocket_coords,
verbose=False):
"""
    Compute per-residue atom counts for each proposed binding pocket.
"""
import mdtraj
protein = mdtraj.load(protein_file)
n_pockets = len(pockets)
n_residues = len(BindingPocketFeaturizer.residues)
res_map = dict(zip(BindingPocketFeaturizer.residues, range(n_residues)))
all_features = np.zeros((n_pockets, n_residues))
for pocket_num, (pocket, coords) in enumerate(zip(pockets, pocket_coords)):
pocket_atoms = pocket_atoms_map[pocket]
for ind, atom in enumerate(pocket_atoms):
atom_name = str(protein.top.atom(atom))
# atom_name is of format RESX-ATOMTYPE
# where X is a 1 to 4 digit number
residue = atom_name[:3]
if residue not in res_map:
log("Warning: Non-standard residue in PDB file", verbose)
continue
atomtype = atom_name.split("-")[1]
all_features[pocket_num, res_map[residue]] += 1
return all_features
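# Shape sketch: for P proposed pockets, featurize() above returns a (P, 24)
# matrix of per-residue atom counts (one column per entry in
# BindingPocketFeaturizer.residues). A hypothetical call, assuming pockets,
# pocket_atoms_map and pocket_coords were produced elsewhere:
#   featurizer = BindingPocketFeaturizer()
#   features = featurizer.featurize("protein.pdb", pockets, pocket_atoms_map,
#                                   pocket_coords)
#   assert features.shape == (len(pockets), BindingPocketFeaturizer.n_features)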
| {
"content_hash": "8dbb5992ef42a4ed04a77372550ea921",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 31.607142857142858,
"alnum_prop": 0.6175141242937853,
"repo_name": "Agent007/deepchem",
"id": "bf26d0e76a73f84a5208746d56818c147ce2df34",
"size": "1770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deepchem/feat/binding_pocket_features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2129306"
},
{
"name": "Shell",
"bytes": "11976"
}
],
"symlink_target": ""
} |
"""Internal class for proxy query execution context implementation in the Azure
Cosmos database service.
"""
import json
from six.moves import xrange
from azure.cosmos.exceptions import CosmosHttpResponseError
from azure.cosmos._execution_context import multi_execution_aggregator
from azure.cosmos._execution_context.base_execution_context import _QueryExecutionContextBase
from azure.cosmos._execution_context.base_execution_context import _DefaultQueryExecutionContext
from azure.cosmos._execution_context.query_execution_info import _PartitionedQueryExecutionInfo
from azure.cosmos._execution_context import endpoint_component
from azure.cosmos.documents import _DistinctType
from azure.cosmos.http_constants import StatusCodes, SubStatusCodes
# pylint: disable=protected-access
def _is_partitioned_execution_info(e):
return (
e.status_code == StatusCodes.BAD_REQUEST and e.sub_status == SubStatusCodes.CROSS_PARTITION_QUERY_NOT_SERVABLE
)
def _get_partitioned_execution_info(e):
error_msg = json.loads(e.http_error_message)
return _PartitionedQueryExecutionInfo(json.loads(error_msg["additionalErrorInfo"]))
class _ProxyQueryExecutionContext(_QueryExecutionContextBase): # pylint: disable=abstract-method
"""Represents a proxy execution context wrapper.
By default, uses _DefaultQueryExecutionContext.
    If the backend responds with a 400 error code and a Query Execution Info,
    it switches to _MultiExecutionContextAggregator.
"""
def __init__(self, client, resource_link, query, options, fetch_function):
"""
Constructor
"""
super(_ProxyQueryExecutionContext, self).__init__(client, options)
self._execution_context = _DefaultQueryExecutionContext(client, options, fetch_function)
self._resource_link = resource_link
self._query = query
self._fetch_function = fetch_function
def __next__(self):
"""Returns the next query result.
:return: The next query result.
:rtype: dict
:raises StopIteration: If no more result is left.
"""
try:
return next(self._execution_context)
except CosmosHttpResponseError as e:
if _is_partitioned_execution_info(e):
query_to_use = self._query if self._query is not None else "Select * from root r"
query_execution_info = _PartitionedQueryExecutionInfo(self._client._GetQueryPlanThroughGateway
(query_to_use, self._resource_link))
self._execution_context = self._create_pipelined_execution_context(query_execution_info)
else:
raise e
return next(self._execution_context)
def fetch_next_block(self):
"""Returns a block of results.
This method only exists for backward compatibility reasons. (Because
QueryIterable has exposed fetch_next_block api).
:return: List of results.
:rtype: list
"""
try:
return self._execution_context.fetch_next_block()
except CosmosHttpResponseError as e:
if _is_partitioned_execution_info(e):
query_to_use = self._query if self._query is not None else "Select * from root r"
query_execution_info = _PartitionedQueryExecutionInfo(self._client._GetQueryPlanThroughGateway
(query_to_use, self._resource_link))
self._execution_context = self._create_pipelined_execution_context(query_execution_info)
else:
raise e
return self._execution_context.fetch_next_block()
def _create_pipelined_execution_context(self, query_execution_info):
assert self._resource_link, "code bug, resource_link is required."
if query_execution_info.has_aggregates() and not query_execution_info.has_select_value():
if self._options and ("enableCrossPartitionQuery" in self._options
and self._options["enableCrossPartitionQuery"]):
raise CosmosHttpResponseError(StatusCodes.BAD_REQUEST,
"Cross partition query only supports 'VALUE <AggregateFunc>' for aggregates")
execution_context_aggregator = multi_execution_aggregator._MultiExecutionContextAggregator(self._client,
self._resource_link,
self._query,
self._options,
query_execution_info)
return _PipelineExecutionContext(self._client, self._options, execution_context_aggregator,
query_execution_info)
next = __next__ # Python 2 compatibility.
class _PipelineExecutionContext(_QueryExecutionContextBase): # pylint: disable=abstract-method
DEFAULT_PAGE_SIZE = 1000
def __init__(self, client, options, execution_context, query_execution_info):
super(_PipelineExecutionContext, self).__init__(client, options)
if options.get("maxItemCount"):
self._page_size = options["maxItemCount"]
else:
self._page_size = _PipelineExecutionContext.DEFAULT_PAGE_SIZE
self._execution_context = execution_context
self._endpoint = endpoint_component._QueryExecutionEndpointComponent(execution_context)
order_by = query_execution_info.get_order_by()
if order_by:
self._endpoint = endpoint_component._QueryExecutionOrderByEndpointComponent(self._endpoint)
aggregates = query_execution_info.get_aggregates()
if aggregates:
self._endpoint = endpoint_component._QueryExecutionAggregateEndpointComponent(self._endpoint, aggregates)
offset = query_execution_info.get_offset()
if offset is not None:
self._endpoint = endpoint_component._QueryExecutionOffsetEndpointComponent(self._endpoint, offset)
top = query_execution_info.get_top()
if top is not None:
self._endpoint = endpoint_component._QueryExecutionTopEndpointComponent(self._endpoint, top)
limit = query_execution_info.get_limit()
if limit is not None:
self._endpoint = endpoint_component._QueryExecutionTopEndpointComponent(self._endpoint, limit)
distinct_type = query_execution_info.get_distinct_type()
if distinct_type != _DistinctType.NoneType:
if distinct_type == _DistinctType.Ordered:
self._endpoint = endpoint_component._QueryExecutionDistinctOrderedEndpointComponent(self._endpoint)
else:
self._endpoint = endpoint_component._QueryExecutionDistinctUnorderedEndpointComponent(self._endpoint)
def __next__(self):
"""Returns the next query result.
:return: The next query result.
:rtype: dict
:raises StopIteration: If no more result is left.
"""
return next(self._endpoint)
def fetch_next_block(self):
"""Returns a block of results.
This method only exists for backward compatibility reasons. (Because
QueryIterable has exposed fetch_next_block api).
This method internally invokes next() as many times required to collect
the requested fetch size.
:return: List of results.
:rtype: list
"""
results = []
for _ in xrange(self._page_size):
try:
results.append(next(self))
except StopIteration:
# no more results
break
return results
next = __next__ # Python 2 compatibility.
| {
"content_hash": "54eadf049f49520643469dff60e71e0f",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 120,
"avg_line_length": 42.80213903743316,
"alnum_prop": 0.627936031984008,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c731d3535549dd2587b5b57f82160d3b03329332",
"size": "9126",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cosmos/azure-cosmos/azure/cosmos/_execution_context/execution_dispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(blank=True, help_text='Bitstamp login number', max_length=255),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('open', 'open'), ('cancelled', 'cancelled'), ('processed', 'processed')], db_index=True, default=None, max_length=255),
),
migrations.AlterField(
model_name='order',
name='type',
field=models.IntegerField(choices=[(0, 'buy'), (1, 'sell')], db_index=True),
),
migrations.AlterField(
model_name='transaction',
name='type',
field=models.PositiveSmallIntegerField(choices=[(0, 'deposit'), (1, 'withdrawal'), (2, 'trade')], db_index=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
| {
"content_hash": "cbc33e412d9ddd9e181d4590efe49277",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 317,
"avg_line_length": 41.32786885245902,
"alnum_prop": 0.5894486314954384,
"repo_name": "jkbrzt/cointrol",
"id": "6025ee90b9180325f5ec7e57f17b48206e68870f",
"size": "2594",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cointrol/core/migrations/0002_auto_20171102_0054.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "649"
},
{
"name": "CoffeeScript",
"bytes": "15346"
},
{
"name": "HTML",
"bytes": "798"
},
{
"name": "Handlebars",
"bytes": "5892"
},
{
"name": "Python",
"bytes": "74502"
}
],
"symlink_target": ""
} |
"""Utility to handle vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import os
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from third_party.nmt.utils import misc_utils as utils
UNK = "<unk>"
SOS = "<s>"
EOS = "</s>"
UNK_ID = 0
def load_vocab(vocab_file):
vocab = []
with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as f:
vocab_size = 0
for word in f:
vocab_size += 1
vocab.append(word.strip())
return vocab, vocab_size
def check_vocab(vocab_file,
out_dir,
check_special_token=True,
sos=None,
eos=None,
unk=None,
context_delimiter=None):
"""Check if vocab_file doesn't exist, create from corpus_file."""
if tf.gfile.Exists(vocab_file):
utils.print_out("# Vocab file %s exists" % vocab_file)
vocab = []
with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as f:
vocab_size = 0
for word in f:
word = word.rstrip("\n").rsplit("\t", 1)[0]
vocab_size += 1
vocab.append(word)
# add context delimiter if not exist yet
if context_delimiter is not None and context_delimiter not in vocab:
vocab += [context_delimiter]
vocab_size += 1
utils.print_out("Context delimiter {} does not exist"
.format(context_delimiter))
elif context_delimiter is not None and context_delimiter in vocab:
utils.print_out("Context delimiter {} already exists in vocab"
.format(context_delimiter))
if check_special_token:
# Verify if the vocab starts with unk, sos, eos
# If not, prepend those tokens & generate a new vocab file
if not unk:
unk = UNK
if not sos:
sos = SOS
if not eos:
eos = EOS
assert len(vocab) >= 3
if vocab[0] != unk or vocab[1] != sos or vocab[2] != eos:
utils.print_out("The first 3 vocab words [%s, %s, %s]"
" are not [%s, %s, %s]" % (vocab[0], vocab[1], vocab[2],
unk, sos, eos))
vocab = [unk, sos, eos] + vocab
vocab_size += 3
new_vocab_file = os.path.join(out_dir, os.path.basename(vocab_file))
with codecs.getwriter("utf-8")(tf.gfile.GFile(new_vocab_file,
"wb")) as f:
for word in vocab:
f.write("%s\n" % word)
vocab_file = new_vocab_file
else:
raise ValueError("vocab_file '%s' does not exist." % vocab_file)
vocab_size = len(vocab)
return vocab_size, vocab_file
def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
"""Creates vocab tables for src_vocab_file and tgt_vocab_file."""
src_vocab_table = lookup_ops.index_table_from_file(
src_vocab_file, default_value=UNK_ID)
if share_vocab:
tgt_vocab_table = src_vocab_table
else:
tgt_vocab_table = lookup_ops.index_table_from_file(
tgt_vocab_file, default_value=UNK_ID)
return src_vocab_table, tgt_vocab_table
def load_embed_txt(embed_file):
"""Load embed_file into a python dictionary.
  Note: the embed_file should be a Glove/word2vec formatted txt file.
  Here is an example assuming embed_size=5:
the -0.071549 0.093459 0.023738 -0.090339 0.056123
to 0.57346 0.5417 -0.23477 -0.3624 0.4037
and 0.20327 0.47348 0.050877 0.002103 0.060547
For word2vec format, the first line will be: <num_words> <emb_size>.
Args:
embed_file: file path to the embedding file.
Returns:
a dictionary that maps word to vector, and the size of embedding dimensions.
"""
emb_dict = dict()
emb_size = None
is_first_line = True
with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f:
for line in f:
tokens = line.rstrip().split(" ")
if is_first_line:
is_first_line = False
if len(tokens) == 2: # header line
emb_size = int(tokens[1])
continue
word = tokens[0]
vec = list(map(float, tokens[1:]))
emb_dict[word] = vec
if emb_size:
if emb_size != len(vec):
          utils.print_out(
              "Ignoring %s since embedding size is inconsistent." % word)
del emb_dict[word]
else:
emb_size = len(vec)
return emb_dict, emb_size
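# Minimal usage sketch (all paths are hypothetical):
#   vocab, vocab_size = load_vocab("/tmp/vocab.txt")
#   size, vocab_file = check_vocab("/tmp/vocab.txt", out_dir="/tmp/out")
#   src_table, tgt_table = create_vocab_tables(vocab_file, vocab_file,
#                                              share_vocab=True)
#   emb_dict, emb_size = load_embed_txt("/tmp/glove.6B.50d.txt")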
| {
"content_hash": "d3960ef5bdfc9f8dd6aa42dc4839cf2d",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 80,
"avg_line_length": 32.30434782608695,
"alnum_prop": 0.5955585464333782,
"repo_name": "google/active-qa",
"id": "d07031177fb52608ea78716207329b755e989358",
"size": "5115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "px/nmt/utils/vocab_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "499394"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
} |
from cupy.cuda import runtime
class Event(object):
"""CUDA event, a synchronization point of CUDA streams.
This class handles the CUDA event handle in RAII way, i.e., when an Event
instance is destroyed by the GC, its handle is also destroyed.
Args:
block (bool): If True, the event blocks on the
:meth:`~cupy.cuda.Event.synchronize` method.
disable_timing (bool): If True, the event does not prepare the timing
data.
interprocess (bool): If True, the event can be passed to other
processes.
Attributes:
        ptr (cupy.cuda.runtime.Event): Raw event handle. It can be passed to
the CUDA Runtime API via ctypes.
"""
def __init__(self, block=False, disable_timing=False, interprocess=False):
self.ptr = None
if interprocess and not disable_timing:
raise ValueError('Timing must be disabled for interprocess events')
flag = ((block and runtime.eventBlockingSync) |
(disable_timing and runtime.eventDisableTiming) |
(interprocess and runtime.eventInterprocess))
self.ptr = runtime.eventCreateWithFlags(flag)
def __del__(self):
if self.ptr:
runtime.eventDestroy(self.ptr)
self.ptr = None
@property
def done(self):
"""True if the event is done."""
return bool(runtime.eventQuery(self.ptr))
def record(self, stream=None):
"""Records the event to a stream.
Args:
stream (cupy.cuda.Stream): CUDA stream to record event. The null
stream is used by default.
.. seealso:: :meth:`cupy.cuda.Stream.record`
"""
if stream is None:
stream = Stream(null=True)
runtime.eventRecord(self.ptr, stream.ptr)
def synchronize(self):
"""Synchronizes all device work to the event.
If the event is created as a blocking event, it also blocks the CPU
thread until the event is done.
"""
runtime.eventSynchronize(self.ptr)
def get_elapsed_time(start_event, end_event):
"""Gets the elapsed time between two events.
Args:
start_event (Event): Earlier event.
end_event (Event): Later event.
Returns:
float: Elapsed time in milliseconds.
"""
return runtime.eventElapsedTime(start_event.ptr, end_event.ptr)
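# A minimal timing sketch (assumes a CUDA device and some GPU work to queue):
#   start, end = Event(), Event()
#   start.record()
#   ...                                  # launch kernels / copies here
#   end.record()
#   end.synchronize()
#   elapsed_ms = get_elapsed_time(start, end)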
class Stream(object):
"""CUDA stream.
    This class handles the CUDA stream handle in an RAII way, i.e., when a Stream
instance is destroyed by the GC, its handle is also destroyed.
Args:
null (bool): If True, the stream is a null stream (i.e. the default
stream that synchronizes with all streams). Otherwise, a plain new
stream is created.
non_blocking (bool): If True, the stream does not synchronize with the
NULL stream.
Attributes:
ptr (cupy.cuda.runtime.Stream): Raw stream handle. It can be passed to
the CUDA Runtime API via ctypes.
"""
def __init__(self, null=False, non_blocking=False):
if null:
self.ptr = None
elif non_blocking:
self.ptr = runtime.streamCreateWithFlags(runtime.streamNonBlocking)
else:
self.ptr = runtime.streamCreate()
def __del__(self):
if self.ptr:
runtime.streamDestroy(self.ptr)
self.ptr = None
@property
def done(self):
"""True if all work on this stream has been done."""
return bool(runtime.streamQuery(self.ptr))
def synchronize(self):
"""Waits for the stream completing all queued work."""
runtime.streamSynchronize(self.ptr)
def add_callback(self, callback, arg):
"""Adds a callback that is called when all queued work is done.
Args:
callback (function): Callback function. It must take three
arguments (Stream object, int error status, and user data
object), and returns nothing.
arg (object): Argument to the callback.
"""
runtime.streamAddCallback(self.ptr, callback, arg)
def record(self, event=None):
"""Records an event on the stream.
Args:
event (None or cupy.cuda.Event): CUDA event. If None, then a new
plain event is created and used.
Returns:
cupy.cuda.Event: The recorded event.
.. seealso:: :meth:`cupy.cuda.Event.record`
"""
if event is None:
event = Event()
runtime.eventRecord(event.ptr, self.ptr)
return event
def wait_event(self, event):
"""Makes the stream wait for an event.
The future work on this stream will be done after the event.
Args:
event (cupy.cuda.Event): CUDA event.
"""
        runtime.streamWaitEvent(self.ptr, event.ptr)
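# A minimal ordering sketch (hedged): make one stream wait on work recorded in
# another stream before it proceeds.
#   producer = Stream(non_blocking=True)
#   consumer = Stream(non_blocking=True)
#   ev = producer.record()          # event marking the producer's queued work
#   consumer.wait_event(ev)         # consumer will not run past this point first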
| {
"content_hash": "3c7685a7707e62def92dcc70bb27dc94",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 30.395061728395063,
"alnum_prop": 0.6078391551584078,
"repo_name": "t-abe/chainer",
"id": "6cead5420867eaac27bd0dea3d3c314861d9160a",
"size": "4924",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cupy/cuda/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18613"
},
{
"name": "Cuda",
"bytes": "6118"
},
{
"name": "Python",
"bytes": "1233416"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Hindlebook', 'node_data'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='pubDate',
field=models.DateTimeField(default=django.utils.timezone.now, db_index=True, verbose_name='date published'),
preserve_default=True,
),
migrations.AlterField(
model_name='post',
name='pubDate',
field=models.DateTimeField(default=django.utils.timezone.now, db_index=True, verbose_name='date published'),
preserve_default=True,
),
]
| {
"content_hash": "cde46ca5cf3c03875eeb6d4f5e728cb0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 120,
"avg_line_length": 29.23076923076923,
"alnum_prop": 0.6157894736842106,
"repo_name": "Roshack/cmput410-project",
"id": "ba800ae06c020b734d989f31ba3a1249a4af4fbb",
"size": "784",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "DistributedSocialNetworking/Hindlebook/migrations/0001_auto_20150328_2255.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36523"
},
{
"name": "HTML",
"bytes": "29835"
},
{
"name": "JavaScript",
"bytes": "89893"
},
{
"name": "Python",
"bytes": "166628"
}
],
"symlink_target": ""
} |
from django.contrib import admin
# import your models
# Register your models here.
# admin.site.register(YourModel)
| {
"content_hash": "243b77622a46b2224ab38a58a474c6f4",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 23.2,
"alnum_prop": 0.7931034482758621,
"repo_name": "datamade/la-metro-councilmatic",
"id": "6767362fddda0f6e69e7d295dd02e55295b2c9d0",
"size": "116",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lametro/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6935"
},
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "HTML",
"bytes": "192898"
},
{
"name": "JavaScript",
"bytes": "13628"
},
{
"name": "Makefile",
"bytes": "2651"
},
{
"name": "Python",
"bytes": "251693"
},
{
"name": "Shell",
"bytes": "1300"
}
],
"symlink_target": ""
} |
"""
This code sample demonstrates how to write records in pending mode
using the low-level generated client for Python.
"""
from google.cloud import bigquery_storage_v1
from google.cloud.bigquery_storage_v1 import types
from google.cloud.bigquery_storage_v1 import writer
from google.protobuf import descriptor_pb2
# If you update the customer_record.proto protocol buffer definition, run:
#
# protoc --python_out=. customer_record.proto
#
# from the samples/snippets directory to generate the customer_record_pb2.py module.
from . import customer_record_pb2
def create_row_data(row_num: int, name: str):
row = customer_record_pb2.CustomerRecord()
row.row_num = row_num
row.customer_name = name
return row.SerializeToString()
def append_rows_pending(project_id: str, dataset_id: str, table_id: str):
"""Create a write stream, write some sample data, and commit the stream."""
write_client = bigquery_storage_v1.BigQueryWriteClient()
parent = write_client.table_path(project_id, dataset_id, table_id)
write_stream = types.WriteStream()
# When creating the stream, choose the type. Use the PENDING type to wait
# until the stream is committed before it is visible. See:
# https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1#google.cloud.bigquery.storage.v1.WriteStream.Type
write_stream.type_ = types.WriteStream.Type.PENDING
write_stream = write_client.create_write_stream(
parent=parent, write_stream=write_stream
)
stream_name = write_stream.name
# Create a template with fields needed for the first request.
request_template = types.AppendRowsRequest()
# The initial request must contain the stream name.
request_template.write_stream = stream_name
# So that BigQuery knows how to parse the serialized_rows, generate a
# protocol buffer representation of your message descriptor.
proto_schema = types.ProtoSchema()
proto_descriptor = descriptor_pb2.DescriptorProto()
customer_record_pb2.CustomerRecord.DESCRIPTOR.CopyToProto(proto_descriptor)
proto_schema.proto_descriptor = proto_descriptor
proto_data = types.AppendRowsRequest.ProtoData()
proto_data.writer_schema = proto_schema
request_template.proto_rows = proto_data
# Some stream types support an unbounded number of requests. Construct an
# AppendRowsStream to send an arbitrary number of requests to a stream.
append_rows_stream = writer.AppendRowsStream(write_client, request_template)
# Create a batch of row data by appending proto2 serialized bytes to the
# serialized_rows repeated field.
proto_rows = types.ProtoRows()
proto_rows.serialized_rows.append(create_row_data(1, "Alice"))
proto_rows.serialized_rows.append(create_row_data(2, "Bob"))
# Set an offset to allow resuming this stream if the connection breaks.
# Keep track of which requests the server has acknowledged and resume the
# stream at the first non-acknowledged message. If the server has already
# processed a message with that offset, it will return an ALREADY_EXISTS
# error, which can be safely ignored.
#
# The first request must always have an offset of 0.
request = types.AppendRowsRequest()
request.offset = 0
proto_data = types.AppendRowsRequest.ProtoData()
proto_data.rows = proto_rows
request.proto_rows = proto_data
response_future_1 = append_rows_stream.send(request)
# Send another batch.
proto_rows = types.ProtoRows()
proto_rows.serialized_rows.append(create_row_data(3, "Charles"))
# Since this is the second request, you only need to include the row data.
# The name of the stream and protocol buffers DESCRIPTOR is only needed in
# the first request.
request = types.AppendRowsRequest()
proto_data = types.AppendRowsRequest.ProtoData()
proto_data.rows = proto_rows
request.proto_rows = proto_data
# Offset must equal the number of rows that were previously sent.
request.offset = 2
response_future_2 = append_rows_stream.send(request)
print(response_future_1.result())
print(response_future_2.result())
# Shutdown background threads and close the streaming connection.
append_rows_stream.close()
# A PENDING type stream must be "finalized" before being committed. No new
# records can be written to the stream after this method has been called.
write_client.finalize_write_stream(name=write_stream.name)
# Commit the stream you created earlier.
batch_commit_write_streams_request = types.BatchCommitWriteStreamsRequest()
batch_commit_write_streams_request.parent = parent
batch_commit_write_streams_request.write_streams = [write_stream.name]
write_client.batch_commit_write_streams(batch_commit_write_streams_request)
print(f"Writes to stream: '{write_stream.name}' have been committed.")
# [END bigquerystorage_append_rows_pending]
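# A hypothetical invocation (all identifiers are placeholders; the destination
# table is assumed to have row_num and customer_name columns matching the
# customer_record proto):
#   append_rows_pending("my-project", "my_dataset", "customer_records")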
| {
"content_hash": "370b150bee32f12aa89b9356581759e2",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 149,
"avg_line_length": 41.705882352941174,
"alnum_prop": 0.7386661293572436,
"repo_name": "googleapis/python-bigquery-storage",
"id": "af780ffa5b92dcce761f75b04540585e6a19f0c1",
"size": "5585",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/append_rows_pending.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1136897"
},
{
"name": "Shell",
"bytes": "30690"
}
],
"symlink_target": ""
} |
from v2ex import v2ex
| {
"content_hash": "11ab3de796c244be1d2144c7d4304abc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.8181818181818182,
"repo_name": "littson/bee",
"id": "37e9c071cf224d1dbf663ddfb7618eff078fa4d8",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bots/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "Python",
"bytes": "2879"
}
],
"symlink_target": ""
} |
from celery.utils.log import get_task_logger
import datetime
from redash.worker import celery
from redash import utils
from redash import models, settings
from .base import BaseTask
logger = get_task_logger(__name__)
def base_url(org):
if settings.MULTI_ORG:
return "https://{}/{}".format(settings.HOST, org.slug)
return "http://{}".format(settings.HOST)
@celery.task(name="redash.tasks.check_alerts_for_query", base=BaseTask)
def check_alerts_for_query(query_id):
from redash.wsgi import app
logger.debug("Checking query %d for alerts", query_id)
query = models.Query.get_by_id(query_id)
for alert in query.alerts:
alert.query = query
new_state = alert.evaluate()
passed_rearm_threshold = False
if alert.rearm and alert.last_triggered_at:
passed_rearm_threshold = alert.last_triggered_at + datetime.timedelta(seconds=alert.rearm) < utils.utcnow()
if new_state != alert.state or (alert.state == models.Alert.TRIGGERED_STATE and passed_rearm_threshold ):
logger.info("Alert %d new state: %s", alert.id, new_state)
old_state = alert.state
alert.update_instance(state=new_state, last_triggered_at=utils.utcnow())
if old_state == models.Alert.UNKNOWN_STATE and new_state == models.Alert.OK_STATE:
logger.debug("Skipping notification (previous state was unknown and now it's ok).")
continue
host = base_url(alert.query.org)
for subscription in alert.subscriptions:
try:
subscription.notify(alert, query, subscription.user, new_state, app, host)
except Exception as e:
logger.exception("Error with processing destination")
| {
"content_hash": "d9a99bd64bdb930e56146b8ba6e5b270",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 119,
"avg_line_length": 38.869565217391305,
"alnum_prop": 0.6521252796420581,
"repo_name": "guaguadev/redash",
"id": "97b84a63e25b2f78f634ee31c46a92e1934ecd59",
"size": "1788",
"binary": false,
"copies": "1",
"ref": "refs/heads/guagua",
"path": "redash/tasks/alerts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "239783"
},
{
"name": "HTML",
"bytes": "121423"
},
{
"name": "JavaScript",
"bytes": "279730"
},
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Nginx",
"bytes": "577"
},
{
"name": "Python",
"bytes": "501609"
},
{
"name": "Ruby",
"bytes": "709"
},
{
"name": "Shell",
"bytes": "43388"
}
],
"symlink_target": ""
} |
import os
import yaml
from extended_uva_judge import errors
def get_problem_directory(app_config):
"""Gets the directory containing the problem configs.
:return: The path to the problem configs.
:rtype: str
"""
problem_directory = app_config['problem_directory']
if not problem_directory:
raise errors.MissingConfigEntryError('problem_directory')
# Check for full windows or *nix directory path
if not (problem_directory.startswith('/') or ':' in problem_directory):
# assume it's relative to the current working directory
problem_directory = os.path.join(os.getcwd(), problem_directory)
return problem_directory
def get_problem_config(app_config, problem_id):
    """Gets the configuration for the requested problem.
    :return: The configuration for the user's selected problem
:rtype: dict
"""
problem_directory = get_problem_directory(app_config)
problem_config_path = os.path.join(
problem_directory, '%s.yaml' % problem_id)
problem_config = yaml.load(open(problem_config_path))
return problem_config
def does_problem_config_exist(app_config, problem_id):
"""Checks to see if the problem configuration exists in the system.
:return: True if it exists, false otherwise
:rtype: bool
"""
problem_directory = get_problem_directory(app_config)
problem_config_path = os.path.join(
problem_directory, '%s.yaml' % problem_id)
return os.path.exists(problem_config_path)
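# Minimal usage sketch (the config dict and problem id are hypothetical):
#   app_config = {'problem_directory': 'problems'}
#   if does_problem_config_exist(app_config, '100'):
#       config = get_problem_config(app_config, '100')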
| {
"content_hash": "7fa451574e9dcc35fc3a8bd88405d54e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 29.862745098039216,
"alnum_prop": 0.6999343401181878,
"repo_name": "fritogotlayed/Extended-UVA-Judge",
"id": "899e62389d7e3e73d0a9cac9e6a927bac137f515",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extended_uva_judge/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "802"
},
{
"name": "Python",
"bytes": "34882"
}
],
"symlink_target": ""
} |
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
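# A small worked example of the conventions above (illustrative only, not part
# of the original notes): with a Nyquist rate of 2 kHz, width=0.15 corresponds
# to a 0.15 * 2000 = 300 Hz transition band, and a filter with numtaps=65
# coefficients has order numtaps - 1 = 64.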
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
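# Worked example for the piecewise formula above (illustrative, not from the
# original source): an attenuation of a = 60 dB falls in the a > 50 branch, so
# beta = 0.1102 * (60 - 8.7) = 5.65326; for a <= 21 the beta of 0.0 degenerates
# the Kaiser window to a rectangular window.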
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
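# Minimal usage sketch combining kaiserord and firwin (the numbers are
# illustrative assumptions, not taken from the original docstrings):
#
# numtaps, beta = kaiserord(ripple=65.0, width=0.1)      # 65 dB, width = 0.1 * Nyquist
# taps = firwin(numtaps, cutoff=0.3, window=('kaiser', beta))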
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f::
>>> from scipy import signal
>>> signal.firwin(numtaps, f)
Use a specific window function::
>>> signal.firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> signal.firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> signal.firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
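# Illustration of how `pass_zero` and `pass_nyquist` pad the cutoff list above
# (the example values are assumptions added for clarity, not part of the
# original code):
#
#   pass_zero=True,  cutoff=[0.3]        ->  bands [[0.0, 0.3]]   (low-pass)
#   pass_zero=False, cutoff=[0.2, 0.4]   ->  bands [[0.2, 0.4]]   (band-pass)
#   pass_zero=False, cutoff=[0.3]        ->  bands [[0.3, 1.0]]   (high-pass)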
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0, antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the values of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
The magnitude response of all but type I filters is subject to the following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| {
"content_hash": "613bd463ad83ebea3b86bbb557b01c9e",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 94,
"avg_line_length": 34.52061855670103,
"alnum_prop": 0.6168931362301528,
"repo_name": "kmspriyatham/symath",
"id": "fd7c2e59c4b725138e868e603ca272a47cd4767b",
"size": "20091",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "scipy/scipy/signal/fir_filter_design.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17042868"
},
{
"name": "C++",
"bytes": "10078577"
},
{
"name": "CSS",
"bytes": "14254"
},
{
"name": "FORTRAN",
"bytes": "6345626"
},
{
"name": "JavaScript",
"bytes": "3133"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "15478"
},
{
"name": "Python",
"bytes": "7388118"
},
{
"name": "Shell",
"bytes": "3288"
},
{
"name": "TeX",
"bytes": "37261"
},
{
"name": "nesC",
"bytes": "1736"
}
],
"symlink_target": ""
} |
import BaseHTTPServer
import os.path
from mimetypes import MimeTypes
from urlparse import urlparse
mime = MimeTypes()
class Handler( BaseHTTPServer.BaseHTTPRequestHandler ):
def do_GET( self ):
path = urlparse('client' + self.path).path
if not os.path.isfile(path):
path = 'client/index.html'
self.send_response(200)
self.send_header( 'Content-type', mime.guess_type(path)[0] )
self.end_headers()
self.wfile.write( open(path).read() )
httpd = BaseHTTPServer.HTTPServer( ('127.0.0.1', 9999), Handler )
httpd.serve_forever()
| {
"content_hash": "fbc1b59d7e3401fa0606bda8ab179c9f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 31.05263157894737,
"alnum_prop": 0.6677966101694915,
"repo_name": "demerzel3/desmond",
"id": "04726a237e75e55e556c651821b231b33246b6a3",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6746"
},
{
"name": "HTML",
"bytes": "21698"
},
{
"name": "JavaScript",
"bytes": "107815"
},
{
"name": "Puppet",
"bytes": "289"
},
{
"name": "Python",
"bytes": "613"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
import sys
class ToleoException(Exception):
'''
Base exception class.
'''
def __init__(self, message, error=None):
super().__init__(message)
self.message = message
self.error = error or 'ToleoException'
def quit(self):
sys.exit('{}: {}'.format(self.error, self.message))
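# Minimal usage sketch (the message and error label below are hypothetical
# examples, not part of this module):
#
# try:
#     raise ToleoException('package not found', error='LookupError')
# except ToleoException as exc:
#     exc.quit()   # exits via sys.exit('LookupError: package not found')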
| {
"content_hash": "c8b45506f172b055b02cda6047011881",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 23.285714285714285,
"alnum_prop": 0.5828220858895705,
"repo_name": "carlwgeorge/toleo-old",
"id": "c6b251cc4b869058bf58fb518c5572d953d57f5b",
"size": "326",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "toleo/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15246"
}
],
"symlink_target": ""
} |
import unittest
import os
import subprocess
import hgapi
# vcs, , represent version controlled directories, internal
from yotta.lib import vcs
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
Test_Repo_git = "git@github.com:autopulated/testing-dummy.git"
Test_Repo_hg = "ssh://hg@bitbucket.org/autopulated/hg-testing-dummy"
class TestGit(unittest.TestCase):
@classmethod
def setUpClass(cls):
# test if we have a git user set up, if not we need to set one
child = subprocess.Popen([
'git','config', '--global', 'user.email'
], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = child.communicate()
if not len(out):
commands = [
['git','config', '--global', 'user.email', 'test@yottabuild.org'],
['git','config', '--global', 'user.name', 'Yotta Test']
]
for cmd in commands:
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = child.communicate()
cls.working_copy = vcs.Git.cloneToTemporaryDir(Test_Repo_git)
@classmethod
def tearDownClass(cls):
cls.working_copy.remove()
def test_creation(self):
self.assertTrue(self.working_copy)
def test_getCommitId(self):
commit_id = self.working_copy.getCommitId()
self.assertTrue(len(commit_id) >= 6)
def test_isClean(self):
self.assertTrue(self.working_copy.isClean())
fsutils.rmF(os.path.join(self.working_copy.workingDirectory(), 'module.json'))
self.assertFalse(self.working_copy.isClean())
def test_commit(self):
with open(os.path.join(self.working_copy.workingDirectory(), 'module.json'), "a") as f:
f.write("\n")
self.working_copy.markForCommit('module.json')
self.working_copy.commit('test commit: DO NOT PUSH')
self.assertTrue(self.working_copy.isClean())
class TestHg(unittest.TestCase):
@classmethod
def setUpClass(cls):
# test if we have an hg user set up, if not we need to set one
info = hgapi.Repo.command(".", os.environ, "showconfig")
if info.find("ui.username") == -1:
# hg doesn't provide a way to set the username from the command line.
# The HGUSER environment variable can be used for that purpose.
os.environ['HGUSER'] = 'Yotta Test <test@yottabuild.org>'
cls.working_copy = vcs.HG.cloneToTemporaryDir(Test_Repo_hg)
@classmethod
def tearDownClass(cls):
cls.working_copy.remove()
def test_creation(self):
self.assertTrue(self.working_copy)
def test_getCommitId(self):
commit_id = self.working_copy.getCommitId()
self.assertTrue(len(commit_id) >= 6)
def test_isClean(self):
self.assertTrue(self.working_copy.isClean())
fsutils.rmF(os.path.join(self.working_copy.workingDirectory(), 'module.json'))
self.assertFalse(self.working_copy.isClean())
def test_commit(self):
with open(os.path.join(self.working_copy.workingDirectory(), 'module.json'), "a") as f:
f.write("\n")
self.working_copy.markForCommit('module.json')
self.working_copy.commit('test commit: DO NOT PUSH')
self.assertTrue(self.working_copy.isClean())
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "32559f956820c74e22bfd0fa141d3098",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 95,
"avg_line_length": 37.09782608695652,
"alnum_prop": 0.6343392909463815,
"repo_name": "BlackstoneEngineering/yotta",
"id": "b1e4103ee9eeb9b3128e46e18f65fcfb3ce9fe0e",
"size": "3581",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "yotta/test/vcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "285"
},
{
"name": "Python",
"bytes": "402177"
},
{
"name": "Shell",
"bytes": "3034"
}
],
"symlink_target": ""
} |
from django.views.generic import TemplateView
class LandingPageView(TemplateView):
template_name = 'tt_disposal_wells/landing.html'
| {
"content_hash": "a4c412c37a9d16e54ed988d368591ce7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 27.6,
"alnum_prop": 0.7971014492753623,
"repo_name": "texastribune/tt_disposal_wells",
"id": "cb2a594a26d36decf9ca4b8911bb4c460452c399",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tt_disposal_wells/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5526"
},
{
"name": "HTML",
"bytes": "11692"
},
{
"name": "JavaScript",
"bytes": "5151"
},
{
"name": "Python",
"bytes": "9539"
},
{
"name": "Ruby",
"bytes": "191"
}
],
"symlink_target": ""
} |
import pandas as pd
from sklearn import model_selection
from sklearn.ensemble import GradientBoostingClassifier
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
df = pd.read_csv(url, names=names)
array = df.values
X = array[:,0:8]
y = array[:,8]
seed = 21
num_trees = 100
# random_state only has an effect when shuffle=True
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
model = GradientBoostingClassifier(n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print('results: ')
print(results)
print()
print('mean: ' + str(results.mean()))
| {
"content_hash": "e6a0d60036931babef05f81abd8e5b0a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 114,
"avg_line_length": 32.13636363636363,
"alnum_prop": 0.7256011315417256,
"repo_name": "sindresf/The-Playground",
"id": "77888f51e949298f0f8a1a69a71b3a090a45ff95",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Machine Learning/ScikitClassifiers/Classifiers/Stochastic Gradient Boosting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1117180"
},
{
"name": "Python",
"bytes": "98856"
}
],
"symlink_target": ""
} |
import sys, re
import math
import fileinput
from subprocess import *
from nlputil import *
import itertools
import time
import os.path
import traceback
############################################################################
# Quick Start #
############################################################################
# This program reads in data from the specified bzipped files, concatenates
# them, splits them at newlines after a certain amount of data has been
# read, and bzips the results. The files are assumed to contain tweets in
# JSON format, and the resulting split files after named after the date
# in the first tweet of the split. We take some care to ensure that we
# start the file with a valid tweet, in case something invalid is in the
# file.
############################################################################
# Notes #
############################################################################
# When this script was run, an example run was
#
# run-nohup ~tgp/split_bzip.py -s 1450000000 -o split.global. ../global.tweets.2011-*.bz2 &
#
# The value given to -s is the uncompressed size of each split and has been
# empirically determined to give compressed sizes slightly under 192 MB --
# useful for Hadoop as it means that each split will take slightly under 3
# HDFS blocks at the default 64MB block size.
#
# NOTE: On the Longhorn work machines with 48GB of memory, it takes about 12
# hours to process 20-21 days of global tweets and 24-25 days of spritzer
# tweets. Given that you only have a maximum of 24 hours of time, you
# should probably not process more than about a month's worth of tweets in
# a single run. (As an alternative, if your process gets terminated due to
# running out of time or for any other reason, try removing the last,
# partially written split file and then rerunning the command with the
# additional option --skip-existing. This will cause it to redo the same split
# but not overwrite the files that already exist. Since bzip compression takes
# up most of the time, this should fairly rapidly scan through all of the
# already-written files and then do the rest of them. As an example, a split
# run on spritzer output that took 24 hours to process 49 days took only
# 3.5 hours to skip through them when --skip-existing was used.)
#######################################################################
# Process files #
#######################################################################
def split_tweet_bzip_files(opts, args):
status = StatusMessage("tweet")
totalsize = 0
outproc = None
skip_tweets = False
def finish_outproc(outproc):
errprint("Total uncompressed size this split: %s" % totalsize)
errprint("Total number of tweets so far: %s" % status.num_processed())
outproc.stdin.close()
errprint("Waiting for termination of output process ...")
outproc.wait()
errprint("Waiting for termination of output process ... done.")
for infile in args:
errprint("Opening input %s..." % infile)
errprint("Total uncompressed size this split so far: %s" % totalsize)
errprint("Total number of tweets so far: %s" % status.num_processed())
# NOTE: close_fds=True turns out to be necessary to avoid a deadlock in
# the following circumstance:
#
# 1) Open input from bzcat.
# 2) Open output to bzip2.
# 3) bzcat ends partway through a split (possibly after multiple splits,
# and hence multiple invocations of bzip2).
# 4) Wait for bzcat to finish, then start another bzcat for the next file.
# 5) End the split, close bzip2's stdin (our output pipe), and wait for
# bzip2 to finish.
# 6) Blammo! Deadlock while waiting for bzip2 to finish.
#
# When we opened the second bzcat, if we don't call close_fds, it
# inherits the file descriptor of the pipe to bzip2, and that screws
# things up. Presumably, the file descriptor inheritance means that
# there's still a file descriptor to the pipe to bzip2, so closing the
# output doesn't actually cause the pipe to get closed -- hence bzip2
# waits indefinitely for more input.
inproc = Popen("bzcat", stdin=open(infile, "rb"), stdout=PIPE, close_fds=True)
for full_line in inproc.stdout:
line = full_line[:-1]
status.item_processed()
if not line.startswith('{"'):
errprint("Unparsable line, not JSON?, #%s: %s" % (status.num_processed(), line))
else:
if totalsize >= opts.split_size or (not outproc and not skip_tweets):
# We need to open a new file. But keep writing the old file
# (if any) until we see a tweet with a time in it.
json = None
try:
json = split_json(line)
except Exception, exc:
errprint("Exception parsing JSON in line #%s: %s" % (status.num_processed(), line))
errprint("Exception is %s" % exc)
traceback.print_exc()
if json:
json = json[0]
#errprint("Processing JSON %s:" % json)
#errprint("Length: %s" % len(json))
for i in xrange(len(json)):
#errprint("Saw %s=%s" % (i, json[i]))
if json[i] == '"created_at"':
#errprint("Saw created")
if i + 2 >= len(json) or json[i+1] != ':' or json[i+2][0] != '"' or json[i+2][-1] != '"':
errprint("Something weird with JSON in line #%s, around here: %s" % (status.num_processed(), json[i-1:i+4]))
else:
json_time = json[i+2][1:-1].replace(" +0000 ", " UTC ")
tweet_time = time.strptime(json_time,
"%a %b %d %H:%M:%S %Z %Y")
if not tweet_time:
errprint("Can't parse time in line #%s: %s" % (status.num_processed(), json_time))
else:
# Now we're ready to create a new split.
skip_tweets = False
timesuff = time.strftime("%Y-%m-%d.%H%M-UTC", tweet_time)
def make_filename(suff):
return opts.output_prefix + suff + ".bz2"
outfile = make_filename(timesuff)
if os.path.exists(outfile):
if opts.skip_existing:
errprint("Skipping writing tweets to existing %s" % outfile)
skip_tweets = True
else:
errprint("Warning, path %s exists, not overwriting" % outfile)
for ind in itertools.count(1):
# Use _ not - because - sorts before the . of .bz2 but
# _ sorts after (as well as after all letters and numbers).
outfile = make_filename(timesuff + ("_%03d" % ind))
if not os.path.exists(outfile):
break
if outproc:
finish_outproc(outproc)
outproc = None
totalsize = 0
if not skip_tweets:
errprint("About to write to %s..." % outfile)
outfd = open(outfile, "wb")
outproc = Popen("bzip2", stdin=PIPE, stdout=outfd, close_fds=True)
outfd.close()
break
totalsize += len(full_line)
if skip_tweets:
pass
elif outproc:
outproc.stdin.write(full_line)
else:
errprint("Warning: Nowhere to write bad line #%s, skipping: %s" % (status.num_processed(), line))
errprint("Waiting for termination of input process ...")
inproc.stdout.close()
# This sleep probably isn't necessary. I put it in while attempting to
# solve the deadlock when closing the pipe to bzip2 (see comments above
# about close_fds).
sleep_time = 5
errprint("Sleeping %s seconds ..." % sleep_time)
time.sleep(sleep_time)
inproc.wait()
errprint("Waiting for termination of input process ... done.")
if outproc:
finish_outproc(outproc)
outproc = None
# A very simple JSON splitter. Doesn't take the next step of assembling
# into dictionaries, but easily could.
#
# FIXME: This is totally unnecessary, as Python has a built-in JSON parser.
# (I didn't realize this when I wrote the function.)
def split_json(line):
split = re.split(r'("(?:\\.|[^"])*?"|[][:{},])', line)
split = (x for x in split if x) # Filter out empty strings
curind = 0
def get_nested(endnest):
nest = []
try:
while True:
item = next(split)
if item == endnest:
return nest
elif item == '{':
nest += [get_nested('}')]
elif item == '[':
nest += [get_nested(']')]
else:
nest += [item]
except StopIteration:
if not endnest:
return nest
else:
raise
return get_nested(None)
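# Hedged illustration of the splitter's output shape (the input value is an
# assumption added for clarity): split_json returns nested token lists rather
# than dicts, e.g.
#
#   split_json('{"a":1}')  ->  [['"a"', ':', '1']]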
#######################################################################
# Main code #
#######################################################################
def main():
op = OptionParser(usage="%prog [options] input_dir")
op.add_option("-s", "--split-size", metavar="SIZE",
type="int", default=1000000000,
help="""Size (uncompressed) of each split. Note that JSON
tweets compress in bzip about 8 to 1, hence 1 GB is a good uncompressed size
for Hadoop. Default %default.""")
op.add_option("-o", "--output-prefix", metavar="PREFIX",
help="""Prefix to use for all splits.""")
op.add_option("--skip-existing", action="store_true",
help="""If we would try and open an existing file,
skip writing any of the corresponding tweets.""")
opts, args = op.parse_args()
if not opts.output_prefix:
op.error("Must specify output prefix using -o or --output-prefix")
if not args:
op.error("No input files specified")
split_tweet_bzip_files(opts, args)
main()
| {
"content_hash": "24e8b80c73dbe2c504eb48e46f391445",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 126,
"avg_line_length": 44.96491228070175,
"alnum_prop": 0.5562817011314866,
"repo_name": "utcompling/fieldspring",
"id": "3d20a77051781493a4e3569a2d271821aa41f4c3",
"size": "10359",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/main/python/split_bzip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "423772"
},
{
"name": "Python",
"bytes": "464375"
},
{
"name": "Scala",
"bytes": "1232469"
},
{
"name": "Shell",
"bytes": "26594"
}
],
"symlink_target": ""
} |
import sys
import os
import urllib.parse
import requests
from deconstrst.builder import DeconstJSONBuilder
from sphinx.application import Sphinx
from sphinx.builders import BUILTIN_BUILDERS
def build(srcdir, destdir):
"""
Invoke Sphinx with locked arguments to generate JSON content.
"""
# I am a terrible person
BUILTIN_BUILDERS['deconst'] = DeconstJSONBuilder
doctreedir = os.path.join(destdir, '.doctrees')
app = Sphinx(srcdir=srcdir, confdir=srcdir, outdir=destdir,
doctreedir=doctreedir, buildername="deconst",
confoverrides={}, status=sys.stdout, warning=sys.stderr,
freshenv=True, warningiserror=False, tags=[], verbosity=0,
parallel=1)
app.build(True, [])
return app.statuscode
def submit(destdir, content_store_url, content_store_apikey, content_id_base):
"""
Submit the generated json files to the content store API.
"""
headers = {
"Content-Type": "application/json",
"Authorization": 'deconst apikey="{}"'.format(content_store_apikey)
}
for dirpath, dirnames, filenames in os.walk(destdir):
for name in filenames:
fullpath = os.path.join(dirpath, name)
base, ext = os.path.splitext(name)
if os.path.isfile(fullpath) and ext == ".json":
relpath = os.path.relpath(fullpath, destdir)
if base == "index":
full_suffix = dirpath
else:
full_suffix = os.path.join(dirpath, base)
content_suffix = os.path.relpath(full_suffix, destdir)
content_id = content_id_base + content_suffix
content_id = content_id.rstrip("/.")
print(
"submitting [{}] as [{}] ... ".format(relpath, content_id),
end=''
)
url = content_store_url + "content/" + \
urllib.parse.quote(content_id, safe='')
with open(fullpath, "rb") as inf:
response = requests.put(url, data=inf, headers=headers)
response.raise_for_status()
print("success")
print("All generated content submitted to the content store.")
return 0
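# Minimal sketch of how build() and submit() compose (the URLs, API key and
# content id base are illustrative assumptions, not values from this
# repository):
#
# if build('docs', '_build/deconst') == 0:
#     submit('_build/deconst',
#            'https://content-store.example.com/',
#            'APIKEY',
#            'https://github.com/example/docs/')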
| {
"content_hash": "9c6e241ad243d1403e70b4cb5ff102af",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 30.906666666666666,
"alnum_prop": 0.5763589301121657,
"repo_name": "ktbartholomew/preparer-sphinx",
"id": "8f5c5e37946dea3e89a3d599681295ad2175cf48",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deconstrst/deconstrst.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "9969"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from feincms_simplegallery.util.mixins import OrderableMixin
try:
from feincms.admin.item_editor import FeinCMSInline
except ImportError: # pragma: no cover, FeinCMS not available.
# Does not do anything sane, but does not hurt either
from django.contrib.admin import StackedInline as FeinCMSInline
class BaseContainer(OrderableMixin):
"""
Container of Elements to be rendered.
Do not instantiate this directly! Instantiate subclasses instead!
"""
title = models.CharField(_('Title'), max_length=300)
def __str__(self):
return self.title
class Meta:
abstract = True
ordering = ['_order', 'title']
verbose_name = _('Base Container')
verbose_name_plural = _('Base Containers')
def render(self, **kwargs):
# return
return render_to_string(
# 'content/containers/%s/container.html' % self.type,
'content/simplegallery/gallery/default/container.html',
{
'elements': self.container_elements.all(),
'container_title': self.title,
'container_id': id(self)
},
context_instance=kwargs.get('context'))
class BaseElement(OrderableMixin, models.Model):
"""
Base Element to be rendered.
Do not instantiate this directly! Instantiate subclasses instead!
"""
title = models.CharField(
_('Title'),
max_length=100,
blank=True,
null=True
)
subtitle = models.CharField(
_('Subtitle'),
max_length=200,
blank=True,
null=True
)
description = models.TextField(
_('Description'),
blank=True,
null=True
)
url = models.CharField(
_('URL'),
max_length=2048,
blank=True,
null=True
)
class Meta:
abstract = True
ordering = ['_order', 'title']
verbose_name = _('Base Element')
verbose_name_plural = _('Base Elements')
def __str__(self):
return self.title
def render(self, **kwargs):
return render_to_string(
'content/containers/%s/elem.html' %
self.__class__.__name__.lower(),
{
'elem': self,
'elem_id': id(self)
},
context_instance=kwargs.get('context'))
class BaseContentInline(FeinCMSInline):
raw_id_fields = ('container',)
class BaseContent(models.Model):
feincms_item_editor_inline = BaseContentInline
container_type = 'base'
class Meta:
abstract = True
verbose_name = _('Base Content')
verbose_name_plural = _('Base Content')
def __str__(self):
return _('Base Content') + "#{}".format(self.pk)
@classmethod
def initialize_type(cls, TYPE_CHOICES=None, cleanse=None):
if cls == BaseContent:
raise ImproperlyConfigured(
'You cannot instantiate the BaseContent class directly. '
'Instantiate its subclasses instead.'
)
if TYPE_CHOICES is None:
raise ImproperlyConfigured(
'You need to set TYPE_CHOICES when creating a'
' %s' % cls.__name__
)
cls.add_to_class(
'type',
models.CharField(
_('type'),
max_length=20, choices=TYPE_CHOICES,
default=TYPE_CHOICES[0][0]
)
)
if cleanse:
cls.cleanse = cleanse
def render(self, **kwargs):
# return
template_dir = 'content/simplegallery/%s/%s/' % (
self.container_type,
self.type,
)
template_container = template_dir + 'container.html'
template_element = template_dir + 'element.html'
return render_to_string(
template_container,
{
'elements': self.container.container_elements.all(),
'container': self,
'container_title': self.container.title,
'container_id': id(self),
'template_dir': template_dir,
'template_element': template_element,
},
context_instance=kwargs.get('context'))
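# Hedged usage sketch: a concrete content type would subclass BaseContent, add
# a ForeignKey named `container`, and be registered with FeinCMS so that
# initialize_type() receives TYPE_CHOICES. The names below are illustrative
# assumptions, not part of this module.
#
# class GalleryContent(BaseContent):
#     container = models.ForeignKey(Gallery, related_name='+',
#                                   verbose_name=_('Gallery'))
#     container_type = 'gallery'
#
#     class Meta(BaseContent.Meta):
#         abstract = True
#
# Page.create_content_type(GalleryContent,
#                          TYPE_CHOICES=(('default', _('Default')),))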
| {
"content_hash": "68ea60a9c51772158e661525ccedbed1",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 69,
"avg_line_length": 28.305732484076433,
"alnum_prop": 0.5562556255625563,
"repo_name": "paramono/feincms_simplegallery",
"id": "247820a60c88b56208515dbbc8c5f965b9f3962a",
"size": "4444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feincms_simplegallery/util/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5512"
},
{
"name": "Python",
"bytes": "17821"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from collections import namedtuple
from six import string_types
from prompt_toolkit.document import Document
from prompt_toolkit.filters import to_filter
from .base import Completer, Completion
from .word_completer import WordCompleter
__all__ = [
'FuzzyCompleter',
'FuzzyWordCompleter',
]
class FuzzyCompleter(Completer):
"""
Fuzzy completion.
This wraps any other completer and turns it into a fuzzy completer.
If the list of words is: ["leopard" , "gorilla", "dinosaur", "cat", "bee"]
Then trying to complete "oar" would yield "leopard" and "dinosaur", but not
the others, because they match the regular expression 'o.*a.*r'.
Similar, in another application "djm" could expand to "django_migrations".
The results are sorted by relevance, which is defined as the start position
and the length of the match.
Notice that this is not really a tool to work around spelling mistakes,
like what would be possible with difflib. The purpose is rather to have a
quicker or more intuitive way to filter the given completions, especially
when many completions have a common prefix.
Fuzzy algorithm is based on this post:
https://blog.amjith.com/fuzzyfinder-in-10-lines-of-python
:param completer: A :class:`~.Completer` instance.
:param WORD: When True, use WORD characters.
:param pattern: Regex pattern which selects the characters before the
cursor that are considered for the fuzzy matching.
:param enable_fuzzy: (bool or `Filter`) Enable the fuzzy behavior. For
easily turning fuzziness on or off according to a certain condition.
"""
def __init__(self, completer, WORD=False, pattern=None, enable_fuzzy=True):
assert isinstance(completer, Completer)
assert pattern is None or pattern.startswith('^')
self.completer = completer
self.pattern = pattern
self.WORD = WORD
self.enable_fuzzy = to_filter(enable_fuzzy)
def get_completions(self, document, complete_event):
if self.enable_fuzzy():
return self._get_fuzzy_completions(document, complete_event)
else:
return self.completer.get_completions(document, complete_event)
def _get_pattern(self):
if self.pattern:
return self.pattern
if self.WORD:
return r'[^\s]+'
return '^[a-zA-Z0-9_]*'
def _get_fuzzy_completions(self, document, complete_event):
word_before_cursor = document.get_word_before_cursor(
pattern=re.compile(self._get_pattern()))
# Get completions
document2 = Document(
text=document.text[:document.cursor_position - len(word_before_cursor)],
cursor_position=document.cursor_position - len(word_before_cursor))
completions = list(self.completer.get_completions(document2, complete_event))
fuzzy_matches = []
pat = '.*?'.join(map(re.escape, word_before_cursor))
pat = '(?=({0}))'.format(pat) # lookahead regex to manage overlapping matches
regex = re.compile(pat, re.IGNORECASE)
for compl in completions:
matches = list(regex.finditer(compl.text))
if matches:
# Prefer the match, closest to the left, then shortest.
best = min(matches, key=lambda m: (m.start(), len(m.group(1))))
fuzzy_matches.append(_FuzzyMatch(len(best.group(1)), best.start(), compl))
def sort_key(fuzzy_match):
" Sort by start position, then by the length of the match. "
return fuzzy_match.start_pos, fuzzy_match.match_length
fuzzy_matches = sorted(fuzzy_matches, key=sort_key)
for match in fuzzy_matches:
# Include these completions, but set the correct `display`
# attribute and `start_position`.
yield Completion(
match.completion.text,
start_position=match.completion.start_position - len(word_before_cursor),
display_meta=match.completion.display_meta,
display=self._get_display(match, word_before_cursor),
style=match.completion.style)
def _get_display(self, fuzzy_match, word_before_cursor):
"""
Generate formatted text for the display label.
"""
m = fuzzy_match
word = m.completion.text
if m.match_length == 0:
# No highlighting when we have zero length matches (no input text).
return word
result = []
# Text before match.
result.append(('class:fuzzymatch.outside', word[:m.start_pos]))
# The match itself.
characters = list(word_before_cursor)
for c in word[m.start_pos:m.start_pos + m.match_length]:
classname = 'class:fuzzymatch.inside'
if characters and c.lower() == characters[0].lower():
classname += '.character'
del characters[0]
result.append((classname, c))
# Text after match.
result.append(
('class:fuzzymatch.outside', word[m.start_pos + m.match_length:]))
return result
class FuzzyWordCompleter(Completer):
"""
Fuzzy completion on a list of words.
(This is basically a `WordCompleter` wrapped in a `FuzzyCompleter`.)
:param words: List of words or callable that returns a list of words.
:param meta_dict: Optional dict mapping words to their meta-information.
:param WORD: When True, use WORD characters.
"""
def __init__(self, words, meta_dict=None, WORD=False):
assert callable(words) or all(isinstance(w, string_types) for w in words)
self.words = words
self.meta_dict = meta_dict or {}
self.WORD = WORD
self.word_completer = WordCompleter(
words=lambda: self.words,
WORD=self.WORD)
self.fuzzy_completer = FuzzyCompleter(
self.word_completer,
WORD=self.WORD)
def get_completions(self, document, complete_event):
return self.fuzzy_completer.get_completions(document, complete_event)
_FuzzyMatch = namedtuple('_FuzzyMatch', 'match_length start_pos completion')
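# Minimal usage sketch (the word list and prompt text are illustrative
# assumptions, not part of this module):
#
# from prompt_toolkit import prompt
# completer = FuzzyWordCompleter(['django_migrations', 'django_admin_log'])
# text = prompt('> ', completer=completer)   # typing "djm" matches "django_migrations"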
| {
"content_hash": "e679741a6612d58467c73e6d1fc51ba5",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 90,
"avg_line_length": 36.87134502923977,
"alnum_prop": 0.6413957176843775,
"repo_name": "lmregus/Portfolio",
"id": "83e602495a3ca41e62808b1b04390d59bdc5aa99",
"size": "6305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/completion/fuzzy_completer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27682"
},
{
"name": "C++",
"bytes": "25458"
},
{
"name": "CSS",
"bytes": "12842"
},
{
"name": "HTML",
"bytes": "49171"
},
{
"name": "Java",
"bytes": "99711"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Python",
"bytes": "42857"
},
{
"name": "Shell",
"bytes": "5710"
}
],
"symlink_target": ""
} |
"""generated automatically by auto_dao.py"""
class DBChangeParameter(object):
vtType = 'changeParameter'
def __init__(self, moduleId=None, alias=None, functionId=None, function=None, parameterId=None, parameter=None, type=None, value=None):
self.__db_moduleId = moduleId
self.__db_alias = alias
self.__db_functionId = functionId
self.__db_function = function
self.__db_parameterId = parameterId
self.__db_parameter = parameter
self.__db_type = type
self.__db_value = value
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def __get_db_alias(self):
return self.__db_alias
def __set_db_alias(self, alias):
self.__db_alias = alias
db_alias = property(__get_db_alias, __set_db_alias)
def db_add_alias(self, alias):
self.__db_alias = alias
def db_change_alias(self, alias):
self.__db_alias = alias
def db_delete_alias(self, alias):
self.__db_alias = None
def __get_db_functionId(self):
return self.__db_functionId
def __set_db_functionId(self, functionId):
self.__db_functionId = functionId
db_functionId = property(__get_db_functionId, __set_db_functionId)
def db_add_functionId(self, functionId):
self.__db_functionId = functionId
def db_change_functionId(self, functionId):
self.__db_functionId = functionId
def db_delete_functionId(self, functionId):
self.__db_functionId = None
def __get_db_function(self):
return self.__db_function
def __set_db_function(self, function):
self.__db_function = function
db_function = property(__get_db_function, __set_db_function)
def db_add_function(self, function):
self.__db_function = function
def db_change_function(self, function):
self.__db_function = function
def db_delete_function(self, function):
self.__db_function = None
def __get_db_parameterId(self):
return self.__db_parameterId
def __set_db_parameterId(self, parameterId):
self.__db_parameterId = parameterId
db_parameterId = property(__get_db_parameterId, __set_db_parameterId)
def db_add_parameterId(self, parameterId):
self.__db_parameterId = parameterId
def db_change_parameterId(self, parameterId):
self.__db_parameterId = parameterId
def db_delete_parameterId(self, parameterId):
self.__db_parameterId = None
def __get_db_parameter(self):
return self.__db_parameter
def __set_db_parameter(self, parameter):
self.__db_parameter = parameter
db_parameter = property(__get_db_parameter, __set_db_parameter)
def db_add_parameter(self, parameter):
self.__db_parameter = parameter
def db_change_parameter(self, parameter):
self.__db_parameter = parameter
def db_delete_parameter(self, parameter):
self.__db_parameter = None
def __get_db_type(self):
return self.__db_type
def __set_db_type(self, type):
self.__db_type = type
db_type = property(__get_db_type, __set_db_type)
def db_add_type(self, type):
self.__db_type = type
def db_change_type(self, type):
self.__db_type = type
def db_delete_type(self, type):
self.__db_type = None
def __get_db_value(self):
return self.__db_value
def __set_db_value(self, value):
self.__db_value = value
db_value = property(__get_db_value, __set_db_value)
def db_add_value(self, value):
self.__db_value = value
def db_change_value(self, value):
self.__db_value = value
def db_delete_value(self, value):
self.__db_value = None
def getPrimaryKey(self):
return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteFunction(object):
vtType = 'deleteFunction'
def __init__(self, moduleId=None, functionId=None):
self.__db_moduleId = moduleId
self.__db_functionId = functionId
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def __get_db_functionId(self):
return self.__db_functionId
def __set_db_functionId(self, functionId):
self.__db_functionId = functionId
db_functionId = property(__get_db_functionId, __set_db_functionId)
def db_add_functionId(self, functionId):
self.__db_functionId = functionId
def db_change_functionId(self, functionId):
self.__db_functionId = functionId
def db_delete_functionId(self, functionId):
self.__db_functionId = None
def getPrimaryKey(self):
return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteConnection(object):
vtType = 'deleteConnection'
def __init__(self, connectionId=None):
self.__db_connectionId = connectionId
def __get_db_connectionId(self):
return self.__db_connectionId
def __set_db_connectionId(self, connectionId):
self.__db_connectionId = connectionId
db_connectionId = property(__get_db_connectionId, __set_db_connectionId)
def db_add_connectionId(self, connectionId):
self.__db_connectionId = connectionId
def db_change_connectionId(self, connectionId):
self.__db_connectionId = connectionId
def db_delete_connectionId(self, connectionId):
self.__db_connectionId = None
def getPrimaryKey(self):
return self.__db_connectionId
"""generated automatically by auto_dao.py"""
class DBAddModule(object):
vtType = 'addModule'
def __init__(self, id=None, cache=None, name=None, x=None, y=None):
self.__db_id = id
self.__db_cache = cache
self.__db_name = name
self.__db_x = x
self.__db_y = y
def __get_db_id(self):
return self.__db_id
def __set_db_id(self, id):
self.__db_id = id
db_id = property(__get_db_id, __set_db_id)
def db_add_id(self, id):
self.__db_id = id
def db_change_id(self, id):
self.__db_id = id
def db_delete_id(self, id):
self.__db_id = None
def __get_db_cache(self):
return self.__db_cache
def __set_db_cache(self, cache):
self.__db_cache = cache
db_cache = property(__get_db_cache, __set_db_cache)
def db_add_cache(self, cache):
self.__db_cache = cache
def db_change_cache(self, cache):
self.__db_cache = cache
def db_delete_cache(self, cache):
self.__db_cache = None
def __get_db_name(self):
return self.__db_name
def __set_db_name(self, name):
self.__db_name = name
db_name = property(__get_db_name, __set_db_name)
def db_add_name(self, name):
self.__db_name = name
def db_change_name(self, name):
self.__db_name = name
def db_delete_name(self, name):
self.__db_name = None
def __get_db_x(self):
return self.__db_x
def __set_db_x(self, x):
self.__db_x = x
db_x = property(__get_db_x, __set_db_x)
def db_add_x(self, x):
self.__db_x = x
def db_change_x(self, x):
self.__db_x = x
def db_delete_x(self, x):
self.__db_x = None
def __get_db_y(self):
return self.__db_y
def __set_db_y(self, y):
self.__db_y = y
db_y = property(__get_db_y, __set_db_y)
def db_add_y(self, y):
self.__db_y = y
def db_change_y(self, y):
self.__db_y = y
def db_delete_y(self, y):
self.__db_y = None
def getPrimaryKey(self):
return self.__db_id
"""generated automatically by auto_dao.py"""
class DBDeleteAnnotation(object):
vtType = 'deleteAnnotation'
def __init__(self, moduleId=None, key=None):
self.__db_moduleId = moduleId
self.__db_key = key
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def __get_db_key(self):
return self.__db_key
def __set_db_key(self, key):
self.__db_key = key
db_key = property(__get_db_key, __set_db_key)
def db_add_key(self, key):
self.__db_key = key
def db_change_key(self, key):
self.__db_key = key
def db_delete_key(self, key):
self.__db_key = None
def getPrimaryKey(self):
return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteModulePort(object):
vtType = 'deleteModulePort'
def __init__(self, moduleId=None, portType=None, portName=None):
self.__db_moduleId = moduleId
self.__db_portType = portType
self.__db_portName = portName
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def __get_db_portType(self):
return self.__db_portType
def __set_db_portType(self, portType):
self.__db_portType = portType
db_portType = property(__get_db_portType, __set_db_portType)
def db_add_portType(self, portType):
self.__db_portType = portType
def db_change_portType(self, portType):
self.__db_portType = portType
def db_delete_portType(self, portType):
self.__db_portType = None
def __get_db_portName(self):
return self.__db_portName
def __set_db_portName(self, portName):
self.__db_portName = portName
db_portName = property(__get_db_portName, __set_db_portName)
def db_add_portName(self, portName):
self.__db_portName = portName
def db_change_portName(self, portName):
self.__db_portName = portName
def db_delete_portName(self, portName):
self.__db_portName = None
def getPrimaryKey(self):
return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteModule(object):
vtType = 'deleteModule'
def __init__(self, moduleId=None):
self.__db_moduleId = moduleId
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def getPrimaryKey(self):
return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBTag(object):
vtType = 'tag'
def __init__(self, time=None, name=None):
self.__db_time = time
self.__db_name = name
def __get_db_time(self):
return self.__db_time
def __set_db_time(self, time):
self.__db_time = time
db_time = property(__get_db_time, __set_db_time)
def db_add_time(self, time):
self.__db_time = time
def db_change_time(self, time):
self.__db_time = time
def db_delete_time(self, time):
self.__db_time = None
def __get_db_name(self):
return self.__db_name
def __set_db_name(self, name):
self.__db_name = name
db_name = property(__get_db_name, __set_db_name)
def db_add_name(self, name):
self.__db_name = name
def db_change_name(self, name):
self.__db_name = name
def db_delete_name(self, name):
self.__db_name = None
def getPrimaryKey(self):
return self.__db_time
"""generated automatically by auto_dao.py"""
class DBAddModulePort(object):
vtType = 'addModulePort'
def __init__(self, moduleId=None, portType=None, portName=None, portSpec=None):
self.__db_moduleId = moduleId
self.__db_portType = portType
self.__db_portName = portName
self.__db_portSpec = portSpec
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def __get_db_portType(self):
return self.__db_portType
def __set_db_portType(self, portType):
self.__db_portType = portType
db_portType = property(__get_db_portType, __set_db_portType)
def db_add_portType(self, portType):
self.__db_portType = portType
def db_change_portType(self, portType):
self.__db_portType = portType
def db_delete_portType(self, portType):
self.__db_portType = None
def __get_db_portName(self):
return self.__db_portName
def __set_db_portName(self, portName):
self.__db_portName = portName
db_portName = property(__get_db_portName, __set_db_portName)
def db_add_portName(self, portName):
self.__db_portName = portName
def db_change_portName(self, portName):
self.__db_portName = portName
def db_delete_portName(self, portName):
self.__db_portName = None
def __get_db_portSpec(self):
return self.__db_portSpec
def __set_db_portSpec(self, portSpec):
self.__db_portSpec = portSpec
db_portSpec = property(__get_db_portSpec, __set_db_portSpec)
def db_add_portSpec(self, portSpec):
self.__db_portSpec = portSpec
def db_change_portSpec(self, portSpec):
self.__db_portSpec = portSpec
def db_delete_portSpec(self, portSpec):
self.__db_portSpec = None
def getPrimaryKey(self):
return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBAction(object):
vtType = 'action'
def __init__(self, time=None, parent=None, user=None, what=None, date=None, notes=None, datas=None):
self.__db_time = time
self.__db_parent = parent
self.__db_user = user
self.__db_what = what
self.__db_date = date
self.__db_notes = notes
if datas is None:
self.__db_datas = []
else:
self.__db_datas = datas
def __get_db_time(self):
return self.__db_time
def __set_db_time(self, time):
self.__db_time = time
db_time = property(__get_db_time, __set_db_time)
def db_add_time(self, time):
self.__db_time = time
def db_change_time(self, time):
self.__db_time = time
def db_delete_time(self, time):
self.__db_time = None
def __get_db_parent(self):
return self.__db_parent
def __set_db_parent(self, parent):
self.__db_parent = parent
db_parent = property(__get_db_parent, __set_db_parent)
def db_add_parent(self, parent):
self.__db_parent = parent
def db_change_parent(self, parent):
self.__db_parent = parent
def db_delete_parent(self, parent):
self.__db_parent = None
def __get_db_user(self):
return self.__db_user
def __set_db_user(self, user):
self.__db_user = user
db_user = property(__get_db_user, __set_db_user)
def db_add_user(self, user):
self.__db_user = user
def db_change_user(self, user):
self.__db_user = user
def db_delete_user(self, user):
self.__db_user = None
def __get_db_what(self):
return self.__db_what
def __set_db_what(self, what):
self.__db_what = what
db_what = property(__get_db_what, __set_db_what)
def db_add_what(self, what):
self.__db_what = what
def db_change_what(self, what):
self.__db_what = what
def db_delete_what(self, what):
self.__db_what = None
def __get_db_date(self):
return self.__db_date
def __set_db_date(self, date):
self.__db_date = date
db_date = property(__get_db_date, __set_db_date)
def db_add_date(self, date):
self.__db_date = date
def db_change_date(self, date):
self.__db_date = date
def db_delete_date(self, date):
self.__db_date = None
def __get_db_notes(self):
return self.__db_notes
def __set_db_notes(self, notes):
self.__db_notes = notes
db_notes = property(__get_db_notes, __set_db_notes)
def db_add_notes(self, notes):
self.__db_notes = notes
def db_change_notes(self, notes):
self.__db_notes = notes
def db_delete_notes(self, notes):
self.__db_notes = None
def __get_db_datas(self):
return self.__db_datas
def __set_db_datas(self, datas):
self.__db_datas = datas
db_datas = property(__get_db_datas, __set_db_datas)
def db_get_datas(self):
return self.__db_datas
def db_add_data(self, data):
self.__db_datas.append(data)
def db_change_data(self, data):
found = False
for i in xrange(len(self.__db_datas)):
if self.__db_datas[i].db_id == data.db_id:
self.__db_datas[i] = data
found = True
break
if not found:
self.__db_datas.append(data)
def db_delete_data(self, data):
for i in xrange(len(self.__db_datas)):
if self.__db_datas[i].db_id == data.db_id:
del self.__db_datas[i]
break
def db_get_data(self, key):
for i in xrange(len(self.__db_datas)):
            if self.__db_datas[i].db_id == key:
return self.__db_datas[i]
return None
def getPrimaryKey(self):
return self.__db_time
"""generated automatically by auto_dao.py"""
class DBAddConnection(object):
vtType = 'addConnection'
def __init__(self, id=None, destinationId=None, destinationModule=None, destinationPort=None, sourceId=None, sourceModule=None, sourcePort=None):
self.__db_id = id
self.__db_destinationId = destinationId
self.__db_destinationModule = destinationModule
self.__db_destinationPort = destinationPort
self.__db_sourceId = sourceId
self.__db_sourceModule = sourceModule
self.__db_sourcePort = sourcePort
def __get_db_id(self):
return self.__db_id
def __set_db_id(self, id):
self.__db_id = id
db_id = property(__get_db_id, __set_db_id)
def db_add_id(self, id):
self.__db_id = id
def db_change_id(self, id):
self.__db_id = id
def db_delete_id(self, id):
self.__db_id = None
def __get_db_destinationId(self):
return self.__db_destinationId
def __set_db_destinationId(self, destinationId):
self.__db_destinationId = destinationId
db_destinationId = property(__get_db_destinationId, __set_db_destinationId)
def db_add_destinationId(self, destinationId):
self.__db_destinationId = destinationId
def db_change_destinationId(self, destinationId):
self.__db_destinationId = destinationId
def db_delete_destinationId(self, destinationId):
self.__db_destinationId = None
def __get_db_destinationModule(self):
return self.__db_destinationModule
def __set_db_destinationModule(self, destinationModule):
self.__db_destinationModule = destinationModule
db_destinationModule = property(__get_db_destinationModule, __set_db_destinationModule)
def db_add_destinationModule(self, destinationModule):
self.__db_destinationModule = destinationModule
def db_change_destinationModule(self, destinationModule):
self.__db_destinationModule = destinationModule
def db_delete_destinationModule(self, destinationModule):
self.__db_destinationModule = None
def __get_db_destinationPort(self):
return self.__db_destinationPort
def __set_db_destinationPort(self, destinationPort):
self.__db_destinationPort = destinationPort
db_destinationPort = property(__get_db_destinationPort, __set_db_destinationPort)
def db_add_destinationPort(self, destinationPort):
self.__db_destinationPort = destinationPort
def db_change_destinationPort(self, destinationPort):
self.__db_destinationPort = destinationPort
def db_delete_destinationPort(self, destinationPort):
self.__db_destinationPort = None
def __get_db_sourceId(self):
return self.__db_sourceId
def __set_db_sourceId(self, sourceId):
self.__db_sourceId = sourceId
db_sourceId = property(__get_db_sourceId, __set_db_sourceId)
def db_add_sourceId(self, sourceId):
self.__db_sourceId = sourceId
def db_change_sourceId(self, sourceId):
self.__db_sourceId = sourceId
def db_delete_sourceId(self, sourceId):
self.__db_sourceId = None
def __get_db_sourceModule(self):
return self.__db_sourceModule
def __set_db_sourceModule(self, sourceModule):
self.__db_sourceModule = sourceModule
db_sourceModule = property(__get_db_sourceModule, __set_db_sourceModule)
def db_add_sourceModule(self, sourceModule):
self.__db_sourceModule = sourceModule
def db_change_sourceModule(self, sourceModule):
self.__db_sourceModule = sourceModule
def db_delete_sourceModule(self, sourceModule):
self.__db_sourceModule = None
def __get_db_sourcePort(self):
return self.__db_sourcePort
def __set_db_sourcePort(self, sourcePort):
self.__db_sourcePort = sourcePort
db_sourcePort = property(__get_db_sourcePort, __set_db_sourcePort)
def db_add_sourcePort(self, sourcePort):
self.__db_sourcePort = sourcePort
def db_change_sourcePort(self, sourcePort):
self.__db_sourcePort = sourcePort
def db_delete_sourcePort(self, sourcePort):
self.__db_sourcePort = None
def getPrimaryKey(self):
return self.__db_id
"""generated automatically by auto_dao.py"""
class DBMoveModule(object):
vtType = 'moveModule'
def __init__(self, id=None, dx=None, dy=None):
self.__db_id = id
self.__db_dx = dx
self.__db_dy = dy
def __get_db_id(self):
return self.__db_id
def __set_db_id(self, id):
self.__db_id = id
db_id = property(__get_db_id, __set_db_id)
def db_add_id(self, id):
self.__db_id = id
def db_change_id(self, id):
self.__db_id = id
def db_delete_id(self, id):
self.__db_id = None
def __get_db_dx(self):
return self.__db_dx
def __set_db_dx(self, dx):
self.__db_dx = dx
db_dx = property(__get_db_dx, __set_db_dx)
def db_add_dx(self, dx):
self.__db_dx = dx
def db_change_dx(self, dx):
self.__db_dx = dx
def db_delete_dx(self, dx):
self.__db_dx = None
def __get_db_dy(self):
return self.__db_dy
def __set_db_dy(self, dy):
self.__db_dy = dy
db_dy = property(__get_db_dy, __set_db_dy)
def db_add_dy(self, dy):
self.__db_dy = dy
def db_change_dy(self, dy):
self.__db_dy = dy
def db_delete_dy(self, dy):
self.__db_dy = None
def getPrimaryKey(self):
return self.__db_id
"""generated automatically by auto_dao.py"""
class DBVistrail(object):
vtType = 'vistrail'
def __init__(self, version=None, actions=None, tags=None):
self.__db_version = version
if actions is None:
self.__db_actions = {}
else:
self.__db_actions = actions
if tags is None:
self.__db_tags = {}
else:
self.__db_tags = tags
def __get_db_version(self):
return self.__db_version
def __set_db_version(self, version):
self.__db_version = version
db_version = property(__get_db_version, __set_db_version)
def db_add_version(self, version):
self.__db_version = version
def db_change_version(self, version):
self.__db_version = version
def db_delete_version(self, version):
self.__db_version = None
def __get_db_actions(self):
return self.__db_actions
def __set_db_actions(self, actions):
self.__db_actions = actions
db_actions = property(__get_db_actions, __set_db_actions)
def db_get_actions(self):
return self.__db_actions.values()
def db_add_action(self, action):
self.__db_actions[action.db_time] = action
def db_change_action(self, action):
self.__db_actions[action.db_time] = action
def db_delete_action(self, action):
del self.__db_actions[action.db_time]
def db_get_action(self, key):
if self.__db_actions.has_key(key):
return self.__db_actions[key]
return None
def __get_db_tags(self):
return self.__db_tags
def __set_db_tags(self, tags):
self.__db_tags = tags
db_tags = property(__get_db_tags, __set_db_tags)
def db_get_tags(self):
return self.__db_tags.values()
def db_add_tag(self, tag):
self.__db_tags[tag.db_time] = tag
def db_change_tag(self, tag):
self.__db_tags[tag.db_time] = tag
def db_delete_tag(self, tag):
del self.__db_tags[tag.db_time]
def db_get_tag(self, key):
if self.__db_tags.has_key(key):
return self.__db_tags[key]
return None
def getPrimaryKey(self):
return self.__db_version
"""generated automatically by auto_dao.py"""
class DBChangeAnnotation(object):
vtType = 'changeAnnotation'
def __init__(self, moduleId=None, key=None, value=None):
self.__db_moduleId = moduleId
self.__db_key = key
self.__db_value = value
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def db_add_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_change_moduleId(self, moduleId):
self.__db_moduleId = moduleId
def db_delete_moduleId(self, moduleId):
self.__db_moduleId = None
def __get_db_key(self):
return self.__db_key
def __set_db_key(self, key):
self.__db_key = key
db_key = property(__get_db_key, __set_db_key)
def db_add_key(self, key):
self.__db_key = key
def db_change_key(self, key):
self.__db_key = key
def db_delete_key(self, key):
self.__db_key = None
def __get_db_value(self):
return self.__db_value
def __set_db_value(self, value):
self.__db_value = value
db_value = property(__get_db_value, __set_db_value)
def db_add_value(self, value):
self.__db_value = value
def db_change_value(self, value):
self.__db_value = value
def db_delete_value(self, value):
self.__db_value = None
def getPrimaryKey(self):
return self.__db_moduleId
| {
"content_hash": "0cde2962949e864950b0475889e6869f",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 149,
"avg_line_length": 33.72294887039239,
"alnum_prop": 0.6011071541906139,
"repo_name": "CMUSV-VisTrails/WorkflowRecommendation",
"id": "ccc0198c70be9785335dd0c00a4860486a3e3b1a",
"size": "30204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/db/versions/v0_3_0/domain/auto_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57"
},
{
"name": "PHP",
"bytes": "48730"
},
{
"name": "Python",
"bytes": "12760768"
},
{
"name": "Shell",
"bytes": "33785"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
"""
This file contains the class definition for the sampler MCMCSample classes.
"""
__author__ = 'Brandon C. Kelly'
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats
import scipy.optimize  # needed for scipy.optimize.bisect in plot_2dkde
import acor
class MCMCSample(object):
"""
Class for parameter samples generated by a yamcmc++ sampler. This class contains a dictionary of samples
generated by an MCMC sampler for a set of parameters, as well as methods for plotting and summarizing the results.
In general, the MCMCSample object is empty upon instantiation. One adds parameters to the dictionary through the
AddStep method of a Sampler object. Running a Sampler object then fills the dictionary up with the parameter values.
After running a Sampler object, the MCMCSample object will contain the parameter values, which can then be analyzed
further.
Alternatively, one can load the parameters and their values from a file. This is done through the method
generate_from_file. This is helpful if one has a set of MCMC samples generated by a different program.
"""
def __init__(self, filename=None, logpost=None, trace=None):
"""
Constructor for an MCMCSample object. If no arguments are supplied, then this just creates an empty dictionary
that will contain the MCMC samples. In this case parameters are added to the dictionary through the addstep
method of a Sampler object, and the values are generated by running the Sampler object. Otherwise, if a
filename is supplied then the parameter names and MCMC samples are read in from that file.
:param filename: A string giving the name of an asciifile containing the MCMC samples.
"""
self._samples = dict() # Empty dictionary. We will place the samples for each tracked parameter here.
if logpost is not None:
self.set_logpost(logpost)
if trace is not None:
self.generate_from_trace(trace)
elif filename is not None:
# Construct MCMCSample object by reading in MCMC samples from one or more asciifiles.
self.generate_from_file([filename])
def get_samples(self, name):
"""
        Returns a copy of the numpy array containing the samples for a parameter. This is safer than directly
        accessing the dictionary object containing the samples, to prevent one from inadvertently changing the values of
the samples output from an MCMC sampler.
:param name: The name of the parameter for which the samples are desired.
"""
return self._samples[name].copy()
def generate_from_file(self, filename):
"""
Build the dictionary of parameter samples from an ascii file of MCMC samples. The first line of this file
should contain the parameter names.
:param filename: The name of the file containing the MCMC samples.
"""
        # TODO: put in exceptions to make sure files are read correctly
for fname in filename:
            file = open(fname, 'r')
            name = file.readline().strip()  # strip the trailing newline so the dictionary key is clean
            file.close()
# Grab the MCMC output
trace = np.genfromtxt(fname, skip_header=1)
if name not in self._samples:
# Parameter is not already in the dictionary, so add it. Otherwise do nothing.
self._samples[name] = trace
def autocorr_timescale(self, trace):
"""
Compute the autocorrelation time scale as estimated by the `acor` module.
:param trace: The parameter trace, a numpy array.
"""
acors = []
for i in range(trace.shape[1]):
tau, mean, sigma = acor.acor(trace[:, i].real) # Warning, does not work with numpy.complex
acors.append(tau)
return np.array(acors)
def effective_samples(self, name):
"""
Return the effective number of independent samples of the MCMC sampler.
:param name: The name of the parameter to compute the effective number of independent samples for.
"""
if name not in self._samples:
print("WARNING: sampler does not have", name)
return
else:
print("Calculating effective number of samples")
traces = self._samples[name] # Get the sampled parameter values
npts = traces.shape[0]
timescale = self.autocorr_timescale(traces)
return npts / timescale
def plot_trace(self, name, doShow=False):
"""
Plot the trace of the values, a time series showing the evolution of the parameter values for the MCMC sampler.
Only a single parameter element trace is shown per plot, and all plots are shown on the same plotting window. In
particular, if a parameter is array-valued, then the traces for each element of its array are plotted on a
separate subplot.
:param name: The parameter name.
:param doShow: If true, then show the plot.
"""
if name not in self._samples:
print("WARNING: sampler does not have", name)
return
else:
print("Plotting Trace")
fig = plt.figure()
traces = self._samples[name] # Get the sampled parameter values
ntrace = traces.shape[1]
spN = plt.subplot(ntrace, 1, ntrace)
spN.plot(traces[:,-1], ".", markersize=2)
spN.set_xlabel("Step")
spN.set_ylabel("par %d" % (ntrace-1))
for i in range(ntrace-1):
sp = plt.subplot(ntrace, 1, i+1, sharex=spN)
sp.plot(traces[:,i], ".", markersize=2)
sp.set_ylabel("par %d" % (i))
plt.setp(sp.get_xticklabels(), visible=False)
plt.suptitle(name)
if doShow:
plt.show()
def plot_1dpdf(self, name, doShow=False):
"""
Plot histograms of the parameter values generated by the MCMC sampler. If the parameter is array valued then
histograms of all of the parameter's elements will be plotted.
:param name: The parameter name.
:param doShow: If true, then show the plot.
"""
if name not in self._samples:
print("WARNING: sampler does not have", name)
return
else:
print("Plotting 1d PDF")
fig = plt.figure()
traces = self._samples[name] # Get the sampled parameter values
ntrace = traces.shape[1]
for i in range(ntrace):
sp = plt.subplot(ntrace, 1, i+1)
sp.hist(traces[:,i], bins=50, normed=True)
sp.set_ylabel("par %d" % (i))
if i == ntrace-1:
sp.set_xlabel("val")
plt.suptitle(name)
if doShow:
plt.show()
def plot_2dpdf(self, name1, name2, pindex1=0, pindex2=0, doShow=False):
"""
Plot joint distribution of the parameter values generated by the MCMC sampler.
:param name1: The parameter name along x-axis
:param name2: The parameter name along y-axis
:param pindex1: Which element of the array to plot
:param pindex2: Which element of the array to plot
:param doShow: Call plt.show()
"""
if ((name1 not in self._samples) or (name2 not in self._samples)):
print("WARNING: sampler does not have", name1, name2)
return
if pindex1 >= self._samples[name1].shape[1]:
print("WARNING: not enough data in", name1)
return
if pindex2 >= self._samples[name2].shape[1]:
print("WARNING: not enough data in", name2)
return
print("Plotting 2d PDF")
fig = plt.figure()
trace1 = self._samples[name1][:,pindex1]
trace2 = self._samples[name2][:,pindex2]
# joint distribution
axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7]) # [left, bottom, width, height]
# y histogram
axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
# x histogram
axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
axJ.plot(trace1, trace2, 'ro', ms=1, alpha=0.5)
axX.hist(trace1, bins=100)
axY.hist(trace2, orientation='horizontal', bins=100)
axJ.set_xlabel("%s %d" % (name1, pindex1))
axJ.set_ylabel("%s %d" % (name2, pindex2))
plt.setp(axX.get_xticklabels()+axX.get_yticklabels(), visible=False)
plt.setp(axY.get_xticklabels()+axY.get_yticklabels(), visible=False)
if doShow:
plt.show()
def plot_2dkde(self, name1, name2, pindex1=0, pindex2=0,
nbins=100, doPlotStragglers=True, doShow=False):
"""
Plot joint distribution of the parameter values generated by the MCMC sampler using a kernel density estimate.
:param name1: The parameter name along x-axis
:param name2: The parameter name along y-axis
:param pindex1: Which element of the array to plot
:param pindex2: Which element of the array to plot
:param doShow: Call plt.show()
:param nbins: Number of bins along each axis for KDE
:param doPlotStragglers: Plot individual data points outside KDE contours. Works poorly for small samples.
"""
if ((name1 not in self._samples) or (name2 not in self._samples)):
print("WARNING: sampler does not have", name1, name2)
return
if pindex1 >= self._samples[name1].shape[1]:
print("WARNING: not enough data in", name1)
return
if pindex2 >= self._samples[name2].shape[1]:
print("WARNING: not enough data in", name2)
return
print("Plotting 2d PDF w KDE")
fig = plt.figure()
trace1 = self._samples[name1][:,pindex1].real # JIC we get something imaginary?
trace2 = self._samples[name2][:,pindex2].real
npts = trace1.shape[0]
kde = scipy.stats.gaussian_kde((trace1, trace2))
bins1 = np.linspace(trace1.min(), trace1.max(), nbins)
bins2 = np.linspace(trace2.min(), trace2.max(), nbins)
mesh1, mesh2 = np.meshgrid(bins1, bins2)
hist = kde([mesh1.ravel(), mesh2.ravel()]).reshape(mesh1.shape)
clevels = []
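        # Find the KDE density levels whose enclosed probability mass is
        # ~99.73%, 95.45%, and 68.27% (3-, 2-, and 1-sigma equivalents),
        # located by bisection on the enclosed-mass function defined below.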
for frac in [0.9973, 0.9545, 0.6827]:
hfrac = lambda level, hist=hist, frac=frac: hist[hist>=level].sum()/hist.sum() - frac
level = scipy.optimize.bisect(hfrac, hist.min(), hist.max())
clevels.append(level)
# joint distribution
axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7]) # [left, bottom, width, height]
# y histogram
axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
# x histogram
axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
cont = axJ.contour(mesh1, mesh2, hist, clevels, linestyles="solid", cmap=plt.cm.jet)
axX.hist(trace1, bins=100)
axY.hist(trace2, orientation='horizontal', bins=100)
axJ.set_xlabel(name1 + '[' + str(pindex1) + ']')
axJ.set_ylabel(name2 + '[' + str(pindex2) + ']')
plt.setp(axX.get_xticklabels()+axX.get_yticklabels(), visible=False)
plt.setp(axY.get_xticklabels()+axY.get_yticklabels(), visible=False)
# Note to self: you need to set up the contours above to have
# the outer one first, for collections[0] to work below.
#
# Also a note: this does not work if the outer contour is not
# fully connected.
if doPlotStragglers:
outer = cont.collections[0]._paths
sx = []
sy = []
for i in range(npts):
found = [o.contains_point((trace1[i], trace2[i])) for o in outer]
if not (True in found):
sx.append(trace1[i])
sy.append(trace2[i])
axJ.plot(sx, sy, 'k.', ms = 1, alpha = 0.1)
if doShow:
plt.show()
def plot_autocorr(self, name, acorrFac = 10.0, doShow=False):
"""
        Plot the autocorrelation functions of the traces for a parameter. If the parameter is array-valued then
autocorrelation plots for each of the parameter's elements will be plotted.
:param name: The parameter name.
:param acorrFac: The maximum number of lags to plot, in terms of the autocorrelation time scale of the MCMC
samples. The default is 10 autocorrelation time scales.
:param doShow:
"""
if name not in self._samples:
print("WARNING: sampler does not have", name)
return
else:
print("Plotting autocorrelation function (this make take a while)")
fig = plt.figure()
traces = self._samples[name] # Get the sampled parameter values
mtrace = np.mean(traces, axis=0)
ntrace = traces.shape[1]
acorr = self.autocorr_timescale(traces)
for i in range(ntrace):
sp = plt.subplot(ntrace, 1, i+1)
lags, acf, not_needed1, not_needed2 = plt.acorr(traces[:, i] - mtrace[i], maxlags=traces.shape[0]-1, lw=2)
#sp.set_xlim(-0.5, acorrFac * acorr[i])
sp.set_ylim(-0.01, 1.01)
sp.axhline(y=0.5, c='k', linestyle='--')
sp.axvline(x=acorr[i], c='r', linestyle='--')
sp.set_ylabel("par %d autocorr" % (i))
if i == ntrace-1:
sp.set_xlabel("lag")
plt.suptitle(name)
if doShow:
plt.show()
def plot_parameter(self, name, pindex=0, doShow=False):
"""
Simultaneously plots the trace, histogram, and autocorrelation of this parameter's values. If the parameter
is array-valued, then the user must specify the index of the array to plot, as these are all 1-d plots on a
single plotting window.
:param name: The name of the parameter that the plots are made for.
:param pindex: If the parameter is array-valued, then this is the index of the array that the plots are made
for.
:param doShow: Call plt.show().
"""
if name not in self._samples:
print("WARNING: sampler does not have", name)
return
else:
print("Plotting parameter summary")
fig = plt.figure()
traces = self._samples[name]
plot_title = name
if traces.ndim > 1:
# Parameter is array valued, grab the column corresponding to pindex
if traces.ndim > 2:
# Parameter values are at least matrix-valued, reshape to a vector
traces = traces.reshape(traces.shape[0], np.prod(traces.shape[1:]))
traces = traces[:, pindex]
plot_title = name + "[" + str(pindex) + "]"
# First plot the trace
plt.subplot(211)
plt.plot(traces, '.', markersize=2, alpha=0.5, rasterized=(len(traces) > 1e4))
plt.xlim(0, traces.size)
plt.xlabel("Iteration")
plt.ylabel("Value")
plt.title(plot_title)
# Now add the histogram of values to the trace plot axes
pdf, bin_edges = np.histogram(traces, bins=25)
bin_edges = bin_edges[0:pdf.size]
# Stretch the PDF so that it is readable on the trace plot when plotted horizontally
pdf = pdf / float(pdf.max()) * 0.34 * traces.size
# Add the histogram to the plot
plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0], alpha=0.5, color='DarkOrange')
# Finally, plot the autocorrelation function of the trace
plt.subplot(212)
centered_trace = traces - traces.mean()
lags, acf, not_needed1, not_needed2 = plt.acorr(centered_trace, maxlags=traces.size - 1, lw=2)
plt.ylabel("ACF")
plt.xlabel("Lag")
# Compute the autocorrelation timescale, and then reset the x-axis limits accordingly
acf_timescale = self.autocorr_timescale(traces[:, np.newaxis])
plt.xlim(0, np.min([5 * acf_timescale[0], len(traces)]))
if doShow:
plt.show()
def posterior_summaries(self, name):
"""
Print out the posterior medians, standard deviations, and 68th, 95th, and 99th credibility intervals.
:param name: The name of the parameter for which the summaries are desired.
"""
traces = self._samples[name] # Get the sampled parameter values
effective_nsamples = self.effective_samples(name) # Get the effective number of independent samples
if traces.ndim == 1:
# Parameter is scalar valued, so this is easy
print("Posterior summary for parameter", name)
print("----------------------------------------------")
print("Effective number of independent samples:", effective_nsamples)
print("Median:", np.median(traces))
print("Standard deviation:", np.std(traces))
print("68% credibility interval:", np.percentile(traces, (16.0, 84.0)))
print("95% credibility interval:", np.percentile(traces, (2.5, 97.5)))
print("99% credibility interval:", np.percentile(traces, (0.5, 99.5)))
else:
if traces.ndim > 2:
# Parameter values are at least matrix-valued, reshape to a vector.
traces = traces.reshape(traces.shape[0], np.prod(traces.shape[1:]))
for i in range(traces.shape[1]):
# give summary for each element of this parameter separately
# Parameter is scalar valued, so this is easy
print("Posterior summary for parameter", name, " element", i)
print("----------------------------------------------")
print("Effective number of independent samples:", effective_nsamples[i])
print("Median:", np.median(traces[:, i]))
print("Standard deviation:", np.std(traces[:, i]))
print("68% credibility interval:", np.percentile(traces[:, i], (16.0, 84.0)))
print("95% credibility interval:", np.percentile(traces[:, i], (2.5, 97.5)))
print("99% credibility interval:", np.percentile(traces[:, i], (0.5, 99.5)))
def newaxis(self):
for key in self._samples.keys():
if len(self._samples[key].shape) == 1:
self._samples[key] = self._samples[key][:,np.newaxis]
| {
"content_hash": "980848820bd3a467977f2cca42438c73",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 120,
"avg_line_length": 44.97794117647059,
"alnum_prop": 0.597896572393875,
"repo_name": "brandonckelly/carma_pack",
"id": "057afae722b9581e6d0186bfd2cd44395214b0dc",
"size": "18351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/carmcmc/samplers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "207171"
},
{
"name": "Python",
"bytes": "140314"
},
{
"name": "Roff",
"bytes": "3126"
}
],
"symlink_target": ""
} |
class SummitRestException(Exception):
def __init__(self, status, uri, msg='', code=None, method='GET'):
self.status = status
self.uri = uri
self.msg = msg
self.code = code
self.method = method
def __str__(self):
return 'HTTP {0} error: {1}'.format(self.status, self.msg)
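# A minimal usage sketch (status, uri, and msg values are illustrative):
#
#   exc = SummitRestException(status=404, uri='/agents/1', msg='Not Found')
#   str(exc)  # -> 'HTTP 404 error: Not Found'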
| {
"content_hash": "e7cbe45a7e1fc351c6146982fbf22303",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 29.90909090909091,
"alnum_prop": 0.5683890577507599,
"repo_name": "josephl/summit-python",
"id": "92c15cf29eb54910fd21395b740f579d8f893dfd",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summit/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6590"
}
],
"symlink_target": ""
} |
"""Classes and functions implementing Layer SavedModel serialization."""
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.saving.saved_model import base_serialization
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import save_impl
from tensorflow.python.keras.saving.saved_model import serialized_attributes
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.trackable import data_structures
from tensorflow.python.util import nest
class LayerSavedModelSaver(base_serialization.SavedModelSaver):
"""Implements Layer SavedModel serialization."""
@property
def object_identifier(self):
return constants.LAYER_IDENTIFIER
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
return self._python_properties_internal()
def _python_properties_internal(self):
"""Returns dictionary of all python properties."""
# TODO(kathywu): Add support for metrics serialization.
# TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once
# the python config serialization has caught up.
metadata = dict(
name=self.obj.name,
trainable=self.obj.trainable,
expects_training_arg=self.obj._expects_training_arg, # pylint: disable=protected-access
dtype=policy.serialize(self.obj._dtype_policy), # pylint: disable=protected-access
batch_input_shape=getattr(self.obj, '_batch_input_shape', None),
stateful=self.obj.stateful,
must_restore_from_config=self.obj._must_restore_from_config, # pylint: disable=protected-access
)
metadata.update(get_serialized(self.obj))
if self.obj.input_spec is not None:
# Layer's input_spec has already been type-checked in the property setter.
metadata['input_spec'] = nest.map_structure(
lambda x: generic_utils.serialize_keras_object(x) if x else None,
self.obj.input_spec)
if (self.obj.activity_regularizer is not None and
hasattr(self.obj.activity_regularizer, 'get_config')):
metadata['activity_regularizer'] = generic_utils.serialize_keras_object(
self.obj.activity_regularizer)
if self.obj._build_input_shape is not None: # pylint: disable=protected-access
metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access
return metadata
def objects_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).objects_to_serialize)
def functions_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).functions_to_serialize)
def _get_serialized_attributes(self, serialization_cache):
"""Generates or retrieves serialized attributes from cache."""
keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})
if self.obj in keras_cache:
return keras_cache[self.obj]
serialized_attr = keras_cache[self.obj] = (
serialized_attributes.SerializedAttributes.new(self.obj))
if (save_impl.should_skip_serialization(self.obj) or
self.obj._must_restore_from_config): # pylint: disable=protected-access
return serialized_attr
object_dict, function_dict = self._get_serialized_attributes_internal(
serialization_cache)
serialized_attr.set_and_validate_objects(object_dict)
serialized_attr.set_and_validate_functions(function_dict)
return serialized_attr
def _get_serialized_attributes_internal(self, serialization_cache):
"""Returns dictionary of serialized attributes."""
objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
# Attribute validator requires that the default save signature is added to
# function dict, even if the value is None.
functions['_default_save_signature'] = None
return objects, functions
# TODO(kathywu): Move serialization utils (and related utils from
# generic_utils.py) to a separate file.
def get_serialized(obj):
with generic_utils.skip_failed_serialization():
# Store the config dictionary, which may be used when reviving the object.
# When loading, the program will attempt to revive the object from config,
# and if that fails, the object will be revived from the SavedModel.
return generic_utils.serialize_keras_object(obj)
class InputLayerSavedModelSaver(base_serialization.SavedModelSaver):
"""InputLayer serialization."""
@property
def object_identifier(self):
return constants.INPUT_LAYER_IDENTIFIER
@property
def python_properties(self):
return dict(
class_name=type(self.obj).__name__,
name=self.obj.name,
dtype=self.obj.dtype,
sparse=self.obj.sparse,
ragged=self.obj.ragged,
batch_input_shape=self.obj._batch_input_shape, # pylint: disable=protected-access
config=self.obj.get_config())
def objects_to_serialize(self, serialization_cache):
return {}
def functions_to_serialize(self, serialization_cache):
return {}
class RNNSavedModelSaver(LayerSavedModelSaver):
"""RNN layer serialization."""
@property
def object_identifier(self):
return constants.RNN_LAYER_IDENTIFIER
def _get_serialized_attributes_internal(self, serialization_cache):
objects, functions = (
super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(
serialization_cache))
states = data_structures.wrap_or_unwrap(self.obj.states)
    # SavedModel requires all the objects to be Trackable when saving.
# If the states is still a tuple after wrap_or_unwrap, it means it doesn't
# contain any trackable item within it, eg empty tuple or (None, None) for
# stateless ConvLSTM2D. We convert them to list so that wrap_or_unwrap can
# make it a Trackable again for saving. When loaded, ConvLSTM2D is
# able to handle the tuple/list conversion.
if isinstance(states, tuple):
states = data_structures.wrap_or_unwrap(list(states))
objects['states'] = states
return objects, functions
class IndexLookupLayerSavedModelSaver(LayerSavedModelSaver):
"""Index lookup layer serialization."""
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
metadata = self._python_properties_internal()
if metadata['config'].get('has_static_table', False):
metadata['config']['vocabulary'] = None
return metadata
| {
"content_hash": "6fc1d63aab83bb24a211c067184dada7",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 104,
"avg_line_length": 41.08695652173913,
"alnum_prop": 0.7298563869992442,
"repo_name": "karllessard/tensorflow",
"id": "9e6f2c55615d63ec59ecc06131eb9ad09facbf7e",
"size": "7304",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/saving/saved_model/layer_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1366182"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "124800442"
},
{
"name": "CMake",
"bytes": "183072"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416070"
},
{
"name": "Go",
"bytes": "2104698"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792868"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11176792"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "169288"
},
{
"name": "Objective-C++",
"bytes": "294187"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42620525"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "620121"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7545879"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import itertools
import json
from oslo.config import cfg
from logcollector.openstack.common.gettextutils import _
from logcollector.openstack.common import log as logging
from logcollector.openstack.common.rpc import matchmaker as mm
matchmaker_opts = [
# Matchmaker ring file
cfg.StrOpt('ringfile',
deprecated_name='matchmaker_ringfile',
deprecated_group='DEFAULT',
default='/etc/oslo/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)
class RingExchange(mm.Exchange):
"""Match Maker where hosts are loaded from a static JSON formatted file.
__init__ takes optional ring dictionary argument, otherwise
    loads the ringfile from CONF.matchmaker_ring.ringfile.
"""
def __init__(self, ring=None):
super(RingExchange, self).__init__()
if ring:
self.ring = ring
else:
with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
self.ring = json.load(fh)
self.ring0 = {}
for k in self.ring.keys():
self.ring0[k] = itertools.cycle(self.ring[k])
def _ring_has(self, key):
return key in self.ring0
class RoundRobinRingExchange(RingExchange):
"""A Topic Exchange based on a hashmap."""
def __init__(self, ring=None):
super(RoundRobinRingExchange, self).__init__(ring)
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
return [(key + '.' + host, host)]
class FanoutRingExchange(RingExchange):
"""Fanout Exchange based on a hashmap."""
def __init__(self, ring=None):
super(FanoutRingExchange, self).__init__(ring)
def run(self, key):
# Assume starts with "fanout~", strip it for lookup.
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
class MatchMakerRing(mm.MatchMakerBase):
"""Match Maker where hosts are loaded from a static hashmap."""
def __init__(self, ring=None):
super(MatchMakerRing, self).__init__()
self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
self.add_binding(mm.DirectBinding(), mm.DirectExchange())
self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
| {
"content_hash": "073df7721456cf7f3046ddb14424c604",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 76,
"avg_line_length": 31.597826086956523,
"alnum_prop": 0.60921912624699,
"repo_name": "redhat-cip/openstack-logcollector",
"id": "1f6e3331b1e1075b0972fcc7ca5203623b10bea2",
"size": "3531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack-logcollector/openstack/common/rpc/matchmaker_ring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "619436"
}
],
"symlink_target": ""
} |
"""AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class IDiagnosticsManagement(AppAssureAPI):
"""Full documentation online at
http://docs.appassure.com/display/AA50D/IDiagnosticsManagement
"""
def executeRemoteCommand(self, data):
"""Executes an arbitrary remote command."""
return self.session.request('diag/command/', 'POST',
self.getXML(data, 'remoteCommand'))
def readFile(self, path):
"""Reads a file from the local file system and
streams it back to the client.
"""
return self.session.request('diag/files/?q=%s'
% (path))
def getLog(self):
"""Gets the entire contents of the replay.log file."""
return self.session.request('diag/log/')
def getLogSession(self):
"""Packages the current log session and returns it as
a byte stream. The contents of the stream is a Gibraltar
.glp file.
"""
return self.session.request('diag/logSession/')
def uploadLogSessions(self):
"""Uploads the current log session to the Gibraltar
        (http://www.gibraltarsoftware.com/) logging framework.
"""
return self.session.request('diag/logSession/', 'POST')
def restartService(self):
"""Stops, forcibly kills (if necessary), and
re-starts the service.
"""
return self.session.request('diag/service/', 'DELETE')
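# A minimal usage sketch (how the session object is constructed comes from
# AppAssureAPI and is not shown here; the calls below are illustrative):
#
#   diag = IDiagnosticsManagement(session)
#   log_text = diag.getLog()
#   diag.restartService()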
| {
"content_hash": "df7ac67bf0372657978ceb7f9a218182",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 33.86046511627907,
"alnum_prop": 0.6263736263736264,
"repo_name": "rshipp/python-appassure",
"id": "99b0864422153782a337b28d0176aad80ef9d292",
"size": "1456",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "appassure/core/IDiagnosticsManagement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "174855"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from rdr_service.dao.ghost_check_dao import GhostCheckDao
from tests.helpers.unittest_base import BaseTestCase
class GhostCheckDaoTest(BaseTestCase):
def test_loads_only_vibrent(self):
"""We might accidentally start flagging CE participants as ghosts if they're returned"""
vibrent_participant = self.data_generator.create_database_participant(participantOrigin='vibrent')
self.data_generator.create_database_participant(participantOrigin='careevolution')
self.data_generator.create_database_participant(participantOrigin='anotherplatform')
participants = GhostCheckDao.get_participants_needing_checked(
session=self.data_generator.session,
earliest_signup_time=datetime.now() - timedelta(weeks=1)
)
self.assertEqual(1, len(participants), 'Should only be the Vibrent participant')
self.assertEqual(vibrent_participant.participantId, participants[0].participantId)
def test_ghost_flag_returned(self):
"""Ensure we get back the ghost data field"""
ghost_participant = self.data_generator.create_database_participant(
participantOrigin='vibrent',
isGhostId=True
)
self.data_generator.create_database_participant(
participantOrigin='vibrent',
isGhostId=None
)
self.data_generator.create_database_participant(
participantOrigin='vibrent',
isGhostId=False
)
results = GhostCheckDao.get_participants_needing_checked(
session=self.data_generator.session,
earliest_signup_time=datetime.now() - timedelta(weeks=1)
)
for participant in results:
if participant.participantId == ghost_participant.participantId:
self.assertTrue(participant.isGhostId)
else:
self.assertFalse(participant.isGhostId)
| {
"content_hash": "97a5f8374d42f43ffa4095ae4599a714",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 106,
"avg_line_length": 44.52272727272727,
"alnum_prop": 0.6865747830525778,
"repo_name": "all-of-us/raw-data-repository",
"id": "a8f25d5bc9708496e9e835a2692e8fdbbd25b4c6",
"size": "1959",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/dao_tests/test_ghost_check_dao.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
from google.cloud import assuredworkloads_v1
def sample_acknowledge_violation():
# Create a client
client = assuredworkloads_v1.AssuredWorkloadsServiceClient()
# Initialize request argument(s)
request = assuredworkloads_v1.AcknowledgeViolationRequest(
name="name_value",
comment="comment_value",
)
# Make the request
response = client.acknowledge_violation(request=request)
# Handle the response
print(response)
# [END assuredworkloads_v1_generated_AssuredWorkloadsService_AcknowledgeViolation_sync]
| {
"content_hash": "f15080288ea64216eb36fdf1eca8c7dd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 87,
"avg_line_length": 27.9,
"alnum_prop": 0.7401433691756273,
"repo_name": "googleapis/python-assured-workloads",
"id": "4c772f772f8e7a633ffd1d735a7f667c4300a518",
"size": "1986",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/assuredworkloads_v1_generated_assured_workloads_service_acknowledge_violation_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "768919"
},
{
"name": "Shell",
"bytes": "30693"
}
],
"symlink_target": ""
} |
import unittest
from programy.dialog import Sentence, Question, Conversation
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain import BrainConfiguration
from programy.config.bot import BotConfiguration
#############################################################################
#
class SentenceTests(unittest.TestCase):
def test_sentence_creation_empty(self):
sentence = Sentence("")
self.assertIsNotNone(sentence)
self.assertEqual(0, sentence.num_words())
with self.assertRaises(Exception):
sentence.sentence.word(0)
def test_sentence_creation_spaces(self):
sentence = Sentence(" ")
self.assertIsNotNone(sentence)
self.assertEqual(0, sentence.num_words())
with self.assertRaises(Exception):
sentence.sentence.word(0)
def test_split_into_words(self):
sentence = Sentence("HELLO")
self.assertIsNotNone(sentence)
self.assertEqual(1, sentence.num_words())
self.assertEqual("HELLO", sentence.word(0))
self.assertEqual("HELLO", sentence.words_from_current_pos(0))
with self.assertRaises(Exception):
sentence.sentence.word(1)
self.assertEqual("HELLO", sentence.text())
def test_sentence_creation_one_word(self):
sentence = Sentence("One")
self.assertIsNotNone(sentence)
self.assertEqual(1, sentence.num_words())
with self.assertRaises(Exception):
sentence.sentence.word(1)
self.assertEqual("One", sentence.text())
def test_sentence_creation_two_words(self):
sentence = Sentence("One Two")
self.assertIsNotNone(sentence)
self.assertEqual(2, sentence.num_words())
self.assertEqual("One", sentence.word(0))
self.assertEqual("Two", sentence.word(1))
with self.assertRaises(Exception):
sentence.sentence.word(2)
self.assertEqual("One Two", sentence.text())
def test_sentence_creation_two_words_diff_split_char(self):
sentence = Sentence("One,Two", ",")
self.assertIsNotNone(sentence)
self.assertEqual(2, sentence.num_words())
self.assertEqual("One", sentence.word(0))
self.assertEqual("Two", sentence.word(1))
with self.assertRaises(Exception):
sentence.sentence.word(2)
self.assertEqual("One Two", sentence.text())
def test_words_from_current_pos(self):
sentence = Sentence("One Two Three")
self.assertIsNotNone(sentence)
self.assertEqual("One Two Three", sentence.words_from_current_pos(0))
self.assertEqual("Two Three", sentence.words_from_current_pos(1))
self.assertEqual("Three", sentence.words_from_current_pos(2))
with self.assertRaises(Exception):
self.assertEqual("Three", sentence.words_from_current_pos(3))
self.assertEqual("One Two Three", sentence.text())
#############################################################################
#
class QuestionTests(unittest.TestCase):
def test_question_no_sentences_empty(self):
question = Question.create_from_text("")
self.assertIsNotNone(question)
self.assertEqual(0, len(question.sentences))
def test_question_no_sentences_blank(self):
question = Question.create_from_text(" ")
self.assertIsNotNone(question)
self.assertEqual(0, len(question.sentences))
def test_question_one_sentence(self):
question = Question.create_from_text("Hello There")
self.assertIsNotNone(question)
self.assertEqual(1, len(question.sentences))
def test_question_multi_sentence(self):
question = Question.create_from_text("Hello There. How Are you")
self.assertIsNotNone(question)
self.assertEqual(2, len(question.sentences))
self.assertEqual("Hello There", question.sentence(0).text())
self.assertEqual("How Are you", question.sentence(1).text())
with self.assertRaises(Exception):
question.sentence(2)
def test_question_create_from_sentence(self):
sentence = Sentence("One Two Three")
question = Question.create_from_sentence(sentence)
self.assertIsNotNone(question)
self.assertEqual(1, len(question.sentences))
self.assertEqual(sentence.text(), question.sentence(0).text())
with self.assertRaises(Exception):
question.sentence(1)
def test_question_create_from_question(self):
question = Question.create_from_text("Hello There")
new_question = Question.create_from_question(question)
self.assertIsNotNone(new_question)
self.assertEqual(1, len(new_question.sentences))
self.assertEqual("Hello There", question.sentence(0).text())
with self.assertRaises(Exception):
question.sentence(1)
def test_combine_answers(self):
question = Question()
sentence1 = Sentence("Hi")
sentence1._response = "Hello"
question._sentences.append(sentence1)
sentence2 = Sentence("Hi Again")
question._sentences.append(sentence2)
sentence2._response = "World"
self.assertEqual(2, len(question._sentences))
self.assertEqual(question._sentences[0]._response, "Hello")
self.assertEqual(question._sentences[1]._response, "World")
sentences = question.combine_sentences()
self.assertEqual("Hi. Hi Again", sentences)
combined = question.combine_answers()
self.assertIsNotNone(combined)
self.assertEqual(combined, "Hello. World")
def test_next_previous_sentences(self):
question = Question.create_from_text("Hello There. How Are you")
self.assertEqual("How Are you", question.current_sentence().text())
# TODO This should be 1 and will need to be changed in the code too
self.assertEqual("Hello There", question.previous_sentence(2).text())
#############################################################################
#
class ConversationTests(unittest.TestCase):
def test_conversation(self):
test_brain = Brain(BrainConfiguration())
test_bot = Bot(test_brain, BotConfiguration())
conversation = Conversation("test", test_bot, max_histories=3)
self.assertIsNotNone(conversation)
self.assertIsNotNone(conversation._bot)
self.assertIsNotNone(conversation._clientid)
self.assertEqual(conversation._clientid, "test")
self.assertEqual(0, len(conversation._questions))
self.assertEqual(3, conversation._max_histories)
self.assertEqual(1, len(conversation._predicates))
with self.assertRaises(Exception):
conversation.current_question()
with self.assertRaises(Exception):
conversation.nth_question(0)
question1 = Question.create_from_text("Hello There")
conversation.record_dialog(question1)
self.assertEqual(1, len(conversation.all_sentences()))
self.assertEqual(question1, conversation.current_question())
self.assertEqual(question1, conversation.nth_question(1))
with self.assertRaises(Exception):
conversation.nth_question(2)
questions = conversation.all_sentences()
self.assertEqual(1, len(questions))
question2 = Question.create_from_text("Hello There Again")
conversation.record_dialog(question2)
self.assertEqual(2, len(conversation.all_sentences()))
self.assertEqual(question2, conversation.current_question())
self.assertEqual(question2, conversation.nth_question(1))
with self.assertRaises(Exception):
conversation.nth_question(3)
questions = conversation.all_sentences()
self.assertEqual(2, len(questions))
question3 = Question.create_from_text("Hello There Again Again")
conversation.record_dialog(question3)
self.assertEqual(3, len(conversation.all_sentences()))
self.assertEqual(question3, conversation.current_question())
self.assertEqual(question3, conversation.nth_question(1))
with self.assertRaises(Exception):
conversation.nth_question(4)
questions = conversation.all_sentences()
self.assertEqual(3, len(questions))
# Max Histories for this test is 3
        # Therefore we should see the first question popped off the stack
question4 = Question.create_from_text("Hello There Again Again Again")
conversation.record_dialog(question4)
self.assertEqual(3, len(conversation.all_sentences()))
self.assertEqual(question4, conversation.current_question())
self.assertEqual(question4, conversation.nth_question(1))
with self.assertRaises(Exception):
conversation.nth_question(5)
questions = conversation.all_sentences()
        self.assertEqual(3, len(questions))
| {
"content_hash": "616f257538901f09f8809fe632c7f1f8",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 78,
"avg_line_length": 41.58139534883721,
"alnum_prop": 0.6533557046979865,
"repo_name": "JustArchi/program-y",
"id": "4cc48e9626c1fd2a54f7674b91cc13a5be8a8a20",
"size": "8940",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/test/test_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "1027605"
},
{
"name": "Shell",
"bytes": "2835"
}
],
"symlink_target": ""
} |
import warnings
import asyncio
import collections
import logging
from datetime import datetime, timedelta
from asyncio import ensure_future
from .protocol import STATE_NEW, STATE_OPEN, STATE_CLOSING, STATE_CLOSED
from .protocol import FRAME_OPEN, FRAME_CLOSE
from .protocol import FRAME_MESSAGE, FRAME_MESSAGE_BLOB, FRAME_HEARTBEAT
from .exceptions import SessionIsAcquired, SessionIsClosed
from .protocol import MSG_CLOSE, MSG_MESSAGE
from .protocol import close_frame, message_frame, messages_frame
from .protocol import SockjsMessage, OpenMessage, ClosedMessage
log = logging.getLogger("sockjs")
class Session(object):
"""SockJS session object
``state``: Session state
    ``manager``: Session manager that holds this session
``acquired``: Acquired state, indicates that transport is using session
``timeout``: Session timeout
"""
manager = None
acquired = False
state = STATE_NEW
interrupted = False
exception = None
def __init__(
self, id, handler, request, *, timeout=timedelta(seconds=10), debug=False
):
self.id = id
self.handler = handler
self.request = request
self.expired = False
self.timeout = timeout
self.expires = datetime.now() + timeout
self._hits = 0
self._heartbeats = 0
self._heartbeat_transport = False
self._debug = debug
self._waiter = None
self._queue = collections.deque()
def __str__(self):
result = ["id=%r" % (self.id,)]
if self.state == STATE_OPEN:
result.append("connected")
elif self.state == STATE_CLOSED:
result.append("closed")
else:
result.append("disconnected")
if self.acquired:
result.append("acquired")
if len(self._queue):
result.append("queue[%s]" % len(self._queue))
if self._hits:
result.append("hits=%s" % self._hits)
if self._heartbeats:
result.append("heartbeats=%s" % self._heartbeats)
return " ".join(result)
def _tick(self, timeout=None):
if timeout is None:
self.expires = datetime.now() + self.timeout
else:
self.expires = datetime.now() + timeout
async def _acquire(self, manager, heartbeat=True):
self.acquired = True
self.manager = manager
self._heartbeat_transport = heartbeat
self._tick()
self._hits += 1
if self.state == STATE_NEW:
log.debug("open session: %s", self.id)
self.state = STATE_OPEN
self._feed(FRAME_OPEN, FRAME_OPEN)
try:
await self.handler(OpenMessage, self)
except asyncio.CancelledError:
raise
except Exception as exc:
self.state = STATE_CLOSING
self.exception = exc
self.interrupted = True
self._feed(FRAME_CLOSE, (3000, "Internal error"))
log.exception("Exception in open session handling.")
def _release(self):
self.acquired = False
self.manager = None
self._heartbeat_transport = False
def _heartbeat(self):
self._heartbeats += 1
if self._heartbeat_transport:
self._feed(FRAME_HEARTBEAT, FRAME_HEARTBEAT)
def _feed(self, frame, data):
# pack messages
if frame == FRAME_MESSAGE:
if self._queue and self._queue[-1][0] == FRAME_MESSAGE:
self._queue[-1][1].append(data)
else:
self._queue.append((frame, [data]))
else:
self._queue.append((frame, data))
# notify waiter
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(True)
async def _wait(self, pack=True):
if not self._queue and self.state != STATE_CLOSED:
assert not self._waiter
loop = asyncio.get_event_loop()
self._waiter = loop.create_future()
await self._waiter
if self._queue:
frame, payload = self._queue.popleft()
self._tick()
if pack:
if frame == FRAME_CLOSE:
return FRAME_CLOSE, close_frame(*payload)
elif frame == FRAME_MESSAGE:
return FRAME_MESSAGE, messages_frame(payload)
return frame, payload
else:
raise SessionIsClosed()
async def _remote_close(self, exc=None):
"""close session from remote."""
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
log.info("close session: %s", self.id)
self.state = STATE_CLOSING
if exc is not None:
self.exception = exc
self.interrupted = True
try:
await self.handler(SockjsMessage(MSG_CLOSE, exc), self)
except Exception:
log.exception("Exception in close handler.")
async def _remote_closed(self):
if self.state == STATE_CLOSED:
return
log.info("session closed: %s", self.id)
self.state = STATE_CLOSED
self.expire()
try:
await self.handler(ClosedMessage, self)
except Exception:
log.exception("Exception in closed handler.")
# notify waiter
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(True)
async def _remote_message(self, msg):
log.debug("incoming message: %s, %s", self.id, msg[:200])
self._tick()
try:
await self.handler(SockjsMessage(MSG_MESSAGE, msg), self)
except Exception:
log.exception("Exception in message handler.")
async def _remote_messages(self, messages):
self._tick()
for msg in messages:
log.debug("incoming message: %s, %s", self.id, msg[:200])
try:
await self.handler(SockjsMessage(MSG_MESSAGE, msg), self)
except Exception:
log.exception("Exception in message handler.")
def expire(self):
"""Manually expire a session."""
self.expired = True
def send(self, msg):
"""send message to client."""
assert isinstance(msg, str), "String is required"
if self._debug:
log.info("outgoing message: %s, %s", self.id, str(msg)[:200])
if self.state != STATE_OPEN:
return
self._feed(FRAME_MESSAGE, msg)
def send_frame(self, frm):
"""send message frame to client."""
if self._debug:
log.info("outgoing message: %s, %s", self.id, frm[:200])
if self.state != STATE_OPEN:
return
self._feed(FRAME_MESSAGE_BLOB, frm)
def close(self, code=3000, reason="Go away!"):
"""close session"""
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
if self._debug:
log.debug("close session: %s", self.id)
self.state = STATE_CLOSING
self._feed(FRAME_CLOSE, (code, reason))
_marker = object()
class SessionManager(dict):
"""A basic session manager."""
_hb_handle = None # heartbeat event loop timer
_hb_task = None # gc task
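# A hedged usage sketch (the app/request objects come from aiohttp and are
# assumptions here; the method names are the ones defined in this class):
#
#     manager = SessionManager('chat', app, handler, heartbeat=25.0)
#     manager.start()                          # schedule the heartbeat timer
#     session = manager.get(sid, create=True, request=request)
#     await manager.acquire(session)
#     ...
#     await manager.release(session)
#     await manager.clear()                    # expire all sessions on shutdown
#     manager.stop()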
def __init__(
self,
name,
app,
handler,
heartbeat=25.0,
timeout=timedelta(seconds=5),
debug=False,
):
self.name = name
self.route_name = "sockjs-url-%s" % name
self.app = app
self.handler = handler
self.factory = Session
self.acquired = {}
self.sessions = []
self.heartbeat = heartbeat
self.timeout = timeout
self.debug = debug
def route_url(self, request):
return request.route_url(self.route_name)
@property
def started(self):
return self._hb_handle is not None
def start(self):
if not self._hb_handle:
loop = asyncio.get_event_loop()
self._hb_handle = loop.call_later(self.heartbeat, self._heartbeat)
def stop(self):
if self._hb_handle is not None:
self._hb_handle.cancel()
self._hb_handle = None
if self._hb_task is not None:
self._hb_task.cancel()
self._hb_task = None
def _heartbeat(self):
if self._hb_task is None:
self._hb_task = ensure_future(self._heartbeat_task())
async def _heartbeat_task(self):
sessions = self.sessions
if sessions:
now = datetime.now()
idx = 0
while idx < len(sessions):
session = sessions[idx]
session._heartbeat()
if session.expires < now:
# Session is to be GC'd immediately
if session.id in self.acquired:
await self.release(session)
if session.state == STATE_OPEN:
await session._remote_close()
if session.state == STATE_CLOSING:
await session._remote_closed()
del self[session.id]
del self.sessions[idx]
continue
idx += 1
self._hb_task = None
loop = asyncio.get_event_loop()
self._hb_handle = loop.call_later(self.heartbeat, self._heartbeat)
def _add(self, session):
if session.expired:
raise ValueError("Can not add expired session")
session.manager = self
session.registry = self.app
self[session.id] = session
self.sessions.append(session)
return session
def get(self, id, create=False, request=None, default=_marker):
session = super(SessionManager, self).get(id, None)
if session is None:
if create:
session = self._add(
self.factory(
id,
self.handler,
request,
timeout=self.timeout,
debug=self.debug,
)
)
else:
if default is not _marker:
return default
raise KeyError(id)
return session
async def acquire(self, s):
sid = s.id
if sid in self.acquired:
raise SessionIsAcquired("Another connection still open")
if sid not in self:
raise KeyError("Unknown session")
await s._acquire(self)
self.acquired[sid] = True
return s
def is_acquired(self, session):
return session.id in self.acquired
async def release(self, s):
if s.id in self.acquired:
s._release()
del self.acquired[s.id]
def active_sessions(self):
for session in list(self.values()):
if not session.expired:
yield session
async def clear(self):
"""Manually expire all sessions in the pool."""
for session in list(self.values()):
if session.state != STATE_CLOSED:
await session._remote_closed()
self.sessions.clear()
super(SessionManager, self).clear()
def broadcast(self, message):
blob = message_frame(message)
for session in list(self.values()):
if not session.expired:
session.send_frame(blob)
def __del__(self):
if len(self.sessions):
warnings.warn(
"Unclosed sessions! "
"Please call `await SessionManager.clear()` before del",
RuntimeWarning,
)
self.stop()
| {
"content_hash": "4626db0616ae31c61940b2eaffae3ed1",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 81,
"avg_line_length": 28.99270072992701,
"alnum_prop": 0.5451493789862369,
"repo_name": "aio-libs/sockjs",
"id": "7c8c425e68c60cb566e95dadb94fe82e29b1525d",
"size": "11916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sockjs/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1116"
},
{
"name": "Python",
"bytes": "94958"
}
],
"symlink_target": ""
} |
from colossus.game.route import Route
from colossus.game.core import Core
from colossus.game.ice import Ice
from colossus.game.entity import Node, Packet | {
"content_hash": "902119e6ab4d393b29bdfed8d802762e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 38.25,
"alnum_prop": 0.8366013071895425,
"repo_name": "spectralflux/colossus",
"id": "fa53fd8917aede2477931db6f3eecb8efdf295de",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colossus/game/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10089"
}
],
"symlink_target": ""
} |
from freebase.schema import dump_base, dump_type, restore
try:
import jsonlib2 as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
import sys
def cmd_dump_base(fb, baseid):
"""dump a base to stdout
%prog dump_base baseid
Dump a base by outputting a json representation
of the types and properties involved.
"""
print >> sys.stdout, json.dumps(dump_base(fb.mss, baseid), indent=2)
def cmd_dump_type(fb, typeid, follow_types=True):
"""dump a type to stdout
%prog dump_type typeid [follow_types=True]
Dump a type by outputting a json representation
of the type and properties involved.
"""
print >> sys.stdout, json.dumps(dump_type(fb.mss, typeid, follow_types), indent=2)
def cmd_restore(fb, newlocation, graphfile):
"""restore a graph object to the graph
%prog restore newlocation graphfile
Restore a graph object to the newlocation
"""
fh = open(graphfile, "r")
graph = json.loads(fh.read())
fh.close()
return restore(fb.mss, graph, newlocation, ignore_types=None)
| {
"content_hash": "aae42d0cfe8a960979ed00c33d405594",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 86,
"avg_line_length": 27.341463414634145,
"alnum_prop": 0.6815343443354148,
"repo_name": "yuvadm/freebase-python",
"id": "b5a30da071d0166dcf4a25957be6270741376589",
"size": "1121",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "freebase/fcl/schema.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "90677"
},
{
"name": "Python",
"bytes": "413743"
}
],
"symlink_target": ""
} |
from typing import Any, Dict, List, Optional
import numpy as np
from paralleldomain.decoding.common import DecoderSettings
from paralleldomain.decoding.directory.sensor_frame_decoder import DirectoryCameraSensorFrameDecoder
from paralleldomain.decoding.frame_decoder import FrameDecoder, TDateTime
from paralleldomain.decoding.sensor_frame_decoder import (
CameraSensorFrameDecoder,
LidarSensorFrameDecoder,
RadarSensorFrameDecoder,
)
from paralleldomain.model.class_mapping import ClassDetail
from paralleldomain.model.ego import EgoPose
from paralleldomain.model.sensor import CameraSensorFrame, LidarSensorFrame, RadarSensorFrame
from paralleldomain.model.type_aliases import FrameId, SceneName, SensorName
from paralleldomain.utilities.any_path import AnyPath
from paralleldomain.utilities.fsio import read_json
class DirectoryFrameDecoder(FrameDecoder[None]):
def __init__(
self,
dataset_name: str,
scene_name: SceneName,
dataset_path: AnyPath,
settings: DecoderSettings,
image_folder: str,
semantic_segmentation_folder: str,
metadata_folder: Optional[str],
camera_name: str,
class_map: List[ClassDetail],
):
super().__init__(dataset_name=dataset_name, scene_name=scene_name, settings=settings)
self.dataset_path = dataset_path
self.image_folder = image_folder
self.semantic_segmentation_folder = semantic_segmentation_folder
self.camera_name = camera_name
self._metadata_folder = metadata_folder
self._class_map = class_map
def _decode_ego_pose(self, frame_id: FrameId) -> EgoPose:
raise ValueError("Loading from directory does not support ego pose!")
def _decode_available_sensor_names(self, frame_id: FrameId) -> List[SensorName]:
return [self.camera_name]
def _decode_available_camera_names(self, frame_id: FrameId) -> List[SensorName]:
return [self.camera_name]
def _decode_available_lidar_names(self, frame_id: FrameId) -> List[SensorName]:
raise ValueError("Loading from directory does not support lidar data!")
def _decode_datetime(self, frame_id: FrameId) -> None:
return None
def _create_camera_sensor_frame_decoder(self) -> CameraSensorFrameDecoder[None]:
return DirectoryCameraSensorFrameDecoder(
dataset_name=self.dataset_name,
scene_name=self.scene_name,
dataset_path=self.dataset_path,
settings=self.settings,
image_folder=self.image_folder,
semantic_segmentation_folder=self.semantic_segmentation_folder,
metadata_folder=self._metadata_folder,
class_map=self._class_map,
)
def _decode_camera_sensor_frame(
self, decoder: CameraSensorFrameDecoder[None], frame_id: FrameId, sensor_name: SensorName
) -> CameraSensorFrame[None]:
return CameraSensorFrame[None](sensor_name=sensor_name, frame_id=frame_id, decoder=decoder)
def _create_lidar_sensor_frame_decoder(self) -> LidarSensorFrameDecoder[None]:
raise ValueError("Loading from directory does not support lidar data!")
def _decode_lidar_sensor_frame(
self, decoder: LidarSensorFrameDecoder[None], frame_id: FrameId, sensor_name: SensorName
) -> LidarSensorFrame[None]:
raise ValueError("Loading from directoy does not support lidar data!")
def _decode_available_radar_names(self, frame_id: FrameId) -> List[SensorName]:
raise ValueError("Loading from directory does not support radar data!")
def _create_radar_sensor_frame_decoder(self) -> RadarSensorFrameDecoder[TDateTime]:
raise ValueError("Loading from directory does not support radar data!")
def _decode_radar_sensor_frame(
self, decoder: RadarSensorFrameDecoder[TDateTime], frame_id: FrameId, sensor_name: SensorName
) -> RadarSensorFrame[TDateTime]:
raise ValueError("Loading from directory does not support radar data!")
def _decode_metadata(self, frame_id: FrameId) -> Dict[str, Any]:
if self._metadata_folder is None:
return dict()
metadata_path = self.dataset_path / self._metadata_folder / f"{AnyPath(frame_id).stem + '.json'}"
return read_json(metadata_path)
| {
"content_hash": "775dca633ab8a8940a5a3a450d3f199f",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 105,
"avg_line_length": 44.23711340206186,
"alnum_prop": 0.7093917501747844,
"repo_name": "parallel-domain/pd-sdk",
"id": "2b95c8c26e0083eed4b7c62887532f56e8416658",
"size": "4291",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "paralleldomain/decoding/directory/frame_decoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1030434"
},
{
"name": "Shell",
"bytes": "1375"
}
],
"symlink_target": ""
} |
"""
debug.py
"""
import re
import time
import sys
import types
import gc
import os
from cStringIO import StringIO
normalize_re = re.compile(r'\d+')
class ObjCallLogger(object):
"""Log every call"""
def __init__(self, obj):
self.obj = obj
self.log = []
def __getattr__(self, name):
attr = getattr(self.obj, name)
def _(*a, **kw):
call = format_call(name, *a, **kw)
ncall = normalize_re.sub('-', call)
t1 = time.time()
r = attr(*a, **kw)
cost = time.time() - t1
self.log.append((call, ncall, cost))
return r
return _
def format_call(funcname, *a, **kw):
arglist = [(repr(x)) for x in a]
arglist += ["%s=%r" % (k, v) for k, v in kw.iteritems()]
return "%s(%s)" % (funcname, ", ".join(arglist))
class CallLogger(object):
def __init__(self, funcpath):
self.orig_func = obj_from_string(funcpath)
if isinstance(self.orig_func, types.MethodType):
raise NotImplementedError("Methods are not supported yet.")
self.funcpath = funcpath
self.log = []
self.__global_replace__ = False
global_replace(self.orig_func, self)
def __call__(self, *a, **kw):
call = format_call(self.funcpath, *a, **kw)
t1 = time.time()
try:
return self.orig_func(*a, **kw)
finally:
cost = time.time() - t1
self.log.append((call, cost))
def close(self):
global_replace(self, self.orig_func)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
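# A hedged usage sketch (the dotted function path is hypothetical; CallLogger
# patches the target everywhere via global_replace() and undoes it in close()):
#
#     with CallLogger('mypackage.mymodule.fetch') as logger:
#         run_workload()
#     for call, cost in logger.log:
#         print call, cost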
def obj_from_string(s):
# stolen from Mocker http://labix.org/mocker
import_stack = s.split(".")
attr_stack = []
while import_stack:
module_path = ".".join(import_stack)
try:
object = __import__(module_path, {}, {}, [""])
except ImportError:
attr_stack.insert(0, import_stack.pop())
if not import_stack:
raise
continue
else:
for attr in attr_stack:
object = getattr(object, attr)
break
return object
def global_replace(remove, install):
"""Replace object 'remove' with object 'install' on all dictionaries."""
# stolen from Mocker http://labix.org/mocker
for referrer in gc.get_referrers(remove):
if (type(referrer) is dict and
referrer.get("__global_replace__", True)):
for key, value in referrer.items():
if value is remove:
referrer[key] = install
def capture_output(func, *args, **kw):
# Not threadsafe!
out = StringIO()
old_stdout = sys.stdout
sys.stdout = out
try:
func(*args, **kw)
finally:
sys.stdout = old_stdout
return out.getvalue()
def is_process_alive(pid):
try:
os.kill(pid, 0)
except OSError, e:
if e.errno == 3: # process is dead
return False
elif e.errno == 1: # no permission
return True
else:
raise
else:
return True
| {
"content_hash": "d9f0fefef1e8cf67660fa06780a01bd5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 76,
"avg_line_length": 26,
"alnum_prop": 0.532520325203252,
"repo_name": "douban/douban-utils",
"id": "2912d5aa3631053aeb7ab4677557224702cae8e2",
"size": "3238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "douban/utils/debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "62678"
}
],
"symlink_target": ""
} |
"""
this is the code to accompany the Lesson 3 (decision tree) mini-project
use a DT to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
import os
from time import time
#sys.path.append("../tools/")
os.chdir("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/tools/")
from email_preprocess import preprocess
from sklearn import tree
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
#########################################################
clf = tree.DecisionTreeClassifier(min_samples_split=40)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# What's the number of features in your data?
len(features_train[0]) | {
"content_hash": "61b6fecc330eea5a150858560a71aea3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 120,
"avg_line_length": 28.622222222222224,
"alnum_prop": 0.6669254658385093,
"repo_name": "tuanvu216/udacity-course",
"id": "3faa9521c6de3f27aa27b699e3dafa11c5435b17",
"size": "1307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intro_to_machine_learning/lesson/lesson_3_decision_trees/dt_author_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3736"
},
{
"name": "HTML",
"bytes": "143388"
},
{
"name": "JavaScript",
"bytes": "169689"
},
{
"name": "Jupyter Notebook",
"bytes": "3237655"
},
{
"name": "Python",
"bytes": "400129"
},
{
"name": "Ruby",
"bytes": "448"
},
{
"name": "Shell",
"bytes": "538"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TeamResult.start_time'
db.add_column('scorebrd_teamresult', 'start_time',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 1, 26, 0, 0)),
keep_default=False)
# Adding field 'TeamResult.end_time'
db.add_column('scorebrd_teamresult', 'end_time',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 1, 26, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TeamResult.start_time'
db.delete_column('scorebrd_teamresult', 'start_time')
# Deleting field 'TeamResult.end_time'
db.delete_column('scorebrd_teamresult', 'end_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'scorebrd.competition': {
'Meta': {'object_name': 'Competition'},
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'scorebrd.event': {
'Meta': {'object_name': 'Event'},
'competitions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Competition']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'scorebrd.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matches': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Match']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Team']", 'symmetrical': 'False'})
},
'scorebrd.match': {
'Meta': {'object_name': 'Match'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'playing': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'referee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'scoreA': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'scoreB': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'teamA': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'homelanders'", 'to': "orm['scorebrd.Team']"}),
'teamB': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'foreigners'", 'to': "orm['scorebrd.Team']"})
},
'scorebrd.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'scorebrd.teamresult': {
'Meta': {'object_name': 'TeamResult'},
'draws': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
'goal_diff': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'goal_shot': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scorebrd.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'matches_played': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scorebrd.Team']"}),
'wins': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['scorebrd'] | {
"content_hash": "8d9a9f9f7375347bcdfcf2637d0d4fc1",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 182,
"avg_line_length": 65.51282051282051,
"alnum_prop": 0.549119373776908,
"repo_name": "xlcteam/scoreBoard",
"id": "baab0908a88db40d6443cecade497ac3e6e19375",
"size": "7689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scorebrd/migrations/0006_auto__add_field_teamresult_start_time__add_field_teamresult_end_time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "32311"
},
{
"name": "Python",
"bytes": "99306"
}
],
"symlink_target": ""
} |
from settings import *
import nltk
import sys
import textract
import re
import unicodedata
class Opinion:
def __init__(self, name):
self.name = name
self.total = 0
self.fore = 0
self.against = 0
self.no_opinions = 0
self.ratio_for = 0
self.ratio_against = 0
self.ratio_no_opinions = 0
def finalize(self):
if self.fore > self.total:
self.fore = self.total
if self.against > self.total:
self.against = self.total
if self.total < (self.fore + self.against):
self.total = self.fore + self.against
self.no_opinions = self.total - self.fore - self.against
if self.total != 0 and self.total != self.no_opinions:
self.ratio_for = self.fore / (self.total-self.no_opinions)
self.ratio_against = self.against / (self.total-self.no_opinions)
def parse_project(filename):
text = textract.process(project_directory + filename)
text = text.decode('utf-8')
text = text.strip().lower()
return text
def analyze_subject(candidate, subject):
words_subjects = subjects.get(subject, None)
if not words_subjects:
print("Subject " + subject + " does not exist")
exit()
if not candidate.get('opinions', None):
candidate['opinions'] = {}
candidate['opinions'][subject] = Opinion(subject.title())
sentences = candidate['sentences']
for sentence in sentences:
for token in sentence:
t = unicodedata.normalize('NFD', token).encode('ascii', 'ignore')
libre_echange = subjects[subject]
for word in words_subjects:
reg = re.compile(r".*" + word + ".*")
if re.search(reg, t.decode('utf-8')):
candidate['opinions'][subject].total += 1
for token in sentence:
#Suppression des accents
t2 = unicodedata.normalize('NFD', token).encode('ascii', 'ignore')
for a in againsts:
reg = re.compile(r".*" + a + ".*")
if re.search(reg, t2.decode('utf-8')):
candidate['opinions'][subject].against += 1
for f in fors:
reg = re.compile(r".*" + f + ".*")
if re.search(reg, t2.decode('utf-8')):
candidate['opinions'][subject].fore += 1
candidate['opinions'][subject].finalize()
def tokenize_project(candidate):
sentences = nltk.sent_tokenize(candidate['project'], 'french')
tokens = []
for sentence in sentences:
tokens.append(nltk.word_tokenize(sentence, 'french'))
return tokens
def print_results(candidate):
print('\n'+candidate['name'])
for opinion in candidate['opinions'].values():
print("\n"+opinion.name+" :")
print("Phrases concernées : " + str(opinion.total))
print("Avis pour : " + str(opinion.fore))
print("Avis contre : " + str(opinion.against))
print("Sans avis : " + str(opinion.no_opinions))
print("Indice pour : " + str(opinion.ratio_for))
print("Indice contre : " + str(opinion.ratio_against))
if(opinion.ratio_for>opinion.ratio_against):
print("Pour")
elif(opinion.ratio_against>opinion.ratio_for):
print("Contre")
else:
print("Neutre")
print('\n\n')
if __name__ == '__main__':
print("Analyse des programmes...\n\n")
for candidate in candidates:
candidate['project'] = parse_project(candidate.get('file'))
candidate['sentences'] = tokenize_project(candidate)
for subject in subjects:
analyze_subject(candidate, subject)
print_results(candidate)
subject = input("How about you choose a subject now : ")
subjects[subject] = []
key = input("key words for this subject(separated by ',') : ")
subjects[subject] = key.split(',')
for candidate in candidates:
analyze_subject(candidate, subject)
print_results(candidate)
| {
"content_hash": "2bc895d27ac2bbbb55415b81fa307d51",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 101,
"avg_line_length": 36.67768595041322,
"alnum_prop": 0.5297431275349257,
"repo_name": "zozoh94/PolReview",
"id": "d1628a32c8b27cd5bb9ecb8b89278be3e0f48bfc",
"size": "4477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pol_review.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8930"
},
{
"name": "TeX",
"bytes": "5767"
}
],
"symlink_target": ""
} |
'''
Split methylation BigQuery table into chromosomes
'''
import pprint
import sys
import json
from apiclient.errors import HttpError
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from oauth2client.client import AccessTokenRefreshError
import time
from bigquery_etl.utils.logging_manager import configure_logging
# TODO change this to single auth script (GOOGLE_APPLICATION_CREDENTIALS)
# this is still in progress
def get_service():
# Grab the application's default credentials from the environment.
credentials = GoogleCredentials.get_application_default()
# Construct the service object for interacting with the BigQuery API.
bigquery = discovery.build('bigquery', 'v2', credentials=credentials)
return bigquery
def split_table_by_chr(chromosome, project_id, dataset_id, log):
# this is a new connection to the new project
bigquery_service = get_service()
jobCollection = bigquery_service.jobs()
try:
query_request = bigquery_service.jobs()
# maybe there is a nice way to format this one?
query = """\
SELECT data.ParticipantBarcode AS ParticipantBarcode, data.SampleBarcode AS SampleBarcode, data.SampleTypeLetterCode AS SampleTypeLetterCode, \
data.AliquotBarcode AS AliquotBarcode, data.Platform AS Platform, data.Study AS Study, data.Probe_Id AS Probe_Id, data.Beta_Value as Beta_Value
FROM \
( \
SELECT IlmnID \
FROM [platform_reference.methylation_annotation] \
WHERE ( CHR == "{0}")\
) AS ids \
JOIN EACH \
(\
SELECT * \
FROM [{1}.Methylation] \
) AS data \
ON ids.IlmnID == data.Probe_Id""".format(chromosome, dataset_id)
log.info('importing chromosome %s\n%s' % (chromosome, query))
# query_data = {'query': query}
query_data = {
'configuration': {
'query': {
'query': query,
'useQueryCache': False,
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': 'Methylation_chr{0}'.format(chromosome)
},
'createDisposition': 'CREATE_IF_NEEDED',
'writeDisposition': 'WRITE_EMPTY',
'allowLargeResults': True
}
}
}
insertResponse = query_request.insert(projectId=project_id, body=query_data).execute()
# Ping for status until it is done, with a short pause between calls.
while True:
result = jobCollection.get(projectId=project_id,
jobId=insertResponse['jobReference']['jobId']).execute()
status = result['status']
if 'DONE' == status['state']:
if 'errorResult' in status and status['errorResult']:
log.error('an error occurred completing import at \'%s\': %s \'%s\' for chromosome %s' %
(status['errorResult']['location'], status['errorResult']['reason'], status['errorResult']['message'], chromosome))
else:
log.info('completed import chromosome %s' % (chromosome))
break
if 'errors' in status and status['errors'] and 0 < len(status['errors']):
for error in status['errors']:
log.warning('\terror while importing chromosome %s: %s' % (chromosome, error))
log.info('\tWaiting for the import to complete for chromosome %s...' % (chromosome))
time.sleep(20)
except HttpError as err:
print 'Error:', pprint.pprint(err.content)
except AccessTokenRefreshError:
print ("Credentials have been revoked or expired, please re-run"
"the application to re-authorize")
def main(config, log):
log.info('start splitting methylation data by chromosome')
project_id = config['project_id']
dataset_id = config['bq_dataset']
chromosomes = map(str,range(1,23)) + ['X', 'Y']
# chromosomes = map(lambda orig_string: 'chr' + orig_string, chr_nums)
for chromosome in chromosomes:
split_table_by_chr(chromosome, project_id, dataset_id, log)
log.info('done splitting methylation data by chromosome')
if __name__ == '__main__':
# setup logging
with open(sys.argv[1]) as configfile:
config = json.load(configfile)
log = configure_logging('methylation_split', "logs/methylation_transform_split" + '.log')
main(config, log)
| {
"content_hash": "846d5549d9044fd51f369b6a90e0babe",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 160,
"avg_line_length": 43.5,
"alnum_prop": 0.6028097062579821,
"repo_name": "isb-cgc/ISB-CGC-data-proc",
"id": "572f84f7c51c084109df6cea781029ea1fcc9847",
"size": "5310",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tcga_etl_pipeline/methylation/split_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "1169886"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
'''Test percolator.py.'''
from test.support import requires
requires('gui')
import unittest
from tkinter import Text, Tk, END
from idlelib.percolator import Percolator, Delegator
class MyFilter(Delegator):
def __init__(self):
Delegator.__init__(self, None)
def insert(self, *args):
self.insert_called_with = args
self.delegate.insert(*args)
def delete(self, *args):
self.delete_called_with = args
self.delegate.delete(*args)
def uppercase_insert(self, index, chars, tags=None):
chars = chars.upper()
self.delegate.insert(index, chars)
def lowercase_insert(self, index, chars, tags=None):
chars = chars.lower()
self.delegate.insert(index, chars)
def dont_insert(self, index, chars, tags=None):
pass
class PercolatorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.root = Tk()
cls.text = Text(cls.root)
@classmethod
def tearDownClass(cls):
del cls.text
cls.root.destroy()
del cls.root
def setUp(self):
self.percolator = Percolator(self.text)
self.filter_one = MyFilter()
self.filter_two = MyFilter()
self.percolator.insertfilter(self.filter_one)
self.percolator.insertfilter(self.filter_two)
def tearDown(self):
self.percolator.close()
self.text.delete('1.0', END)
def test_insertfilter(self):
self.assertIsNotNone(self.filter_one.delegate)
self.assertEqual(self.percolator.top, self.filter_two)
self.assertEqual(self.filter_two.delegate, self.filter_one)
self.assertEqual(self.filter_one.delegate, self.percolator.bottom)
def test_removefilter(self):
filter_three = MyFilter()
self.percolator.removefilter(self.filter_two)
self.assertEqual(self.percolator.top, self.filter_one)
self.assertIsNone(self.filter_two.delegate)
filter_three = MyFilter()
self.percolator.insertfilter(self.filter_two)
self.percolator.insertfilter(filter_three)
self.percolator.removefilter(self.filter_one)
self.assertEqual(self.percolator.top, filter_three)
self.assertEqual(filter_three.delegate, self.filter_two)
self.assertEqual(self.filter_two.delegate, self.percolator.bottom)
self.assertIsNone(self.filter_one.delegate)
def test_insert(self):
self.text.insert('insert', 'foo')
self.assertEqual(self.text.get('1.0', END), 'foo\n')
self.assertTupleEqual(self.filter_one.insert_called_with,
('insert', 'foo', None))
def test_modify_insert(self):
self.filter_one.insert = self.filter_one.uppercase_insert
self.text.insert('insert', 'bAr')
self.assertEqual(self.text.get('1.0', END), 'BAR\n')
def test_modify_chain_insert(self):
filter_three = MyFilter()
self.percolator.insertfilter(filter_three)
self.filter_two.insert = self.filter_two.uppercase_insert
self.filter_one.insert = self.filter_one.lowercase_insert
self.text.insert('insert', 'BaR')
self.assertEqual(self.text.get('1.0', END), 'bar\n')
def test_dont_insert(self):
self.filter_one.insert = self.filter_one.dont_insert
self.text.insert('insert', 'foo bar')
self.assertEqual(self.text.get('1.0', END), '\n')
self.filter_one.insert = self.filter_one.dont_insert
self.text.insert('insert', 'foo bar')
self.assertEqual(self.text.get('1.0', END), '\n')
def test_without_filter(self):
self.text.insert('insert', 'hello')
self.assertEqual(self.text.get('1.0', 'end'), 'hello\n')
def test_delete(self):
self.text.insert('insert', 'foo')
self.text.delete('1.0', '1.2')
self.assertEqual(self.text.get('1.0', END), 'o\n')
self.assertTupleEqual(self.filter_one.delete_called_with,
('1.0', '1.2'))
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"content_hash": "18644620a382a9c90a96e09ebf83e0dc",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 74,
"avg_line_length": 34.38135593220339,
"alnum_prop": 0.634458959822529,
"repo_name": "Microsoft/PTVS",
"id": "573b9a1e8e69e3905122b905c4f30ee3958b17d7",
"size": "4057",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/idle_test/test_percolator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('livelayermanager', '0009_auto_20170602_1459'),
]
operations = [
migrations.AlterField(
model_name='sqlviewlayer',
name='name',
field=models.SlugField(help_text='The name of live layer', max_length=60, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[a-z_][a-z0-9_]+$'), 'Slug can only start with lowercase letters or underscore, and contain lowercase letters, numbers and underscore', 'invalid')]),
),
]
| {
"content_hash": "fd05949bc2edf45e27e0fe63a81c2868",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 311,
"avg_line_length": 34.6,
"alnum_prop": 0.6734104046242775,
"repo_name": "rockychen-dpaw/borgcollector",
"id": "9b790ac6cd18f8edcbf931cbddce23b37ec13c15",
"size": "764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "livelayermanager/migrations/0010_auto_20170602_1609.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9821"
},
{
"name": "JavaScript",
"bytes": "55"
},
{
"name": "Python",
"bytes": "720469"
},
{
"name": "TSQL",
"bytes": "9939"
}
],
"symlink_target": ""
} |
import tensorflow as tf
with tf.name_scope('policy') as scope:
# tf Graph Input
features = 4
objects = 8
layers = 3
x = tf.placeholder(tf.float32, [None, 8, 8, features], name='input')
y = tf.placeholder(tf.float32, [None, 8, 8, 1], name='expected')
# Set model weights
#first pass filters
Fa = tf.Variable(tf.random_normal([1,8,features,objects]))
Fb = tf.Variable(tf.random_normal([8,1,features,objects]))
b = tf.Variable(tf.random_normal([objects,8,8]))
# Construct model
La = tf.transpose(tf.nn.conv2d(x, Fa, [1,1,1,1], 'VALID'),[0,3,1,2])
Lb = tf.transpose(tf.nn.conv2d(x, Fb, [1,1,1,1], 'VALID'),[0,3,1,2])
L = tf.transpose(tf.tanh(tf.matmul(La, Lb) + b), [0,2,3,1])
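# Descriptive note (my reading of the ops above, not the original author's):
# La is a per-row summary (1x8 conv) and Lb a per-column summary (8x1 conv);
# their matmul forms an 8x8 outer-product map for each "object" channel, to
# which a bias is added before tanh. The layers in the loop below repeat this.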
for i in range(layers):
Fa = tf.Variable(tf.random_normal([1,8,objects,objects]))
Fb = tf.Variable(tf.random_normal([8,1,objects,objects]))
b = tf.Variable(tf.random_normal([objects,8,8]))
La = tf.transpose(tf.nn.conv2d(L, Fa, [1,1,1,1], 'VALID'),[0,3,1,2])
Lb = tf.transpose(tf.nn.conv2d(L, Fb, [1,1,1,1], 'VALID'),[0,3,1,2])
L = tf.transpose(tf.tanh(tf.matmul(La, Lb) + b), [0,2,3,1])
#Consolidation filters
F = tf.Variable(tf.random_normal([1,1,objects,1]))
b = tf.Variable(tf.random_normal([8,8,1]))
# the output of the model
pred = tf.nn.sigmoid(tf.nn.conv2d(L, F, [1,1,1,1], 'VALID') + b, name='output')
cost = tf.pow(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels = tf.reshape(y, [-1, 64]),
logits = tf.log(tf.reshape(pred, [-1, 64])),
)), 2, name='cost')
learning_rate = tf.placeholder(tf.float32, [], name='learning_rate')
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost, name='train')
tf.variables_initializer(tf.global_variables(), name = 'init')
definition = tf.Session().graph_def
directory = '../../data/policy'
saver = tf.train.Saver(tf.global_variables(), name='saver')
saver_def = saver.as_saver_def()
# The name of the tensor you must feed with a filename when saving/restoring.
print(saver_def.filename_tensor_name)
# The name of the target operation you must run when restoring.
print(saver_def.restore_op_name)
# The name of the target operation you must run when saving.
print(saver_def.save_tensor_name)
tf.train.write_graph(definition, directory, 'policy-{}x{}.pb'.format(objects, layers), as_text=False)
exit()
# Initializing the variables
init = tf.global_variables_initializer()
import random
training_epochs = 20
display_step = 1
lr = 0.01
with tf.Session() as sess:
sess.run(init)
batch_size = 100
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = 1000
# Loop over all batches
batch_xs = []
batch_ys = []
for _ in range(batch_size):
itmi = [[[0 for ___ in range(4)] for __ in range(8)] for _ in range(8)]
itmj = [[[0] for __ in range(8)] for _ in range(8)]
batch_xs.append(itmi)
batch_ys.append(itmj)
for i in range(total_batch):
ix = i * batch_size
for k in range(batch_size):
for Y in range(8):
for X in range(8):
itmj[Y][X][0] = random.choice([0.0,1.0])
for j in range(4):
itmi[Y][X][j] = random.choice([0.0, 1.0])
batch_xs.append(itmi)
batch_ys.append(itmj)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys,
learning_rate: lr})
# Compute average loss
del batch_xs[:]
del batch_ys[:]
avg_cost += c
#print("cost=",c," avg=",avg_cost/(i+1))
if (i % 100 == 0):
print(100.0 * i/float(total_batch), '%')
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/total_batch))
saver.save(sess, 'policy_net', global_step=epoch)
lr = lr * 0.97
print("Optimization Finished!") | {
"content_hash": "bb61f38e013a549471c95015e0040f6c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 101,
"avg_line_length": 35.59349593495935,
"alnum_prop": 0.5600730927364094,
"repo_name": "Tenebryo/coin",
"id": "6a10ed6975deea4245c15d71f79e160fcfcbd483",
"size": "4378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml/policy/src/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18015"
},
{
"name": "Rust",
"bytes": "409527"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
} |
import config, webui, BaseHTTPServer, urllib, sys, getopt, os
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
config = config.Config("config.ini")
ssh_user = None
def set_content_type(self, content_type):
self.send_header('Content-Type', content_type)
def run(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/pypi')
return
for scriptname in ('/mirrors', '/simple', '/pypi',
'/serversig', '/daytime', '/id'):
if self.path.startswith(scriptname):
rest = self.path[len(scriptname):]
break
else:
# invalid URL
return
# The text below is mostly copied from CGIHTTPServer
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
env = {}
#env['SERVER_SOFTWARE'] = self.version_string()
#env['SERVER_NAME'] = self.server.server_name
#env['SERVER_PORT'] = str(self.server.server_port)
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
# env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.ssh_user:
# ignore authorization headers if this is an SSH client
authorization = None
env['SSH_USER'] = self.ssh_user
else:
authorization = self.headers.getheader("authorization")
if authorization:
env['HTTP_CGI_AUTHORIZATION'] = authorization
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
ac = self.headers.getheader('accept-encoding')
if ac:
env['HTTP_ACCEPT_ENCODING'] = ac
webui.WebUI(self, env).run()
do_GET = do_POST = run
class StdinoutHandler(RequestHandler):
def __init__(self, remote_user):
self.ssh_user = remote_user
try:
host,port,_ = os.environ['SSH_CLIENT'].split()
except KeyError:
host = port = ''
# request, client_address, server
RequestHandler.__init__(self, None, (host, port), None)
def setup(self):
self.rfile = sys.stdin
#import StringIO
#self.rfile = StringIO.StringIO('GET /pypi HTTP/1.0\r\n\r\n')
self.wfile = sys.stdout
def main():
os.umask(002) # make directories group-writable
port = 8000
remote_user = None
opts, args = getopt.getopt(sys.argv[1:], 'ir:p:',
['interactive', 'remote-user=', 'port='])
assert not args
for opt, val in opts:
if opt in ('-i', '--interactive'):
port = None
elif opt in ('-r','--remote-user'):
port = None # implies -i
remote_user = val
elif opt in ('-p', '--port'):
port = int(val)
if port:
httpd = BaseHTTPServer.HTTPServer(('',port), RequestHandler)
httpd.serve_forever()
else:
StdinoutHandler(remote_user)
if __name__=='__main__':
main()
| {
"content_hash": "ca815a13f12b1b9f744d0d95ff130bd0",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 73,
"avg_line_length": 35.46875,
"alnum_prop": 0.5378854625550661,
"repo_name": "techtonik/pydotorg.pypi",
"id": "94f7d319cd7cc8be30ebeffc5a462a4503d6c845",
"size": "4558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "standalone.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "195"
},
{
"name": "CSS",
"bytes": "52191"
},
{
"name": "Makefile",
"bytes": "208"
},
{
"name": "PLpgSQL",
"bytes": "10792"
},
{
"name": "Python",
"bytes": "397864"
}
],
"symlink_target": ""
} |
"""
ABP analyzer and graphics tests
"""
cases = [
('Run Pymodel Graphics to generate dot file from FSM model, no need use pma',
'pmg.py ABP'),
('Generate SVG file from dot',
'dotsvg ABP'),
# Now display ABP.svg in browser
('Run PyModel Analyzer to generate FSM from original FSM, should be the same',
'pma.py ABP'),
('Run PyModel Graphics to generate a file of graphics commands from new FSM',
'pmg.py ABPFSM'),
('Generate an svg file from the graphics commands',
'dotsvg ABPFSM'),
# Now display ABPFSM.svg in browser, should look the same as ABP.svg
]
| {
"content_hash": "72d9f98e3cfab3d9c294da53322e3d50",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 25.541666666666668,
"alnum_prop": 0.6541598694942904,
"repo_name": "nfredrik/pyModelStuff",
"id": "3582ef463bc23bae59174a0b2b8c276b24a2439a",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/abp/test_graphics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "107"
},
{
"name": "Python",
"bytes": "64694"
},
{
"name": "Ruby",
"bytes": "128"
}
],
"symlink_target": ""
} |
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/bigquery/storage_v1beta1/proto/table_reference.proto",
package="google.cloud.bigquery.storage.v1beta1",
syntax="proto3",
serialized_pb=_b(
'\nAgoogle/cloud/bigquery/storage_v1beta1/proto/table_reference.proto\x12%google.cloud.bigquery.storage.v1beta1\x1a\x1fgoogle/protobuf/timestamp.proto"J\n\x0eTableReference\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x10\n\x08table_id\x18\x03 \x01(\t"C\n\x0eTableModifiers\x12\x31\n\rsnapshot_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x8e\x01\n)com.google.cloud.bigquery.storage.v1beta1B\x13TableReferenceProtoZLgoogle.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storageb\x06proto3'
),
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR],
)
_TABLEREFERENCE = _descriptor.Descriptor(
name="TableReference",
full_name="google.cloud.bigquery.storage.v1beta1.TableReference",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.cloud.bigquery.storage.v1beta1.TableReference.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset_id",
full_name="google.cloud.bigquery.storage.v1beta1.TableReference.dataset_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="table_id",
full_name="google.cloud.bigquery.storage.v1beta1.TableReference.table_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=141,
serialized_end=215,
)
_TABLEMODIFIERS = _descriptor.Descriptor(
name="TableModifiers",
full_name="google.cloud.bigquery.storage.v1beta1.TableModifiers",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="snapshot_time",
full_name="google.cloud.bigquery.storage.v1beta1.TableModifiers.snapshot_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=217,
serialized_end=284,
)
_TABLEMODIFIERS.fields_by_name[
"snapshot_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["TableReference"] = _TABLEREFERENCE
DESCRIPTOR.message_types_by_name["TableModifiers"] = _TABLEMODIFIERS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TableReference = _reflection.GeneratedProtocolMessageType(
"TableReference",
(_message.Message,),
dict(
DESCRIPTOR=_TABLEREFERENCE,
__module__="google.cloud.bigquery.storage_v1beta1.proto.table_reference_pb2",
__doc__="""Table reference that includes just the 3 strings needed to identify a
table.
Attributes:
project_id:
The assigned project ID of the project.
dataset_id:
The ID of the dataset in the above project.
table_id:
The ID of the table in the above dataset.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.TableReference)
),
)
_sym_db.RegisterMessage(TableReference)
TableModifiers = _reflection.GeneratedProtocolMessageType(
"TableModifiers",
(_message.Message,),
dict(
DESCRIPTOR=_TABLEMODIFIERS,
__module__="google.cloud.bigquery.storage_v1beta1.proto.table_reference_pb2",
__doc__="""All fields in this message optional.
Attributes:
snapshot_time:
The snapshot time of the table. If not set, interpreted as
now.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta1.TableModifiers)
),
)
_sym_db.RegisterMessage(TableModifiers)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b(
"\n)com.google.cloud.bigquery.storage.v1beta1B\023TableReferenceProtoZLgoogle.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage"
),
)
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "6adced89cff4ebdd58047804f43dd72d",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 560,
"avg_line_length": 32.5678391959799,
"alnum_prop": 0.6340070976701127,
"repo_name": "dhermes/gcloud-python",
"id": "5208450f5deae1866048e7a1d79c26b08b20accd",
"size": "6617",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigquery_storage/google/cloud/bigquery_storage_v1beta1/proto/table_reference_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
} |
import abc
class BaseWSGIContainer(object):
__metaclass__ = abc.ABCMeta
DESCRIPTION = abc.abstractproperty
@abc.abstractproperty
def name(self):
pass
@abc.abstractmethod
def send_request(self):
"""Should be implemented by subclasses""" | {
"content_hash": "5c0cbbe31975a44e872b9dfb2336e904",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 18.714285714285715,
"alnum_prop": 0.7099236641221374,
"repo_name": "rishimishra/flask_tornado",
"id": "eb6a4d85ef8f71f07f227552415ea2786881ad47",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/firstbranch",
"path": "app/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3212"
}
],
"symlink_target": ""
} |
"""Pyxie's image packing implementation. Based on greedy algorithm described
in this stack overflow question:
http://stackoverflow.com/questions/1213394/algorithm-needed-for-packing-\
rectangles-in-a-fairly-optimal-way
Start with the largest rectangle. Add the next largest rectangle to the place
that extends the pack region as little as possible. Repeat until you have
placed the smallest rectangle.
The coordinate system used here has the origin (0, 0) at the top-left.
"""
__all__ = ['Rectangle', 'Field', 'VerticalField']
class Rectangle(object):
def __init__(self, x, y, data=None):
self.x, self.y = x, y
self.data = data
def __repr__(self):
return '<Rectangle %d, %d>' % (self.x, self.y)
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
class Line(object):
"""A very simplistic grid line class, where all lines are either vertical
or horizontal."""
def __init__(self, p1, p2):
"""Make sure that p1.x is the left-most top-most point in the line."""
if p1.x == p2.x: # vertical
self.vertical = True
if p1.y < p2.y: self.p1, self.p2 = p1, p2
else: self.p1, self.p2 = p2, p1
elif p1.y == p2.y: # horizontal
self.vertical = False
if p1.x < p2.x: self.p1, self.p2 = p1, p2
else: self.p1, self.p2 = p2, p1
else:
raise Exception("Line objects can only have horizontal or vertical lines.")
def overlap(self, p1, p2):
"""Return True if there's any overlap between this line and _region_,
        which is assumed to be a span if we are horizontal and a range if we are
vertical. There is overlap if any of the points of either line exists
within the other line."""
if self.vertical:
y1, y2 = self.p1.y, self.p2.y
return (p1 > y1 and p1 < y2) or (p2 > y1 and p2 < y2) or\
(y1 > p1 and y1 < p2) or (y2 > p1 and y2 < p2)
x1, x2 = self.p1.x, self.p2.x
return (p1 > x1 and p1 < x2) or (p2 > x1 and p2 < x2) or\
(x1 > p1 and x1 < p2) or (x2 > p1 and x2 < p2)
def __contains__(self, p):
"""Return whether or not this line contains a point p."""
if self.vertical: # vertical line
if p.x == self.p1.x and p.y >= self.p1.y and p.y <= self.p2.y:
return True
return False
else:
if p.y == self.p1.y and p.x >= self.p1.x and p.x <= self.p2.x:
return True
return False
def __repr__(self):
return '<Line (%d, %d) -> (%d, %d) %s >' % (self.p1.x, self.p1.y,
self.p2.x, self.p2.y, '|' if self.vertical else '-')
class PositionedRectangle(object):
"""A rectangle positioned within a field. Has the coordinates of the
rectangle and whether or not there's another rectangle positioned at
its top-right or bottom-left corner."""
def __init__(self, x, y, rect):
self.x, self.y, self.rect = x, y, rect
self.bl, self.tr = None, None
def __contains__(self, point):
"""This positioned rectangle contains point (x,y) if x is between
the left-most x and the right-most x, and y is between the top-most
        y and bottom-most y."""
if (point.x > self.x) and (point.x < (self.x + self.rect.x)) and\
(point.y > self.y) and (point.y < (self.y + self.rect.y)):
return True
return False
def __repr__(self):
return '<pRect @ (%d, %d), %dx%d (tr/bl: %r, %r)>' % (self.x, self.y,
self.rect.x, self.rect.y, self.tr, self.bl)
class Field(object):
def __init__(self):
self.x, self.y = 0, 0
self.rectangles = []
def area(self):
return self.x * self.y
def add_rectangle(self, rectangle):
"""Attempt to add a rectangle to this field increasing the packed area
as little as possible. To do this, it goes over all of the other
rectangles, attempts to add to bottom left or top right corner, then
checks for collisions. If a position is found that does not create a
collision and does not increase the area of the Field, it is used.
Otherwise, the "optimal" solution found is used. This is very time
intensive, but we should never be dealing with a great deal of images."""
if not self.rectangles:
self.rectangles.append(PositionedRectangle(0, 0, rectangle))
self.x, self.y = self.calculate_bounds()
return
attempts = []
for rect in self.rectangles:
for placement in (self.bottom_left, self.top_right):
result = placement(rect, rectangle)
                if result == 0:
placement(rect, rectangle, place=True)
return
# if we didn't have a collision
if result is not None:
attempts.append((result, -self.rectangles.index(rect), placement, rect))
attempts.sort()
if not attempts:
            raise Exception("could not place rectangle %r in the field" % rectangle)
result, blah, placement, rect = attempts[0]
#print "Area increasing from %d to %d" % (self.area(), result)
placement(rect, rectangle, place=True)
self.x, self.y = self.calculate_bounds()
def bottom_left(self, placed, new, place=False):
"""Attempt to place a new rectangle on the bottom left corner of a
        previously placed rectangle. Return the amount by which the overall area of
the field would increase, or None if a collision is detected."""
if place:
self.mark_corners(placed.x, placed.y + placed.rect.y, new)
self.rectangles.append(PositionedRectangle(placed.x, placed.y + placed.rect.y, new))
self.x, self.y = self.calculate_bounds()
return
if placed.bl:
return None
# the corner we're adding it to is here:
corner = (placed.x, placed.y + placed.rect.y)
if not self.collision(corner, new):
return self.new_area(corner, new)
def top_right(self, placed, new, place=False):
if place:
self.mark_corners(placed.x + placed.rect.x, placed.y, new)
self.rectangles.append(PositionedRectangle(placed.x + placed.rect.x, placed.y, new))
self.x, self.y = self.calculate_bounds()
return
if placed.tr:
return None
corner = (placed.x + placed.rect.x, placed.y)
if not self.collision(corner, new):
return self.new_area(corner, new)
def mark_corners(self, x, y, rect):
"""Find all of the rectangles whose top-right or bottom-left corner are
"occupied" by the new rectangle, and mark them appropriately."""
left = Line(Point(x, y), Point(x, y + rect.y))
top = Line(Point(x, y), Point(x + rect.x, y))
# print "Adding rectangle %r to %d, %d (t/l: %s, %s)" % (rect, x, y, top, left)
# for every rectangle, if the top right or bottom left corners are in
# these lines, mark them as blocked
for pos in self.rectangles:
if not pos.tr:
p = Point(pos.x + pos.rect.x, pos.y)
if p in top or p in left:
pos.tr = True
if not pos.bl:
p = Point(pos.x, pos.y + pos.rect.y)
if p in top or p in left:
pos.bl = True
return True
def new_area(self, corner, new):
"""Return the new area of the field given a rectangle is positioned
with its top left corner at `corner`."""
if isinstance(corner, tuple):
corner = Point(*corner)
x, y = self.calculate_bounds(self.rectangles + [PositionedRectangle(corner.x, corner.y, new)])
return x * y
def calculate_bounds(self, rectangles=None):
"""Calculate x/y bounds for a field with the given rectangles. If
rectangles is None, calculate it for self's rectangles."""
if rectangles is None:
rectangles = self.rectangles
def span(rectangles):
# only rectangles without another positioned in the top right
possibilities = [r for r in rectangles if not r.tr]
return max([(r.x + r.rect.x) for r in possibilities])
def range(rectangles):
# only rectangles without another positioned in the bottom left
possibilities = [r for r in rectangles if not r.bl]
return max([(r.y + r.rect.y) for r in possibilities])
return span(rectangles), range(rectangles)
def collision(self, corner, new):
def collide(rect, top, left):
"""If any of these lines intersect with any other rectangles, it's
a collision."""
# if the x components and y components of the rectangle overlap, then
# the rectangles overlap; if they don't, then they don't.
# first, we need to check an edge case:
# it's possible for the rectangle to overlap in some way, but only
# at the top-left corner; so we check that if the top left corners
            # are the same, and if they are, count it as a collision
#+-------+-+
            #|       | |
#+-------+ |
#+---------+
if top.p1.x == rect.x and top.p1.y == rect.y:
return True
# if the top-left point is the same, and the left & top lines go in
# the same direction, they collide
if not top.overlap(rect.x, rect.x + rect.rect.x):
return False
if not left.overlap(rect.y, rect.y + rect.rect.y):
return False
return True
p = Point(*corner)
# lines representing the top, bottom, and left line of where this rectangle would be
left = Line(Point(p.x, p.y), Point(p.x, p.y + new.y))
# bottom = Line(Point(p.x, p.y + new.y), Point(p.x + new.x, p.y + new.y))
top = Line(Point(p.x, p.y), Point(p.x + new.x, p.y))
for rect in self.rectangles:
if collide(rect, top, left):
return True
return False
class VerticalField(Field):
"""A field that only packs itself vertically."""
def __init__(self, padding=0):
super(VerticalField, self).__init__()
self.padding = padding
def add_rectangle(self, rectangle):
"""Add a rectangle to this field underneath the previous rectangle."""
if not self.rectangles:
self.rectangles.append(PositionedRectangle(0, 0, rectangle))
self.x, self.y = self.calculate_bounds()
return
self.bottom_left(self.rectangles[-1], rectangle, place=True)
self.x, self.y = self.calculate_bounds()
def bottom_left(self, placed, new, place=False):
"""Attempt to place a new rectangle on the bottom left corner of a
previously placed rectangle. Return the amt that the overall area of
the field would increase, or None if a collision is detected."""
# self.mark_corners(placed.x, placed.y + placed.rect.y, new)
self.rectangles.append(PositionedRectangle(placed.x, placed.y + placed.rect.y + self.padding, new))
self.x, self.y = self.calculate_bounds()
class HorizontalField(Field):
"""A field that only packs itself horizontally."""
def __init__(self, padding=0):
super(HorizontalField, self).__init__()
self.padding = padding
def add_rectangle(self, rectangle):
"""Add a rectangle to this field underneath the previous rectangle."""
if not self.rectangles:
self.rectangles.append(PositionedRectangle(0, 0, rectangle))
self.x, self.y = self.calculate_bounds()
return
self.top_right(self.rectangles[-1], rectangle, place=True)
self.x, self.y = self.calculate_bounds()
def top_right(self, placed, new, place=False):
"""Place a rectangle off the top right of a previous one, with
applied x-padding."""
self.rectangles.append(PositionedRectangle(placed.x + placed.rect.x + self.padding, 0, new))
self.x, self.y = self.calculate_bounds()
class BoxField(Field):
"""A field that packs itself into a box, ie for rounded corner use."""
def __init__(self, xpadding=0, ypadding=0):
super(BoxField, self).__init__()
self.xpadding = xpadding
self.ypadding = ypadding
def add_rectangle(self, rectangle):
"""Add a rectangle to this field. Note that this field only packs
in boxes, starting from the top left and going clockwise."""
if not self.rectangles:
self.rectangles.append(PositionedRectangle(0, 0, rectangle))
elif len(self.rectangles) == 1: # top right
tl = self.rectangles[0]
self.rectangles.append(PositionedRectangle(tl.rect.x + self.xpadding, 0, rectangle))
elif len(self.rectangles) == 2: # bottom right
tl, tr = self.rectangles
# find the max value we'd need to get the vertical padding we want
# even if that padding is from the top-left rectangle
maxy = max([tl.rect.y, tr.rect.y])
# adjust the x positioning so that the bottom right corner of this
# rectangle goes into the bottom right
xadjust = tr.rect.x - rectangle.x
            self.rectangles.append(PositionedRectangle(tr.x + xadjust, maxy + self.ypadding, rectangle))
elif len(self.rectangles) == 3: # bottom left
br = self.rectangles[-1]
# get a height adjustment so that the bottom-left corner of this
# rectangle goes into the bottom-left corner
yadjust = br.rect.y - rectangle.y
self.rectangles.append(PositionedRectangle(0, br.y + yadjust, rectangle))
else:
raise Exception("BoxField can only accept 4 rectangles; "
"You've packed too many images!")
self.x, self.y = self.calculate_bounds()
class AlternatingField(Field):
"""A field that packs vertically, alternating from left ot right. This
is useful for buttons that have a left-side and a right side."""
def __init__(self, padding=0):
super(AlternatingField, self).__init__()
self.padding = padding
def add_rectangle(self, rectangle):
"""Rectangles must be sorted width-wise for this!"""
if not self.rectangles:
self.rectangles.append(PositionedRectangle(0, 0, rectangle))
self.align = 'right'
elif self.align == 'right':
# align this rectangle along the right edge; the max width of the
# sprite is already determined by the first rectangle, which must be
# the widest rectangle
xpos = self.x - rectangle.x
self.rectangles.append(PositionedRectangle(xpos, self.y + self.padding, rectangle))
self.align = 'left'
elif self.align == 'left':
self.rectangles.append(PositionedRectangle(0, self.y + self.padding, rectangle))
self.align = 'right'
self.x, self.y = self.calculate_bounds()
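# Minimal illustrative demo of the greedy packing described in the module
# docstring: rectangles are added largest-first and the packed bounds printed.
if __name__ == '__main__':
    demo_field = Field()
    demo_rects = [Rectangle(64, 48), Rectangle(32, 32), Rectangle(16, 24)]
    for demo_rect in sorted(demo_rects, key=lambda r: r.x * r.y, reverse=True):
        demo_field.add_rectangle(demo_rect)
    for placed in demo_field.rectangles:
        print(placed)
    print('packed bounds: %dx%d' % (demo_field.x, demo_field.y))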
| {
"content_hash": "4105058f1a10d9861dd8dfa55e424bba",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 107,
"avg_line_length": 44.35755813953488,
"alnum_prop": 0.5907333377023396,
"repo_name": "shopwiki/pyxie",
"id": "a1ccdffe622c98ed756b7e597b1d956bef018fdd",
"size": "15306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyxie/packer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37819"
}
],
"symlink_target": ""
} |
"""Wraps the body of a converted function with auxiliary constructs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import annos
class _Function(object):
def __init__(self):
self.context_name = None
class FunctionBodyTransformer(converter.Base):
"""Wraps function bodies around autograph-specific boilerplate."""
def visit_Return(self, node):
if node.value is None:
return node
return templates.replace(
'return function_context_name.mark_return_value(value)',
function_context_name=self.state[_Function].context_name,
value=node.value)
def _function_scope_options(self):
"""Returns the options with which to create function scopes."""
    # Top-level functions receive the options that were directly requested.
# All others receive the options corresponding to a recursive conversion.
# Note: this mainly controls the user_requested flag, which is important
# primarily because the FunctionScope context also creates a
# ControlStatusCtx(autograph=ENABLED) when user_requested is True. See
# function_wrappers.py.
if self.state[_Function].level == 2:
return self.ctx.program.options
return self.ctx.program.options.call_options()
def visit_Lambda(self, node):
self.state[_Function].enter()
node = self.generic_visit(node)
# Only wrap the top-level function. Theoretically, we can and should wrap
# everything, but that can lead to excessive boilerplate when lambdas are
# nested.
    # TODO(mdan): Look more closely for use cases that actually require this.
if self.state[_Function].level > 2:
self.state[_Function].exit()
return node
scope = anno.getanno(node, anno.Static.SCOPE)
function_context_name = self.ctx.namer.new_symbol('lscope',
scope.referenced)
self.state[_Function].context_name = function_context_name
anno.setanno(node, 'function_context_name', function_context_name)
template = """
ag__.with_function_scope(
lambda function_context: body, function_context_name, options)
"""
node.body = templates.replace_as_expression(
template,
options=self._function_scope_options().to_ast(),
function_context=function_context_name,
function_context_name=gast.Constant(function_context_name, kind=None),
body=node.body)
self.state[_Function].exit()
return node
def visit_FunctionDef(self, node):
self.state[_Function].enter()
scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
function_context_name = self.ctx.namer.new_symbol('fscope',
scope.referenced)
self.state[_Function].context_name = function_context_name
anno.setanno(node, 'function_context_name', function_context_name)
node = self.generic_visit(node)
docstring_node = None
if node.body:
first_statement = node.body[0]
if (isinstance(first_statement, gast.Expr) and
isinstance(first_statement.value, gast.Constant)):
docstring_node = first_statement
node.body = node.body[1:]
template = """
with ag__.FunctionScope(
function_name, context_name, options) as function_context:
body
"""
wrapped_body = templates.replace(
template,
function_name=gast.Constant(node.name, kind=None),
context_name=gast.Constant(function_context_name, kind=None),
options=self._function_scope_options().to_ast(),
function_context=function_context_name,
body=node.body)
if docstring_node is not None:
wrapped_body = [docstring_node] + wrapped_body
node.body = wrapped_body
self.state[_Function].exit()
return node
def transform(node, ctx):
return FunctionBodyTransformer(ctx).visit(node)
| {
"content_hash": "8e92cc87db138a3ff5265ef6557f4b2e",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 35.21186440677966,
"alnum_prop": 0.6813477737665463,
"repo_name": "renyi533/tensorflow",
"id": "100a14e44949aa9df614e7dd7dce9fe85b98ec1e",
"size": "4844",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/converters/function_scopes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""This code example creates new activity groups.
To determine which activity groups exist, run get_all_activity_groups.py.
Tags: ActivityGroupService.createActivityGroups
"""
__author__ = 'Vincent Tsao'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Set the ID of the advertiser company this activity group is associated with.
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_company_id):
# Initialize appropriate service.
activity_group_service = client.GetService('ActivityGroupService',
version='v201308')
# Create a short-term activity group.
short_term_activity_group = {
'name': 'Short-term activity group #%s' % Utils.GetUniqueName(),
'companyIds': [advertiser_company_id],
'clicksLookback': '1',
'impressionsLookback': '1'
}
# Create a long-term activity group.
long_term_activity_group = {
'name': 'Long-term activity group #%s' % Utils.GetUniqueName(),
'companyIds': [advertiser_company_id],
'clicksLookback': '30',
'impressionsLookback': '30'
}
# Create the activity groups on the server.
activity_groups = activity_group_service.CreateActivityGroups([
short_term_activity_group, long_term_activity_group])
# Display results.
for activity_group in activity_groups:
print ('Activity group with ID \'%s\' and name \'%s\' was created.'
% (activity_group['id'], activity_group['name']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, ADVERTISER_COMPANY_ID)
| {
"content_hash": "f191971e2df9621e829b5a577acca543",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 34.12280701754386,
"alnum_prop": 0.6730077120822622,
"repo_name": "lociii/googleads-python-lib",
"id": "24f94f32f4a6b31f9ed38162147d9fe85b3ecc9d",
"size": "2563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201308/activity_group_service/create_activity_groups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3481618"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
} |
from .download import *
from .login import login_session as login
from .config import load_config as config
def sort_teacher(teacher_list: list) -> str:
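    """Join teacher names with '、', keeping at most three; when there are three
    or more, '等' ("etc.") is appended to the third name and the rest are dropped."""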
teacher_name = []
for i in teacher_list:
teacher_name.append(i.string)
if len(teacher_name) >= 3:
teacher_name[2] += '等'
break
return '、'.join(teacher_name)
| {
"content_hash": "ccca416c83c0ad7b5c85ac34e3e1dd05",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 44,
"avg_line_length": 28,
"alnum_prop": 0.6208791208791209,
"repo_name": "Rhilip/cn-mooc-dl",
"id": "5b88483aa5f3475f3aaf790dc8351928a4d85347",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47992"
}
],
"symlink_target": ""
} |
"""Cancel an existing iSCSI account."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier')
@click.option('--reason', help="An optional reason for cancellation")
@click.option('--immediate',
is_flag=True,
help="Cancels the iSCSI immediately instead of on the billing "
"anniversary")
@environment.pass_env
def cli(env, identifier, reason, immediate):
"""Cancel an existing iSCSI account."""
iscsi_mgr = SoftLayer.ISCSIManager(env.client)
iscsi_id = helpers.resolve_id(iscsi_mgr.resolve_ids, identifier, 'iSCSI')
if not (env.skip_confirmations or formatting.no_going_back(iscsi_id)):
raise exceptions.CLIAbort('Aborted')
iscsi_mgr.cancel_iscsi(iscsi_id, reason, immediate)
| {
"content_hash": "e3af1c56be97c468968cd5941e5268b1",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 32.4,
"alnum_prop": 0.7129629629629629,
"repo_name": "skraghu/softlayer-python",
"id": "f744cb62723aac06d7af3ba689ca377319e5ae3c",
"size": "972",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/iscsi/cancel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Python",
"bytes": "1039495"
}
],
"symlink_target": ""
} |
import sys
import tweepy # https://github.com/tweepy/tweepy
import csv
import api_keys
import xlsxwriter
import tweet_cleaner
import json
import argparse
parser = argparse.ArgumentParser(description='collect user tweets')
parser.add_argument('-u', '--user', type=str,
help='user', required=True)
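# Example invocation (illustrative): python user_tweets_history.py -u some_screen_name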
def get_all_tweets(screen_name):
    # Twitter only allows access to a user's most recent 3240 tweets with this method
# authorize twitter, initialize tweepy
if api_keys.consumer_key == '' or api_keys.consumer_secret == '' \
or api_keys.access_token == '' or api_keys.access_secret == '':
print("API key not found. Please check api_keys.py file")
sys.exit(-1)
auth = tweepy.OAuthHandler(api_keys.consumer_key, api_keys.consumer_secret)
auth.set_access_token(api_keys.access_token, api_keys.access_secret)
api = tweepy.API(auth)
# initialize a list to hold all the tweepy Tweets
alltweets = []
# make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name=screen_name, count=200,
tweet_mode='extended')
# save most recent tweets
alltweets.extend(new_tweets)
# save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print("getting tweets before {}".format(oldest))
        # all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name=screen_name, count=200,
max_id=oldest, tweet_mode='extended')
# save most recent tweets
alltweets.extend(new_tweets)
# update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print("...{} tweets downloaded so far".format((len(alltweets))))
with open('{}.json'.format(screen_name), 'w') as outfile:
for t in alltweets:
outfile.write(json.dumps(t._json))
outfile.write('\n')
print('{} tweets have been written successfully to {}.json'.format((len(alltweets)), screen_name))
def process_json(screen_name):
workbook = xlsxwriter.Workbook('%s_tweets.xlsx' % screen_name)
worksheet = workbook.add_worksheet()
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
worksheet.write(row, 0, 'id')
worksheet.write(row, 1, 'created_at')
worksheet.write(row, 2, 'full_text')
worksheet.write(row, 3, 'clean_text')
row += 1
with open('{}.json'.format(screen_name)) as json_reader:
lines = json_reader.readlines()
for line in lines:
json_tweet = json.loads(line)
if 'retweeted_status' in json_tweet:
text = json_tweet['retweeted_status']['full_text']
else:
text = json_tweet['full_text']
clean_text = tweet_cleaner.clean_tweet(text)
clean_text = tweet_cleaner.normalize_arabic(clean_text)
clean_text = tweet_cleaner.remove_repeating_char(clean_text)
clean_text = tweet_cleaner.keep_only_arabic(clean_text.split())
worksheet.write(row, col, json_tweet['id_str'])
worksheet.write(row, col + 1, json_tweet['created_at'])
worksheet.write(row, col + 2, text)
worksheet.write(row, col + 3, clean_text)
row += 1
workbook.close()
if __name__ == '__main__':
# pass in the username of the account you want to download
args = parser.parse_args()
user = args.user
get_all_tweets(user)
process_json(user)
| {
"content_hash": "2b3cb59a13b2faed4b6c1911985f8ef5",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 102,
"avg_line_length": 37.5959595959596,
"alnum_prop": 0.6265448683503493,
"repo_name": "motazsaad/tweets-collector",
"id": "f77875ceec7c5aa78cf2ffcc3e4d7864eec2118b",
"size": "3838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_tweets_history.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25902"
}
],
"symlink_target": ""
} |
from __future__ import division
from random import random, seed, expovariate, uniform, triangular, gammavariate, gauss, lognormvariate, weibullvariate
from datetime import datetime
import os
from csv import writer
import yaml
import shutil
import networkx as nx
from data_record import DataRecord
from server import Server
class Node:
"""
Class for a node on our network
"""
def __init__(self, id_number, simulation):
"""
Initialise a node.
"""
self.simulation = simulation
self.mu = [self.simulation.mu[cls][id_number-1] for cls in range(len(self.simulation.mu))]
self.scheduled_servers = self.simulation.schedules[id_number-1]
if self.scheduled_servers:
self.schedule = self.simulation.parameters[self.simulation.c[id_number-1]]
self.cyclelength = self.simulation.parameters['cycle_length']
self.c = self.schedule[0][1]
self.masterschedule = [i*self.cyclelength + obs for i in range(self.simulation.max_simulation_time//self.cyclelength + 1) for obs in [t[0] for t in self.schedule]][1:]
else:
self.c = self.simulation.c[id_number-1]
self.node_capacity = "Inf" if self.simulation.queue_capacities[id_number-1] == "Inf" else self.simulation.queue_capacities[id_number-1] + self.c
self.transition_row = [self.simulation.transition_matrix[j][id_number-1] for j in range(len(self.simulation.transition_matrix))]
if self.simulation.class_change_matrix != 'NA':
self.class_change_for_node = self.simulation.class_change_matrix[id_number-1]
self.class_change_cdf = self.find_cdf_class_changes()
self.individuals = []
self.id_number = id_number
self.cum_transition_row = self.find_cum_transition_row()
if self.scheduled_servers:
self.next_event_date = self.masterschedule[0]
else:
self.next_event_date = "Inf"
self.blocked_queue = []
if self.c < 'Inf':
self.servers = [Server(self, i+1) for i in range(self.c)]
if simulation.detecting_deadlock:
self.simulation.digraph.add_nodes_from([str(s) for s in self.servers])
def find_cdf_class_changes(self):
"""
Turning the pdf of the class change probabilities into a cdf.
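        For example, a pdf row of [0.7, 0.2, 0.1] becomes the cdf row [0.7, 0.9, 1.0].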
"""
return [[sum(self.class_change_for_node[j][0:i+1]) for i in range(len(self.class_change_for_node[j]))] for j in range(len(self.class_change_for_node))]
def find_cum_transition_row(self):
"""
Finds the cumulative transition row for the node
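        For example, a transition row of [0.1, 0.4, 0.3] becomes [0.1, 0.5, 0.8];
        any probability left over after the last entry corresponds to exiting the
        system (see next_node).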
"""
cum_transition_row = []
for cls in range(len(self.transition_row)):
sum_p = 0
cum_transition_row.append([])
for p in self.transition_row[cls]:
sum_p += p
cum_transition_row[cls].append(sum_p)
return cum_transition_row
def __repr__(self):
"""
Representation of a node::
"""
return 'Node %s' % self.id_number
def attach_server(self, server, individual):
"""
Attaches a server to an individual, and vice versa
"""
server.cust = individual
server.busy = True
individual.server = server
if self.simulation.detecting_deadlock:
for blq in self.blocked_queue:
inds = [ind for ind in self.simulation.nodes[blq[0]].individuals if ind.id_number==blq[1]]
ind = inds[0]
if ind != individual:
self.simulation.digraph.add_edge(str(ind.server), str(server))
def detatch_server(self, server, individual):
"""
Detatches a server from an individual, and vice versa
"""
server.cust = False
server.busy = False
individual.server = False
if self.simulation.detecting_deadlock:
self.simulation.digraph.remove_edges_from(self.simulation.digraph.in_edges(str(server)) + self.simulation.digraph.out_edges(str(server)))
if server.offduty:
self.kill_server(server)
def have_event(self):
"""
Has an event
"""
if self.check_if_shiftchange():
self.change_shift()
else:
            self.finish_service()
def change_shift(self):
"""
Add servers and deletes or indicates which servers should go off duty
"""
highest_id = max([srvr.id_number for srvr in self.servers])
shift = self.next_event_date%self.cyclelength
self.take_servers_off_duty()
self.add_new_server(shift,highest_id)
indx = [obs[0] for obs in self.schedule].index(shift)
self.c = self.schedule[indx][1]
def take_servers_off_duty(self):
"""
Gathers servers that should be deleted
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_server_schedule/'))
>>> N = Q.transitive_nodes[0]
>>> N.add_new_server(90, 1)
>>> N.servers
[Server 1 at Node 1, Server 2 at Node 1, Server 3 at Node 1, Server 4 at Node 1]
>>> N.servers[1].busy = True
>>> N.servers[2].busy = True
>>> [obs.busy for obs in N.servers]
[False, True, True, False]
>>> [obs.offduty for obs in N.servers]
[False, False, False, False]
>>> N.take_servers_off_duty()
>>> N.servers
[Server 2 at Node 1, Server 3 at Node 1]
>>> [obs.busy for obs in N.servers]
[True, True]
>>> [obs.offduty for obs in N.servers]
[True, True]
"""
to_delete = []
for srvr in self.servers:
if srvr.busy:
srvr.offduty = True
else:
to_delete.append(srvr)
for obs in to_delete:
self.kill_server(obs)
def check_if_shiftchange(self):
"""
Check whether current time is a shift change
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_server_schedule/'))
>>> N = Q.transitive_nodes[0]
>>> N.next_event_date = 12.0
>>> N.check_if_shiftchange()
False
>>> N.next_event_date = 30.0
>>> N.check_if_shiftchange()
True
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> N = Q.transitive_nodes[0]
>>> N.next_event_date = 12.0
>>> N.check_if_shiftchange()
False
>>> N.next_event_date = 30.0
>>> N.check_if_shiftchange()
False
"""
if self.scheduled_servers:
return self.next_event_date == self.masterschedule[0]
return False
def finish_service(self):
"""
The next individual finishes service
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> from individual import Individual
>>> seed(4)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> N = Q.transitive_nodes[0]
>>> inds = [Individual(i+1) for i in range(3)]
>>> for current_time in [0.01, 0.02, 0.03]:
... N.accept(inds[int(current_time*100 - 1)], current_time)
>>> N.individuals
[Individual 1, Individual 2, Individual 3]
>>> N.update_next_event_date(0.03)
>>> round(N.next_event_date, 5)
0.03604
>>> N.finish_service()
>>> N.individuals
[Individual 1, Individual 3]
"""
if self.c == "Inf":
next_individual_index = [ind.service_end_date for ind in self.individuals].index(self.next_event_date)
else:
next_individual_index = [ind.service_end_date for ind in self.individuals[:self.c]].index(self.next_event_date)
next_individual = self.individuals[next_individual_index]
self.change_customer_class(next_individual)
next_node = self.next_node(next_individual.customer_class)
if len(next_node.individuals) < next_node.node_capacity:
self.release(next_individual_index, next_node, self.next_event_date)
else:
self.block_individual(next_individual, next_node)
def change_customer_class(self,individual):
"""
Takes individual and changes customer class according to a probability distribution.
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> seed(14)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_dynamic_classes/'))
>>> N1 = Q.transitive_nodes[0]
>>> ind = Individual(254, 0)
>>> ind.customer_class
0
>>> ind.previous_class
0
>>> N1.change_customer_class(ind)
>>> ind.customer_class
0
>>> ind.previous_class
0
>>> N1.change_customer_class(ind)
>>> ind.customer_class
0
>>> ind.previous_class
0
>>> N1.change_customer_class(ind)
>>> ind.customer_class
1
>>> ind.previous_class
0
>>> N1.change_customer_class(ind)
>>> ind.customer_class
1
>>> ind.previous_class
1
"""
if self.simulation.class_change_matrix != 'NA':
rnd_num=random()
cdf=self.class_change_cdf[individual.customer_class]
individual.previous_class=individual.customer_class
inx=0
for i in cdf:
if rnd_num<=i:
individual.customer_class=inx
break
inx+=1
def block_individual(self, individual, next_node):
"""
Blocks the individual from entering the next node
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> seed(4)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_deadlock_sim/'))
>>> inds = [Individual(i+1) for i in range(7)]
>>> N1 = Q.transitive_nodes[0]
>>> N1.individuals = inds[:6]
>>> N2 = Q.transitive_nodes[1]
>>> N2.accept(inds[6], 2)
>>> inds[6].is_blocked
False
>>> N1.blocked_queue
[]
>>> Q.digraph.edges()
[]
>>> N2.block_individual(inds[6], N1)
>>> inds[6].is_blocked
True
>>> N1.blocked_queue
[(2, 7)]
>>> Q.digraph.edges()
[('Server 1 at Node 2', 'Server 2 at Node 1'), ('Server 1 at Node 2', 'Server 1 at Node 1')]
"""
individual.is_blocked = True
self.change_state_block()
next_node.blocked_queue.append((self.id_number, individual.id_number))
if self.simulation.detecting_deadlock:
for svr in next_node.servers:
self.simulation.digraph.add_edge(str(individual.server), str(svr))
def release(self, next_individual_index, next_node, current_time):
"""
Update node when an individual is released.
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> seed(4)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> N = Q.transitive_nodes[0]
>>> inds = [Individual(i+1) for i in range(3)]
>>> for current_time in [0.01, 0.02, 0.03]:
... N.accept(inds[int(current_time*100 - 1)], current_time)
>>> N.individuals
[Individual 1, Individual 2, Individual 3]
>>> N.update_next_event_date(0.03)
>>> round(N.next_event_date, 5)
0.03604
>>> N.individuals[1].exit_date = 0.04 #shouldn't affect the next event date
>>> N.update_next_event_date(N.next_event_date)
>>> round(N.next_event_date, 5)
0.03708
>>> N.release(1, Q.transitive_nodes[1], N.next_event_date)
>>> N.individuals
[Individual 1, Individual 3]
>>> N.update_next_event_date(N.next_event_date)
>>> round(N.next_event_date, 5)
0.06447
"""
next_individual = self.individuals.pop(next_individual_index)
next_individual.exit_date = current_time
if self.c < 'Inf':
self.detatch_server(next_individual.server, next_individual)
self.write_individual_record(next_individual)
self.change_state_release(next_individual)
self.release_blocked_individual(current_time)
self.begin_service_if_possible_release(current_time)
next_node.accept(next_individual, current_time)
def begin_service_if_possible_release(self, current_time):
"""
Begins the service of the next individual, giving that customer a service time, end date and node
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> seed(50)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_deadlock_sim/'))
>>> inds = [Individual(i) for i in range(30)]
>>> Q.transitive_nodes[0].individuals = inds
>>> ind = Q.transitive_nodes[0].individuals[Q.transitive_nodes[0].c - 1]
>>> ind.service_time = 3.14
>>> ind.arrival_date = 100.0
>>> Q.digraph.nodes()
['Server 2 at Node 1', 'Server 1 at Node 2', 'Server 1 at Node 1']
>>> ind.arrival_date
100.0
>>> ind.service_time
3.14
>>> ind.service_start_date
False
>>> ind.service_end_date
False
>>> Q.transitive_nodes[0].begin_service_if_possible_release(200.0)
>>> ind.arrival_date
100.0
>>> round(ind.service_time,5)
3.14
>>> ind.service_start_date
200.0
>>> round(ind.service_end_date,5)
203.14
"""
if len(self.individuals) >= self.c:
for ind in self.individuals[:self.c]:
if not ind.service_start_date:
self.attach_server(self.find_free_server(), ind)
ind.service_start_date = current_time
ind.service_end_date = ind.service_start_date + ind.service_time
def release_blocked_individual(self, current_time):
"""
Releases an individual who becomes unblocked when another individual is released
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_deadlock_sim/'))
>>> N1 = Q.transitive_nodes[0]
>>> N2 = Q.transitive_nodes[1]
>>> N1.individuals = [Individual(i) for i in range(N1.c + 3)]
>>> N2.individuals = [Individual(i + 100) for i in range(N2.c + 4)]
>>> for ind in N1.individuals[:2]:
... N1.attach_server(N1.find_free_server(), ind)
>>> for ind in N2.individuals[:1]:
... N2.attach_server(N2.find_free_server(), ind)
>>> N1.individuals
[Individual 0, Individual 1, Individual 2, Individual 3, Individual 4]
>>> N2.individuals
[Individual 100, Individual 101, Individual 102, Individual 103, Individual 104]
>>> N1.release_blocked_individual(100)
>>> N1.individuals
[Individual 0, Individual 1, Individual 2, Individual 3, Individual 4]
>>> N2.individuals
[Individual 100, Individual 101, Individual 102, Individual 103, Individual 104]
>>> N1.blocked_queue = [(1, 1), (2, 100)]
>>> rel_ind = N1.individuals.pop(0)
>>> N1.detatch_server(rel_ind.server, rel_ind)
>>> N1.release_blocked_individual(110)
>>> N1.individuals
[Individual 2, Individual 3, Individual 4, Individual 100, Individual 1]
>>> N2.individuals
[Individual 101, Individual 102, Individual 103, Individual 104]
"""
if len(self.blocked_queue) > 0:
node_to_receive_from = self.simulation.nodes[self.blocked_queue[0][0]]
individual_to_receive_index = [ind.id_number for ind in node_to_receive_from.individuals].index(self.blocked_queue[0][1])
individual_to_receive = node_to_receive_from.individuals[individual_to_receive_index]
self.blocked_queue.pop(0)
node_to_receive_from.release(individual_to_receive_index, self, current_time)
def change_state_release(self, next_individual):
"""
Changes the state of the system when a customer gets blocked
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> Q.state = [[0, 0], [0, 0], [2, 1], [0, 0]]
>>> N = Q.transitive_nodes[2]
>>> inds = [Individual(i) for i in range(3)]
>>> N.individuals = inds
>>> N.change_state_release(inds[0])
>>> Q.state
[[0, 0], [0, 0], [1, 1], [0, 0]]
>>> inds[1].is_blocked = True
>>> N.change_state_release(inds[1])
>>> Q.state
[[0, 0], [0, 0], [1, 0], [0, 0]]
"""
if next_individual.is_blocked:
self.simulation.state[self.id_number-1][1] -= 1
else:
self.simulation.state[self.id_number-1][0] -= 1
def change_state_block(self):
"""
Changes the state of the system when a customer gets blocked
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> Q.state = [[0, 0], [0, 0], [2, 1], [0, 0]]
>>> N = Q.transitive_nodes[2]
>>> N.change_state_block()
>>> Q.state
[[0, 0], [0, 0], [1, 2], [0, 0]]
>>> N.change_state_block()
>>> Q.state
[[0, 0], [0, 0], [0, 3], [0, 0]]
"""
self.simulation.state[self.id_number-1][1] += 1
self.simulation.state[self.id_number-1][0] -= 1
def change_state_accept(self):
"""
Changes the state of the system when a customer gets blocked
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> Q.state = [[0, 0], [0, 0], [2, 1], [0, 0]]
>>> N = Q.transitive_nodes[2]
>>> N.change_state_accept()
>>> Q.state
[[0, 0], [0, 0], [3, 1], [0, 0]]
>>> N.change_state_accept()
>>> Q.state
[[0, 0], [0, 0], [4, 1], [0, 0]]
"""
self.simulation.state[self.id_number-1][0] += 1
def accept(self, next_individual, current_time):
"""
Accepts a new customer to the queue
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> seed(6)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> N = Q.transitive_nodes[0]
>>> N.next_event_date = 0.0
>>> N.individuals
[]
>>> ind1 = Individual(1)
>>> ind2 = Individual(2)
>>> ind3 = Individual(3)
>>> ind4 = Individual(4)
>>> ind5 = Individual(5)
>>> ind6 = Individual(6)
>>> ind7 = Individual(7)
>>> ind8 = Individual(8)
>>> ind9 = Individual(9)
>>> ind10 = Individual(10)
>>> N.accept(ind1, 0.01)
>>> N.individuals
[Individual 1]
>>> ind1.arrival_date
0.01
>>> ind1.service_start_date
0.01
>>> round(ind1.service_time, 5)
0.18695
>>> round(ind1.service_end_date, 5)
0.19695
>>> N.accept(ind2, 0.02)
>>> N.accept(ind3, 0.03)
>>> N.accept(ind4, 0.04)
>>> N.individuals
[Individual 1, Individual 2, Individual 3, Individual 4]
>>> round(ind4.arrival_date, 5)
0.04
>>> round(ind4.service_start_date, 5)
0.04
>>> round(ind4.service_time, 5)
0.1637
>>> round(ind4.service_end_date, 5)
0.2037
>>> N.accept(ind5, 0.05)
>>> N.accept(ind6, 0.06)
>>> N.accept(ind7, 0.07)
>>> N.accept(ind8, 0.08)
>>> N.accept(ind9, 0.09)
>>> N.accept(ind10, 0.1)
>>> N.individuals
[Individual 1, Individual 2, Individual 3, Individual 4, Individual 5, Individual 6, Individual 7, Individual 8, Individual 9, Individual 10]
>>> round(ind10.arrival_date, 5)
0.1
>>> ind10.service_start_date
False
>>> round(ind10.service_time, 5)
0.16534
"""
next_individual.exit_date = False
next_individual.is_blocked = False
self.begin_service_if_possible_accept(next_individual, current_time)
self.individuals.append(next_individual)
self.change_state_accept()
def begin_service_if_possible_accept(self, next_individual, current_time):
"""
Begins the service of the next individual, giving that customer a service time, end date and node
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> seed(50)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_deadlock_sim/'))
>>> ind = Individual(1)
>>> Q.digraph.nodes()
['Server 2 at Node 1', 'Server 1 at Node 2', 'Server 1 at Node 1']
>>> ind.arrival_date
False
>>> ind.service_time
False
>>> ind.service_start_date
False
>>> ind.service_end_date
False
>>> Q.transitive_nodes[0].begin_service_if_possible_accept(ind, 300)
>>> ind.arrival_date
300
>>> round(ind.service_time,5)
0.03382
>>> ind.service_start_date
300
>>> round(ind.service_end_date,5)
300.03382
"""
next_individual.arrival_date = current_time
next_individual.service_time = self.simulation.service_times[self.id_number][next_individual.customer_class]()
if len(self.individuals) < self.c:
if self.c < 'Inf':
self.attach_server(self.find_free_server(), next_individual)
next_individual.service_start_date = current_time
next_individual.service_end_date = current_time + next_individual.service_time
def find_free_server(self):
"""
Finds a free server
"""
free_servers = [svr for svr in self.servers if not svr.busy]
return free_servers[0]
def kill_server(self,srvr):
"""
Kills server
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_server_schedule/'))
>>> N = Q.transitive_nodes[0]
>>> s = N.servers[0]
>>> N.servers
[Server 1 at Node 1]
>>> N.kill_server(s)
>>> N.servers
[]
"""
indx = self.servers.index(srvr)
del self.servers[indx]
def add_new_server(self, shift, highest_id):
"""
Add appropriate amount of servers for the given shift
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_server_schedule/'))
>>> N = Q.transitive_nodes[0]
>>> s = 90
>>> N.servers
[Server 1 at Node 1]
>>> N.add_new_server(s,1)
>>> N.servers
[Server 1 at Node 1, Server 2 at Node 1, Server 3 at Node 1, Server 4 at Node 1]
"""
indx = [obs[0] for obs in self.schedule].index(shift)
num_servers = self.schedule[indx][1]
for i in range(num_servers):
self.servers.append(Server(self, highest_id+i+1))
def update_next_event_date(self, current_time):
"""
Finds the time of the next event at this node
>>> from simulation import Simulation
>>> from individual import Individual
>>> from import_params import load_parameters
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> N = Q.transitive_nodes[0]
>>> N.next_event_date
'Inf'
>>> N.individuals
[]
>>> N.update_next_event_date(0.0)
>>> N.next_event_date
'Inf'
>>> ind1 = Individual(1)
>>> ind1.arrival_date = 0.3
>>> ind1.service_time = 0.2
>>> ind1.service_end_date = 0.5
>>> N.next_event_date = 0.3
>>> N.individuals = [ind1]
>>> N.update_next_event_date(N.next_event_date)
>>> N.next_event_date
0.5
>>> ind2 = Individual(2)
>>> ind2.arrival_date = 0.4
>>> ind2.service_time = 0.2
>>> ind2.service_end_date = 0.6
>>> ind2.exit_date = False
>>> N.individuals = [ind1, ind2]
>>> N.update_next_event_date(N.next_event_date)
>>> N.next_event_date
0.6
>>> ind2.exit_date = 0.9 # shouldn't affect next_event_date
>>> N.update_next_event_date(N.next_event_date)
>>> N.next_event_date
'Inf'
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_server_schedule/'))
>>> N = Q.transitive_nodes[0]
>>> N.next_event_date
30
>>> N.individuals
[]
>>> N.update_next_event_date(0.0)
>>> N.next_event_date
30
>>> ind1 = Individual(1)
>>> ind1.arrival_date = 0.3
>>> ind1.service_time = 0.2
>>> ind1.service_end_date = 0.5
>>> N.next_event_date = 0.3
>>> N.individuals = [ind1]
>>> N.update_next_event_date(N.next_event_date)
>>> N.next_event_date
0.5
>>> ind2 = Individual(2)
>>> ind2.arrival_date = 0.7
>>> ind2.service_time = 0.2
>>> ind2.service_end_date = 0.9
>>> ind2.exit_date = False
>>> N.individuals = [ind1, ind2]
>>> N.update_next_event_date(N.next_event_date)
>>> N.next_event_date
30
"""
if self.c == "Inf":
next_end_service = min([ind.service_end_date for ind in self.individuals if not ind.is_blocked if ind.service_end_date>current_time] + ["Inf"])
else:
next_end_service = min([ind.service_end_date for ind in self.individuals[:self.c] if not ind.is_blocked if ind.service_end_date>current_time] + ["Inf"])
if self.scheduled_servers:
next_shift_change = self.masterschedule[0]
self.next_event_date = min(next_end_service, next_shift_change)
else:
self.next_event_date = next_end_service
def next_node(self, customer_class):
"""
Finds the next node according the random distribution.
An example showing a node choosing both nodes and exit node randomly.
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> seed(6)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> node = Q.transitive_nodes[0]
>>> node.next_node(0)
Node 4
>>> node.next_node(0)
Node 4
>>> node.next_node(0)
Node 4
>>> node.next_node(0)
Node 4
>>> node.next_node(0)
Node 2
>>> node.next_node(0)
Node 4
Another example.
>>> seed(54)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> node = Q.transitive_nodes[2]
>>> node.next_node(0)
Node 2
>>> node.next_node(0)
Node 4
>>> node.next_node(0)
Node 2
>>> node.next_node(0)
Node 2
>>> node.next_node(0)
Node 2
>>> node.next_node(0)
Node 2
>>> node.next_node(0)
Node 4
>>> node.next_node(0)
Node 2
"""
rnd_num = random()
for p in range(len(self.cum_transition_row[customer_class])):
if rnd_num < self.cum_transition_row[customer_class][p]:
return self.simulation.transitive_nodes[p]
return self.simulation.nodes[-1]
def write_individual_record(self, individual):
"""
Write a data record for an individual:
- Arrival date
- Wait
- Service start date
- Service time
- Service end date
- Blocked
- Exit date
An example showing the data records written; can only write records once an exit date has been determined.
>>> from simulation import Simulation
>>> from import_params import load_parameters
>>> from individual import Individual
>>> seed(7)
>>> Q = Simulation(load_parameters('tests/datafortesting/logs_test_for_simulation/'))
>>> N = Q.transitive_nodes[0]
>>> ind = Individual(6)
>>> N.accept(ind, 3)
>>> ind.service_start_date = 3.5
>>> ind.exit_date = 9
>>> N.write_individual_record(ind)
>>> ind.data_records[1][0].arrival_date
3
>>> ind.data_records[1][0].wait
0.5
>>> ind.data_records[1][0].service_start_date
3.5
>>> round(ind.data_records[1][0].service_time, 5)
0.07894
>>> round(ind.data_records[1][0].service_end_date, 5)
3.57894
>>> round(ind.data_records[1][0].blocked, 5)
5.42106
>>> ind.data_records[1][0].exit_date
9
>>> ind.data_records[1][0].customer_class
0
"""
record = DataRecord(individual.arrival_date, individual.service_time, individual.service_start_date, individual.exit_date, self.id_number, individual.previous_class)
if self.id_number in individual.data_records:
individual.data_records[self.id_number].append(record)
else:
individual.data_records[self.id_number] = [record]
individual.arrival_date = False
individual.service_time = False
individual.service_start_date = False
individual.service_end_date = False
individual.exit_date = False | {
"content_hash": "a05e28aaa3a619235f47a7080d8de163",
"timestamp": "",
"source": "github",
"line_count": 858,
"max_line_length": 180,
"avg_line_length": 38.35081585081585,
"alnum_prop": 0.534599604923264,
"repo_name": "geraintpalmer/ASQ",
"id": "99a283600242bc95b026315ea2633bf05142fef0",
"size": "32905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "asq/node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73667"
}
],
"symlink_target": ""
} |
''' Provide the Auto property.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from .enum import Enum
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Auto',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Auto(Enum):
''' Accepts only the string "auto".
Useful for properties that can be configured to behave "automatically".
Example:
This property is often most useful in conjunction with the
:class:`~bokeh.core.properties.Either` property.
.. code-block:: python
>>> class AutoModel(HasProps):
... prop = Either(Float, Auto)
...
>>> m = AutoModel()
>>> m.prop = 10.2
>>> m.prop = "auto"
>>> m.prop = "foo" # ValueError !!
>>> m.prop = [1, 2, 3] # ValueError !!
'''
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
def _sphinx_type(self):
return self._sphinx_prop_link()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "44a6d18fc2e7260b9b0475c5c1f93915",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 82,
"avg_line_length": 28.23170731707317,
"alnum_prop": 0.2920086393088553,
"repo_name": "stonebig/bokeh",
"id": "51079f01e023592368e00196d1dd1a37dade9d05",
"size": "2646",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/core/property/auto.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
} |
from distutils.core import setup
import pytz
setup(
name='gae-pytz',
version=pytz.__version__,
url='https://github.com/potatolondon/gae-pytz/',
license='MIT',
author='Stuart Bishop',
author_email='stuart@stuartbishop.net',
description='A version of pytz for Django on Google App Engine.',
zip_safe=False,
platforms='any',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['pytz'],
include_package_data=True,
package_data={'pytz': ['zoneinfo.zip']},
)
| {
"content_hash": "ef2fa8b5291708b48e8478baa4daa57b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 30.40740740740741,
"alnum_prop": 0.6236297198538368,
"repo_name": "davidwtbuxton/gae-pytz",
"id": "1d17be51ab0e60faedaddbe614f4587e7145bbad",
"size": "821",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60248"
}
],
"symlink_target": ""
} |
import logging
from typing import Dict
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core import exceptions
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from polyaxon.utils.bool_utils import to_bool
_logger = logging.getLogger("polyaxon.commands")
class Command(BaseCommand):
"""Management utility to create users/superusers.
    This command is different from the django one, because:
1. does not prompt the user to enter a password, i.e. can be used inline.
2. validates and requires an email.
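    Example invocation (illustrative; assumes the default ``username`` field):
        python manage.py createuser --username=jane --email=jane@example.com --password=change-me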
"""
help = "Used to create a user/superuser."
requires_migrations_checks = True

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.UserModel = get_user_model()
        # pylint:disable=protected-access
        self.username_field = self.UserModel._meta.get_field(
            self.UserModel.USERNAME_FIELD
        )
        # pylint:disable=protected-access
        self.email_field = self.UserModel._meta.get_field("email")

    def add_arguments(self, parser) -> None:
        parser.add_argument(
            "--%s" % self.UserModel.USERNAME_FIELD,
            required=True,
            dest=self.UserModel.USERNAME_FIELD,
            help="Specifies the login for the user/superuser.",
        )
        parser.add_argument(
            "--password",
            required=True,
            dest="password",
            help="Specifies the password for the user/superuser.",
        )
        parser.add_argument(
            "--email",
            required=True,
            dest="email",
            help="Specifies the email for the user/superuser.",
        )
        parser.add_argument(
            "--superuser",
            dest="is_superuser",
            action="store_true",
            default=False,
            help="Specifies a user or superuser.",
        )
        parser.add_argument(
            "--force",
            dest="force",
            action="store_true",
            default=False,
            help="To force create the user even if the user is not valid.",
        )

    def validate_password(self, password: str, user_data: Dict, force: bool) -> None:
        try:
            validate_password(password, self.UserModel(**user_data))
        except ValidationError as e:
            _logger.warning("The password provided is not valid: %s", e)
            if force:
                # With --force, log the problem but create the user anyway.
                _logger.warning(
                    "The user will be created although the password does not pass validation."
                )
            else:
                raise e

    def handle(self, *args, **options):  # pylint:disable=too-many-branches
        username = options[self.UserModel.USERNAME_FIELD].strip()
        password = options["password"].strip()
        email = options["email"].strip()
        force = to_bool(options["force"])
        is_superuser = to_bool(options["is_superuser"])

        # Validate the username and email against the model field validators.
        try:
            username = self.username_field.clean(username, None)
        except exceptions.ValidationError as e:
            raise CommandError("; ".join(e.messages))
        try:
            self.email_field.clean(email, None)
        except exceptions.ValidationError as e:
            raise CommandError("; ".join(e.messages))

        # Skip creation silently if the username is already taken.
        try:
            self.UserModel.objects.get_by_natural_key(username)
        except self.UserModel.DoesNotExist:
            pass
        else:
            _logger.info(
                "Info: Username %s is already taken. Will not recreate user.", username
            )
            return

        # Fail if the email is already used by any existing user.
        try:
            self.UserModel.objects.get(email=email)
        except self.UserModel.DoesNotExist:
            pass
        except exceptions.MultipleObjectsReturned:
            raise CommandError("Error: That %s is already taken." % email)
        else:
            raise CommandError("Error: That %s is already taken." % email)

        if not username:
            raise CommandError("Error: Blank usernames aren't allowed.")
        if not password:
            raise CommandError("Error: Blank passwords aren't allowed.")
        if not email:
            raise CommandError("Error: Blank emails aren't allowed.")

        user_data = {self.UserModel.USERNAME_FIELD: username, "email": email}
        self.validate_password(password=password, user_data=user_data, force=force)
        user_data["password"] = password

        if is_superuser:
            self.UserModel.objects.create_superuser(**user_data)
        else:
            self.UserModel.objects.create_user(**user_data)

        if options["verbosity"] >= 1:
            self.stdout.write(
                "{} created successfully.".format(
                    "Superuser" if is_superuser else "User"
                )
            )
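
# --- Added invocation sketch (not part of the original createuser.py) --------
# One assumed way to run the command above from Python, using Django's
# call_command. Option names map to the argparse "dest" values; the username
# key assumes the default User model whose USERNAME_FIELD is "username", and
# all values below are placeholders.
from django.core.management import call_command

call_command(
    "createuser",
    username="admin",
    password="a-strong-example-password",
    email="admin@example.com",
    is_superuser=True,   # dest of the --superuser flag; omit for a regular user
)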
| {
"content_hash": "6aca9748b8e4b5acdf47a62435a1633c",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 98,
"avg_line_length": 34.48951048951049,
"alnum_prop": 0.58779399837794,
"repo_name": "polyaxon/polyaxon",
"id": "aa44cea184e0cf00b47c24ca6daf08d97909ca69",
"size": "5537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/polycommon/polycommon/commands/management/commands/createuser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
import unittest
from telemetry.user_story import user_story_filter
from telemetry.page import page
from telemetry.page import page_set

class MockUrlFilterOptions(object):
  def __init__(self, page_filter_include, page_filter_exclude):
    self.page_filter = page_filter_include
    self.page_filter_exclude = page_filter_exclude
    self.page_label_filter = None
    self.page_label_filter_exclude = None


class MockLabelFilterOptions(object):
  def __init__(self, page_label_filter, page_label_filter_exclude):
    self.page_filter = None
    self.page_filter_exclude = None
    self.page_label_filter = page_label_filter
    self.page_label_filter_exclude = page_label_filter_exclude

class UserStoryFilterTest(unittest.TestCase):

  def setUp(self):
    ps = page_set.PageSet()
    self.p1 = page.Page(
        'file://conformance/textures/tex-sub-image-2d.html', page_set=ps,
        name='WebglConformance.conformance_textures_tex_sub_image_2d',
        labels=['label1', 'label2'])
    self.p2 = page.Page(
        'file://othersuite/textures/tex-sub-image-3d.html', page_set=ps,
        name='OtherSuite.textures_tex_sub_image_3d',
        labels=['label1'])
    self.p3 = page.Page(
        'file://othersuite/textures/tex-sub-image-3d.html', page_set=ps,
        labels=['label2'])

  def testURLPattern(self):
    options = MockUrlFilterOptions('conformance_textures', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))

    options = MockUrlFilterOptions('textures', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))

    options = MockUrlFilterOptions('somethingelse', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))

  def testName(self):
    options = MockUrlFilterOptions('somethingelse', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))

    options = MockUrlFilterOptions('textures_tex_sub_image', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))

    options = MockUrlFilterOptions('WebglConformance', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))

    options = MockUrlFilterOptions('OtherSuite', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))

  def testNameNone(self):
    options = MockUrlFilterOptions('othersuite/textures', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p3))

    options = MockUrlFilterOptions('conformance/textures', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p3))

  def testLabelFilters(self):
    # Include both labels.
    options = MockLabelFilterOptions('label1,label2', '')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p3))

    # Exclude takes priority.
    options = MockLabelFilterOptions('label1', 'label2')
    user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
    self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
    self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p3))
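
# --- Added entry point sketch (not part of the original test file) -----------
# Allows running this module directly with `python user_story_filter_unittest.py`;
# in the Chromium tree these tests are normally collected by telemetry's own
# test runner instead.
if __name__ == '__main__':
  unittest.main()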
| {
"content_hash": "6e3f6d98be5a97652431b5f4071481aa",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 50.362637362637365,
"alnum_prop": 0.7636919048658084,
"repo_name": "dushu1203/chromium.src",
"id": "8445e70dcc5a06cad7731f477a8f0e0e3fc44290",
"size": "4746",
"binary": false,
"copies": "12",
"ref": "refs/heads/nw12",
"path": "tools/telemetry/telemetry/user_story/user_story_filter_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "34522"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9249764"
},
{
"name": "C++",
"bytes": "222763973"
},
{
"name": "CSS",
"bytes": "875874"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "27190037"
},
{
"name": "Java",
"bytes": "7645280"
},
{
"name": "JavaScript",
"bytes": "18828195"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1397246"
},
{
"name": "Objective-C++",
"bytes": "7575073"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "248854"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418340"
},
{
"name": "Python",
"bytes": "8032766"
},
{
"name": "Shell",
"bytes": "464218"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |