"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to namecoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "namecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "namecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| {
"content_hash": "6b16335ac83e3636aa6a54628fafc20c",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 129,
"avg_line_length": 38.651006711409394,
"alnum_prop": 0.5990623372113214,
"repo_name": "brandonrobertz/namecoin-core",
"id": "ef538fdc608913638c9a59ddff501a689a47e530",
"size": "5985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/multi_rpc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "688659"
},
{
"name": "C++",
"bytes": "5733984"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "194059"
},
{
"name": "Makefile",
"bytes": "114172"
},
{
"name": "Objective-C",
"bytes": "5737"
},
{
"name": "Objective-C++",
"bytes": "6763"
},
{
"name": "Python",
"bytes": "1363751"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "67429"
}
],
"symlink_target": ""
} |
try:
from urllib.parse import urlparse, urlunparse
except ImportError: # pragma: no cover
# Python 2 fallback
from urlparse import urlparse, urlunparse # noqa
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tokens import default_token_generator
from django.core.urlresolvers import reverse, reverse_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import ListView, DeleteView, View
from django.views.generic.edit import DeletionMixin
from . import app_settings
from . import forms
from . import utils
from .mixins import SessionMixin
LoginRequiredMixin = utils.import_from_path(
app_settings.LOGIN_REQUIRED_MIXIN_CLASS)
# -----------------------------------------------------------------------------
# Sessions
# -----------------------------------------------------------------------------
class SessionListView(LoginRequiredMixin, SessionMixin, ListView):
pass
class SessionDeleteView(LoginRequiredMixin, SessionMixin, DeleteView):
def get_success_url(self):
return str(reverse_lazy('safety:session_list'))
class SessionDeleteOtherView(LoginRequiredMixin, SessionMixin, DeletionMixin, View):
def get_object(self):
qs = super(SessionDeleteOtherView, self).get_queryset()
qs = qs.exclude(session_key=self.request.session.session_key)
return qs
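# Note (editorial addition): get_object intentionally returns a queryset rather
# than a single object; DeletionMixin.delete() then calls .delete() on that
# queryset, removing every session of the user except the current one.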
def get_success_url(self):
return str(reverse_lazy('safety:session_list'))
# -----------------------------------------------------------------------------
# Password Change
# -----------------------------------------------------------------------------
@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request):
return auth_views.password_change(
request=request,
template_name='safety/password_change/form.html',
post_change_redirect=reverse('safety:password_change_done'),
password_change_form=forms.PasswordChangeForm)
@login_required
def password_change_done(request):
return auth_views.password_change_done(
request=request,
template_name='safety/password_change/done.html')
| {
"content_hash": "933fe4a9dacba171bbae01e7a85d44da",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 33.52857142857143,
"alnum_prop": 0.6574350234341713,
"repo_name": "ulule/django-safety",
"id": "6197a54a69e855474a2ec76b3453ef7f59de8e52",
"size": "2372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safety/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5783"
},
{
"name": "Makefile",
"bytes": "1230"
},
{
"name": "Python",
"bytes": "37127"
}
],
"symlink_target": ""
} |
import re
from warnings import warn
from compressor.exceptions import UncompressableFileError
from django.apps import AppConfig
from django.conf import settings
from django.template import Template, Context
class DezedeConfig(AppConfig):
name = 'dezede'
verbose_name = 'Dezède'
def ready(self):
# Sets TinyMCE styling to the front-end styling
html = ('{% load compress static %}'
'{% compress css %}'
' <link rel="stylesheet" type="text/less"'
' href="{% static "css/styles.less" %}" />'
'{% endcompress %}')
try:
html = Template(html).render(Context())
except UncompressableFileError:
warn('Unable to apply front-end styling to the admin!')
else:
settings.TINYMCE_DEFAULT_CONFIG['content_css'] = re.search(
r'href="([^"]+)"', html).group(1)
| {
"content_hash": "984756af7a924da132510418d81a1afb",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 34.148148148148145,
"alnum_prop": 0.5921908893709328,
"repo_name": "dezede/dezede",
"id": "bc61c9921c9b55fef868ef3a430effda8506b08a",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dezede/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10100"
},
{
"name": "HTML",
"bytes": "205803"
},
{
"name": "JavaScript",
"bytes": "53836"
},
{
"name": "Less",
"bytes": "21716"
},
{
"name": "Python",
"bytes": "818952"
},
{
"name": "Shell",
"bytes": "433"
},
{
"name": "TeX",
"bytes": "5922"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from pytz import timezone
from dojo.models import Finding
from dojo.utils import update_issue, get_system_setting
locale = timezone(get_system_setting('time_zone'))
"""
Author: Aaron Weaver
This script will locate open, active findings and update them in Jira. Useful if you need to make bulk changes with Jira:
"""
class Command(BaseCommand):
help = 'No input commands for Jira bulk update.'
def handle(self, *args, **options):
findings = Finding.objects.exclude(jira_issue__isnull=True)
findings = findings.filter(verified=True, active=True)
for finding in findings:
print("Checking issue:" + str(finding.id))
update_issue(finding, finding.status(), True)
print("########\n")
| {
"content_hash": "3c76a10b2720769d67b538ab407086a7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 121,
"avg_line_length": 31,
"alnum_prop": 0.6923076923076923,
"repo_name": "OWASP/django-DefectDojo",
"id": "91cc894a2a94c0d770d42b9bc51c7ba1f71fed5f",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/management/commands/push_to_jira_update.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18512"
},
{
"name": "HTML",
"bytes": "848751"
},
{
"name": "JavaScript",
"bytes": "6717"
},
{
"name": "Python",
"bytes": "869791"
},
{
"name": "Ruby",
"bytes": "998"
},
{
"name": "Shell",
"bytes": "30386"
},
{
"name": "Smarty",
"bytes": "3485"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
import datetime
import json
from celery.execute import send_task
from django.conf import settings
from django.db import models
from humfrey.sparql.models import Store
from humfrey.update.models import UpdateDefinition
INDEX_STATUS_CHOICES = (
('idle', 'Idle'),
('queued', 'Queued'),
('active', 'Active'),
)
class Index(models.Model):
UPDATE_QUEUE = 'humfrey:elasticsearch:index-queue'
slug = models.CharField(max_length=50, primary_key=True)
stores = models.ManyToManyField(Store, null=True)
title = models.CharField(max_length=128)
query = models.TextField()
mapping = models.TextField(blank=True)
update_mapping = models.BooleanField()
groups = models.CharField(max_length=256, blank=True)
update_after = models.ManyToManyField(UpdateDefinition, blank=True)
status = models.CharField(max_length=10, choices=INDEX_STATUS_CHOICES, default='idle')
last_queued = models.DateTimeField(null=True, blank=True)
last_started = models.DateTimeField(null=True, blank=True)
last_completed = models.DateTimeField(null=True, blank=True)
item_count = models.IntegerField(null=True, blank=True)
def __unicode__(self):
return self.title
def __init__(self, *args, **kwargs):
super(Index, self).__init__(*args, **kwargs)
self._original_mapping = self.mapping
def save(self, *args, **kwargs):
if self._original_mapping != self.mapping:
self.mapping = json.dumps(json.loads(self.mapping), indent=2)
self.update_mapping = True
return super(Index, self).save(*args, **kwargs)
def _get_url(self, store, path, pattern):
params = {'slug': self.slug,
'store': store.slug}
params.update(settings.ELASTICSEARCH_SERVER)
if not path:
pattern = 'http://%(host)s:%(port)d' + pattern
return pattern % params
def get_index_url(self, store, path=False):
return self._get_url(store, path, '/%(store)s')
def get_index_status_url(self, store, path=False):
return self._get_url(store, path, '/%(store)s/_status')
def get_type_url(self, store, path=False):
return self._get_url(store, path, '/%(store)s/%(slug)s')
def get_type_status_url(self, store, path=False):
return self._get_url(store, path, '/%(store)s/%(slug)s/_status')
def get_bulk_url(self, store, path=False):
return self._get_url(store, path, '/%(store)s/%(slug)s/_bulk')
def get_mapping_url(self, store, path=False):
return self._get_url(store, path, '/%(store)s/%(slug)s/_mapping')
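# Illustrative sketch (editorial addition, values are assumptions): with
# settings.ELASTICSEARCH_SERVER == {'host': 'localhost', 'port': 9200}, an
# Index with slug 'articles' and a Store with slug 'public',
# get_bulk_url(store) returns 'http://localhost:9200/public/articles/_bulk',
# while get_bulk_url(store, path=True) returns just '/public/articles/_bulk'.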
class Meta:
verbose_name_plural = 'indexes'
def queue(self):
if self.status != 'idle':
return
self.status = 'queued'
self.last_queued = datetime.datetime.now()
self.save()
send_task('humfrey.elasticsearch.update_index', kwargs={'index': self.slug})
| {
"content_hash": "060b227fdda29766a4f6442d2e62f226",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 90,
"avg_line_length": 32.24175824175824,
"alnum_prop": 0.6414451261077028,
"repo_name": "ox-it/humfrey",
"id": "dd5309b30cfd65938e52dd40c98753050691ea9e",
"size": "2934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "humfrey/elasticsearch/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2990"
},
{
"name": "HTML",
"bytes": "39767"
},
{
"name": "JavaScript",
"bytes": "658"
},
{
"name": "Python",
"bytes": "375681"
}
],
"symlink_target": ""
} |
"""
The core of grab package: the Grab class.
"""
from __future__ import absolute_import
import logging
import os
#import urllib
from random import randint, choice
from copy import copy
import threading
import itertools
import collections
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import time
import re
import json
import email
from datetime import datetime
from .proxylist import ProxyList, parse_proxyline
from .tools.html import find_refresh_url, find_base_url
from .response import Response
from . import error
from .upload import UploadContent, UploadFile
from .tools.http import normalize_http_values, normalize_url
from .extension import register_extensions
from grab.util.py2old_support import *
from grab.util.py3k_support import *
# This counter will be used in enumerating network queries.
# Its value will be displayed in logging messages and also used
# in names of dumps.
# I use a mutable module variable to allow different
# instances of Grab to maintain a single counter.
# This could be helpful in debugging when your script
# creates multiple Grab instances - in case of a shared counter
# grab instances do not overwrite each other's dump logs.
GLOBAL_STATE = {
'dom_build_time': 0,
'selector_time': 0,
}
REQUEST_COUNTER = itertools.count(1)
# Some extensions need the GLOBAL_STATE variable
# that's why they go after the GLOBAL_STATE definition
from .ext.lxml import LXMLExtension
from .ext.form import FormExtension
from .ext.django import DjangoExtension
from .ext.text import TextExtension
from .ext.rex import RegexpExtension
from .ext.pquery import PyqueryExtension
from .ext.ftp import FTPExtension
from .ext.doc import DocExtension
from .ext.kit import KitExtension
__all__ = ('Grab', 'UploadContent', 'UploadFile')
MUTABLE_CONFIG_KEYS = ['post', 'multipart_post', 'headers', 'cookies',
'hammer_timeouts']
logger = logging.getLogger('grab.base')
# Logger to handle network activity
# It is a separate logger to allow you to easily
# control network logging separately from other grab logs
logger_network = logging.getLogger('grab.network')
def copy_config(config, mutable_config_keys=MUTABLE_CONFIG_KEYS):
"""
Copy grab config object with correct handling
of mutable config values.
"""
cloned_config = copy(config)
# Apply ``copy`` function to mutable config values
for key in mutable_config_keys:
cloned_config[key] = copy(config[key])
return cloned_config
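# Illustrative sketch (editorial addition): without the per-key copy above, two
# configs produced by a plain copy() would share the same mutable dicts, so
# mutating one would silently affect the other:
#
#   cfg = default_config()
#   shallow = copy(cfg)
#   shallow['headers']['X-Test'] = '1'    # also shows up in cfg['headers']
#
#   safe = copy_config(cfg)
#   safe['headers']['X-Test'] = '1'       # cfg['headers'] stays untouched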
def default_config():
# TODO: it seems config should be split into two entities:
# 1) config which is not changed during request
# 2) changeable config
return dict(
# Common
url = None,
# Debugging
log_file = None,
log_dir = False,
debug_post = False,
# Only for curl transport
debug = False,
verbose_logging = False,
# Only for selenium transport
webdriver = 'firefox',
selenium_wait = 1, # in seconds
# Proxy
proxy = None,
proxy_type = None,
proxy_userpwd = None,
proxy_auto_change = True,
# Method, Post
method = None,
post = None,
multipart_post = None,
# Headers, User-Agent, Referer
headers = {},
common_headers = {},
user_agent = None,
user_agent_file = None,
referer = None,
reuse_referer = True,
# Cookies
cookies = {},
reuse_cookies = True,
cookiefile = None,
# Timeouts
timeout = 15,
connect_timeout = 10,
hammer_mode = False,
hammer_timeouts = ((2, 5), (5, 10), (10, 20), (15, 30)),
# Response processing
nobody = False,
body_maxsize = None,
body_inmemory = True,
body_storage_dir = None,
body_storage_filename = None,
# Content compression
encoding = 'gzip',
# Network interface
interface = None,
# Redirects
follow_refresh = False,
follow_location = True,
refresh_redirect_count = 0,
redirect_limit = 10,
# Authentication
userpwd = None,
# Character set to which any unicode data should be encoded
# before it gets placed in the request
# This setting is overwritten after each request with
# the charset of the retrieved document
charset = 'utf-8',
# Charset to use for converting content of response
# into unicode, by default it is detected automatically
document_charset = None,
# Content type controls how the DOM is built
# For html type the HTML DOM builder is used
# For xml type the XML DOM builder is used
content_type = 'html',
# Fix &#X; entities, where X is between 128 and 160
# Such entities are parsed by modern browsers as
# windows-1251 entities independently of the real charset of
# the document. If this option is True then such entities
# will be replaced with correct unicode entities e.g.:
# &#151; -> —
fix_special_entities = True,
# Convert document body to lower case before building LXML tree
# It does not affect `response.body`
lowercased_tree = False,
# Strip null bytes from document body before building LXML tree
# It does not affect `response.body`
strip_null_bytes = True,
)
class Grab(
LXMLExtension, FormExtension, PyqueryExtension,
DjangoExtension, TextExtension, RegexpExtension,
FTPExtension, DocExtension, KitExtension,
):
__slots__ = ('request_head', 'request_log', 'request_body',
'proxylist', 'config', '_request_prepared',
'clone_counter', 'response', 'transport',
'transport_param', 'request_method', 'request_counter',
'__weakref__',
# Dirty hack to make it possible to inherit Grab from
# multiple base classes with __slots__
'_lxml_form', '_file_fields',
'_lxml_tree', '_strict_lxml_tree',
'_pyquery', '_doc', '_kit',
)
# Points which could be handled in extension classes
extension_points = ('config', 'init', 'reset')
# Attributes which should be processed when clone
# of Grab instance is creating
clonable_attributes = ('request_head', 'request_log', 'request_body',
'proxylist')
# Complex config items which points to mutable objects
mutable_config_keys = copy(MUTABLE_CONFIG_KEYS)
"""
Public methods
"""
def __init__(self, response_body=None, transport='grab.transport.curl.CurlTransport',
**kwargs):
"""
Create Grab instance
"""
self.config = default_config()
self.config['common_headers'] = self.common_headers()
self.trigger_extensions('config')
self.trigger_extensions('init')
self._request_prepared = False
self.setup_transport(transport)
self.reset()
self.proxylist = None
if kwargs:
self.setup(**kwargs)
self.clone_counter = 0
if response_body is not None:
self.fake_response(response_body)
def setup_transport(self, transport_param):
self.transport_param = transport_param
if isinstance(transport_param, basestring):
mod_path, cls_name = transport_param.rsplit('.', 1)
mod = __import__(mod_path, globals(), locals(), ['foo'])
self.transport = getattr(mod, cls_name)()
elif isinstance(transport_param, collections.Callable):
self.transport = transport_param()
else:
raise error.GrabMisuseError('Option `transport` should be string or callable. '\
'Got %s' % type(transport_param))
def reset(self):
"""
Reset all attributes which could be modified during previous request
or which are not initialized yet if this is a new Grab instance.
This method is automatically called before each network request.
"""
self.response = None
self.request_head = None
self.request_log = None
self.request_body = None
self.request_method = None
self.trigger_extensions('reset')
self.transport.reset()
def clone(self, **kwargs):
"""
Create clone of Grab instance.
Cloned instance will have the same state: cookies, referer, response data
:param **kwargs: overrides settings of cloned grab instance
"""
g = Grab(transport=self.transport_param)
g.config = self.dump_config()
if self.response is not None:
g.response = self.response.copy()
for key in self.clonable_attributes:
setattr(g, key, getattr(self, key))
g.clone_counter = self.clone_counter + 1
if kwargs:
g.setup(**kwargs)
return g
def adopt(self, g):
"""
Copy the state of another `Grab` instance.
Use case: create backup of current state to the cloned instance and
then restore the state from it.
"""
self.load_config(g.config)
if g.response is not None:
self.response = g.response.copy()
for key in self.clonable_attributes:
setattr(self, key, getattr(g, key))
self.clone_counter = g.clone_counter + 1
def dump_config(self):
"""
Make clone of current config.
"""
return copy_config(self.config, self.mutable_config_keys)
def load_config(self, config):
"""
Configure grab instance with external config object.
"""
self.config = copy_config(config, self.mutable_config_keys)
def setup(self, **kwargs):
"""
Setting up Grab instance configuration.
"""
for key in kwargs:
if not key in self.config.keys():
raise error.GrabMisuseError('Unknown option: %s' % key)
if 'url' in kwargs:
if self.config.get('url'):
kwargs['url'] = self.make_url_absolute(kwargs['url'])
kwargs['url'] = normalize_url(kwargs['url'])
self.config.update(kwargs)
def go(self, url, **kwargs):
"""
Go to ``url``
Args:
:url: could be absolute or relative. If relative then it will be appended to the
absolute URL of previous request.
"""
return self.request(url=url, **kwargs)
def download(self, url, location, **kwargs):
"""
Fetch document located at ``url`` and save it to ``location``.
"""
response = self.go(url, **kwargs)
with open(location, 'wb') as out:
out.write(response.body)
return len(response.body)
def prepare_request(self, **kwargs):
"""
Configure all things to make real network request.
This method is called before doing the real request via
the transport extension.
"""
# Reset the state set by the previous request
if not self._request_prepared:
self.reset()
self.request_counter = next(REQUEST_COUNTER)
if kwargs:
self.setup(**kwargs)
if self.proxylist and self.config['proxy_auto_change']:
self.change_proxy()
self.request_method = self.detect_request_method()
self.transport.process_config(self)
self._request_prepared = True
def log_request(self, extra=''):
"""
Send request details to logging system.
"""
tname = threading.currentThread().getName().lower()
if tname == 'mainthread':
tname = ''
else:
tname = '-%s' % tname
if self.config['proxy']:
if self.config['proxy_userpwd']:
auth = ' with authorization'
else:
auth = ''
proxy_info = ' via %s proxy of type %s%s' % (
self.config['proxy'], self.config['proxy_type'], auth)
else:
proxy_info = ''
if extra:
extra = '[%s] ' % extra
logger_network.debug('[%02d%s] %s%s %s%s' % (
self.request_counter, tname,
extra, self.request_method or 'GET',
self.config['url'], proxy_info))
def request(self, **kwargs):
"""
Perform network request.
You can specify grab settings in ``**kwargs``.
Any keyword argument will be passed to ``self.config``.
Returns: ``Response`` object.
"""
if self.config['hammer_mode']:
hammer_timeouts = list(self.config['hammer_timeouts'])
connect_timeout, total_timeout = hammer_timeouts.pop(0)
self.setup(connect_timeout=connect_timeout, timeout=total_timeout)
while True:
try:
self.prepare_request(**kwargs)
self.log_request()
self.transport.request()
except error.GrabError as ex:
# In hammer mode try to use next timeouts
if self.config['hammer_mode']:# and isinstance(ex, (error.GrabTimeoutError,
# error.GrabConnectionError)):
# If no more timeouts
# then raise an error
if not hammer_timeouts:
self._request_prepared = False
self.save_failed_dump()
raise
else:
connect_timeout, total_timeout = hammer_timeouts.pop(0)
self.setup(connect_timeout=connect_timeout, timeout=total_timeout)
logger_network.debug('Trying next timeouts. Connect: %d sec., total: %d sec.' % (connect_timeout, total_timeout))
self._request_prepared = False
# If we are not in hammer mode
# Then just raise an error
else:
self._request_prepared = False
self.save_failed_dump()
raise
else:
# Break the infinite loop in case of success response
break
# It will configure `self.response`
self.process_request_result()
return self.response
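# Illustrative usage sketch (editorial addition; URL is a placeholder): with
# hammer_mode enabled, request() retries failed network calls with the growing
# (connect, total) timeout pairs from hammer_timeouts before giving up:
#
#   g = Grab()
#   g.setup(hammer_mode=True, hammer_timeouts=((2, 5), (5, 10), (10, 20)))
#   g.go('http://example.com/')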
def process_request_result(self, prepare_response_func=None):
"""
Process result of real request performed via transport extension.
"""
now = datetime.now()
# TODO: move into separate method
if self.config['debug_post']:
post = self.config['post'] or self.config['multipart_post']
if isinstance(post, dict):
post = list(post.items())
if post:
if isinstance(post, basestring):
post = post[:150] + '...'
else:
items = normalize_http_values(post, charset='utf-8')
new_items = []
for key, value in items:
if len(value) > 150:
value = value[:150] + '...'
else:
value = value
new_items.append((key, value))
post = '\n'.join('%-25s: %s' % x for x in new_items)
if post:
logger_network.debug('POST request:\n%s\n' % post)
# It's important to delete old POST data after request is performed.
# If POST data is not cleared then the next request will try to use it again!
old_refresh_count = self.config['refresh_redirect_count']
self.reset_temporary_options()
if prepare_response_func:
self.response = prepare_response_func(self.transport, self)
else:
self.response = self.transport.prepare_response(self)
self.response.timestamp = now
self.config['charset'] = self.response.charset
if self.config['reuse_cookies']:
# Copy cookies from response into config object
for name, value in self.response.cookies.items():
self.config['cookies'][name] = value
# TODO: raise GrabWarning if self.config['http_warnings']
#if 400 <= self.response_code:
#raise IOError('Response code is %s: ' % self.response_code)
if self.config['log_file']:
with open(self.config['log_file'], 'wb') as out:
out.write(self.response.body)
if self.config['cookiefile']:
self.dump_cookies(self.config['cookiefile'])
if self.config['reuse_referer']:
self.config['referer'] = self.response.url
self.copy_request_data()
# Should be called after `copy_request_data`
self.save_dumps()
self._request_prepared = False
# TODO: check max redirect count
if self.config['follow_refresh']:
url = find_refresh_url(self.response.unicode_body())
print('URL', url)
if url is not None:
inc_count = old_refresh_count + 1
if inc_count > self.config['redirect_limit']:
raise error.GrabTooManyRedirectsError()
else:
print(inc_count)
return self.request(url=url, refresh_redirect_count=inc_count)
return None
def reset_temporary_options(self):
self.config['post'] = None
self.config['multipart_post'] = None
self.config['method'] = None
self.config['body_storage_filename'] = None
self.config['refresh_redirect_count'] = 0
def save_failed_dump(self):
"""
Save dump of failed request for debugging.
This method is called when a fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure.
"""
# This is a very untested feature, so
# I put it inside try/except to not break
# live spiders
try:
self.response = self.transport.prepare_response(self)
self.copy_request_data()
self.save_dumps()
except Exception as ex:
logging.error(unicode(ex))
def copy_request_data(self):
# TODO: Maybe request object?
self.request_head = self.transport.request_head
self.request_body = self.transport.request_body
self.request_log = self.transport.request_log
def fake_response(self, content, **kwargs):
"""
Setup `response` object without real network requests.
Useful for testing and debugging.
All ``**kwargs`` will be passed to `Response` constructor.
"""
# Trigger reset
self.reset()
# Configure fake response object
res = Response()
res.body = content
res.status = ''
res.head = ''
res.parse(charset=kwargs.get('document_charset'))
res.cookies = {}
res.code = 200
res.total_time = 0
res.connect_time = 0
res.name_lookup_time = 0
res.url = ''
for key, value in kwargs.items():
setattr(res, key, value)
self.response = res
def load_proxylist(self, source, source_type, proxy_type='http',
auto_init=True, auto_change=True,
**kwargs):
self.proxylist = ProxyList(source, source_type, proxy_type=proxy_type, **kwargs)
self.setup(proxy_auto_change=auto_change)
if not auto_change and auto_init:
self.change_proxy()
def change_proxy(self):
"""
Set random proxy from proxylist.
"""
if self.proxylist:
server, userpwd, proxy_type = self.proxylist.get_random()
self.setup(proxy=server, proxy_userpwd=userpwd,
proxy_type=proxy_type)
else:
logging.debug('Could not change proxy because proxy list is not loaded')
"""
Private methods
"""
def common_headers(self):
"""
Build headers which a typical browser sends.
"""
return {
'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.%d' % randint(2, 5),
'Accept-Language': 'en-us,en;q=0.%d' % (randint(5, 9)),
'Accept-Charset': 'utf-8,windows-1251;q=0.7,*;q=0.%d' % randint(5, 7),
'Keep-Alive': '300',
'Expect': '',
}
def save_dumps(self):
if self.config['log_dir']:
tname = threading.currentThread().getName().lower()
if tname == 'mainthread':
tname = ''
else:
tname = '-%s' % tname
fname = os.path.join(self.config['log_dir'], '%02d%s.log' % (
self.request_counter, tname))
with open(fname, 'w') as out:
out.write('Request:\n')
out.write(self.request_head)
out.write('\n')
out.write('Response:\n')
out.write(self.response.head)
fext = 'html'
fname = os.path.join(self.config['log_dir'], '%02d%s.%s' % (
self.request_counter, tname, fext))
self.response.save(fname)
def make_url_absolute(self, url, resolve_base=False):
"""
Make url absolute using previous request url as base url.
"""
if self.config['url']:
if resolve_base:
ubody = self.response.unicode_body()
base_url = find_base_url(ubody)
if base_url:
return urljoin(base_url, url)
return urljoin(self.config['url'], url)
else:
return url
def detect_request_method(self):
"""
Analyze request config and find which
request method will be used.
Returns request method in upper case.
This method is needed sometimes when the process_config method
has not been executed yet.
"""
method = self.config['method']
if method:
method = method.upper()
else:
if self.config['post'] or self.config['multipart_post']:
method = 'POST'
else:
method = 'GET'
return method
def clear_cookies(self):
"""
Clear all remembered cookies.
"""
self.config['cookies'] = {}
def load_cookies(self, path, file_required=True):
"""
Load cookies from the file.
Content of file should be a JSON-serialized dict of keys and values.
"""
try:
with open(path) as inf:
data = inf.read()
if data:
cookies = json.loads(data)
else:
cookies = {}
except IOError:
if file_required:
raise
else:
pass
else:
self.config['cookies'].update(cookies)
def dump_cookies(self, path):
"""
Dump all cookies to file.
Cookies are dumped as JSON-serialized dict of keys and values.
"""
with open(path, 'w') as out:
out.write(json.dumps(self.config['cookies']))
def setup_with_proxyline(self, line, proxy_type='http'):
# TODO: remove from base class
# maybe to proxylist?
host, port, user, pwd = parse_proxyline(line)
server_port = '%s:%s' % (host, port)
self.setup(proxy=server_port, proxy_type=proxy_type)
if user:
userpwd = '%s:%s' % (user, pwd)
self.setup(proxy_userpwd=userpwd)
def __getstate__(self):
"""
Reset cached lxml objects which could not be pickled.
"""
state = {}
for cls in type(self).mro():
cls_slots = getattr(cls, '__slots__', ())
for slot in cls_slots:
if slot != '__weakref__':
if hasattr(self, slot):
state[slot] = getattr(self, slot)
state['_lxml_form'] = None
state['_lxml_tree'] = None
state['_strict_lxml_tree'] = None
return state
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@property
def request_headers(self):
"""
Temporary hack until I understand
where to store request details.
"""
try:
first_head = self.request_head.split('\r\n\r\n')[0]
lines = first_head.split('\r\n')
lines = [x for x in lines if ':' in x]
headers = email.message_from_string('\n'.join(lines))
return headers
except Exception as ex:
logging.error('Could not parse request headers', exc_info=ex)
return {}
register_extensions(Grab)
# For backward compatibility
BaseGrab = Grab
| {
"content_hash": "a01d5ff7391604aa8a2593d2064d7ea0",
"timestamp": "",
"source": "github",
"line_count": 791,
"max_line_length": 141,
"avg_line_length": 32.07458912768647,
"alnum_prop": 0.5614678175870088,
"repo_name": "boooka/GeoPowerOff",
"id": "984d02bc201fda7be5ccbfc42411ed9a8908f040",
"size": "25496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/grab/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "38253"
},
{
"name": "CSS",
"bytes": "102019"
},
{
"name": "JavaScript",
"bytes": "121188"
},
{
"name": "Python",
"bytes": "7232605"
},
{
"name": "Shell",
"bytes": "3777"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
class While:
def __init__(self, condition, body):
self.condition = condition
self.body = body
def eval(self, env):
while self.condition.eval(env):
self.body.eval(env)
def __repr__(self):
return "While: {0} {1}".format(self.condition, self.body)
| {
"content_hash": "8c6421c03508e14927015a629f6b279c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 65,
"avg_line_length": 29.363636363636363,
"alnum_prop": 0.5356037151702786,
"repo_name": "henrik645/rpp",
"id": "e4b2b1573fb10e95019f564cf5b63b1a2190bc52",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "while.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4876"
}
],
"symlink_target": ""
} |
import unittest
from httmock import HTTMock
from myria.connection import MyriaConnection
from myria.test.mock import *
from myria.udf import MyriaFunction, MyriaPostgresFunction, MyriaPythonFunction, myria_function
from raco.backends.myria.connection import FunctionTypes
from raco.myrial.parser import Parser
class TestUDF(unittest.TestCase):
def __init__(self, args):
with HTTMock(create_mock()):
self.connection = MyriaConnection(hostname='localhost', port=12345)
super(TestUDF, self).__init__(args)
def test_get_all(self):
with HTTMock(create_mock()):
functions = MyriaFunction.get_all(self.connection)
self.assertGreaterEqual(len(functions), 2)
self.assertEqual(functions[0].language, FunctionTypes.PYTHON)
self.assertEqual(functions[1].language, FunctionTypes.PYTHON)
self.assertEqual(functions[0].name, UDF1_NAME)
self.assertEqual(functions[1].name, UDF2_NAME)
self.assertEqual(functions[0].output_type, UDF1_TYPE)
self.assertEqual(functions[1].output_type, UDF2_TYPE)
def test_get(self):
with HTTMock(create_mock()):
function = MyriaFunction.get(UDF1_NAME, self.connection)
self.assertEqual(function.language, FunctionTypes.PYTHON)
self.assertEqual(function.name, UDF1_NAME)
self.assertEqual(function.output_type, UDF1_TYPE)
def test_register(self):
server_state = {}
with HTTMock(create_mock(server_state)):
f = MyriaFunction('mockudf', 'source', STRING_TYPE,
FunctionTypes.PYTHON, False,
connection=self.connection)
f.register()
self.assertEqual(len(server_state), 1)
self.assertDictEqual(f.to_dict(), server_state.values()[0])
self.assertFalse(server_state.values()[0]['isMultiValued'])
self.assertEqual(server_state.values()[0]['outputType'],
'STRING_TYPE')
def test_python_udf(self):
name = 'pyudf'
server_state = {}
with HTTMock(create_mock(server_state)):
f = MyriaPythonFunction(lambda: False, STRING_TYPE, name,
multivalued=False,
connection=self.connection)
f.register()
d = MyriaPythonFunction.from_dict(server_state[name]).to_dict()
self.assertEqual(len(server_state), 1)
self.assertIsNotNone(MyriaFunction.get(name, self.connection))
self.assertIn(name, Parser.udf_functions)
self.assertEqual(f.to_dict()['name'], d['name'])
self.assertEqual(f.to_dict()['outputType'], d['outputType'])
self.assertEqual(f.to_dict()['lang'], d['lang'])
self.assertEqual(f.to_dict()['isMultiValued'], d['isMultiValued'])
def test_postgres_udf(self):
name = 'postgresudf'
server_state = {}
with HTTMock(create_mock(server_state)):
f = MyriaPostgresFunction(name, 'source', STRING_TYPE,
multivalued=False,
connection=self.connection)
f.register()
d = MyriaPythonFunction.from_dict(server_state[name]).to_dict()
self.assertEqual(len(server_state), 1)
self.assertIsNotNone(MyriaFunction.get(name, self.connection))
self.assertIn(name, Parser.udf_functions)
self.assertEqual(f.to_dict()['name'], d['name'])
self.assertEqual(f.to_dict()['outputType'], d['outputType'])
self.assertEqual(f.to_dict()['isMultiValued'], d['isMultiValued'])
def test_extension_method(self):
server_state = {}
with HTTMock(create_mock(server_state)):
name = 'my_udf'
@myria_function(name=name, output_type=STRING_TYPE,
connection=self.connection)
def my_udf(t):
return None
self.assertEqual(len(server_state), 1)
self.assertIsNotNone(MyriaFunction.get(name, self.connection))
self.assertIn(name, Parser.udf_functions)
d = MyriaPythonFunction.from_dict(server_state[name]).to_dict()
self.assertEqual(d['name'], name)
self.assertEqual(d['outputType'], STRING_TYPE)
| {
"content_hash": "9f075a501aa6527b4d8a0d318d35b8c3",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 40.22727272727273,
"alnum_prop": 0.5966101694915255,
"repo_name": "uwescience/myria-python",
"id": "bd6b8fa87708237f95ad354053664855848c915e",
"size": "4425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myria/test/test_udf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "659489"
},
{
"name": "Python",
"bytes": "163912"
},
{
"name": "Shell",
"bytes": "209"
}
],
"symlink_target": ""
} |
from scipy.io.mmio import mmwrite
__author__ = 'hfriedrich'
import logging
import codecs
import sys
import numpy as np
from scipy.io import mmread
from scipy.sparse import csr_matrix, lil_matrix, tril, coo_matrix
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from extrescal.extrescal import rescal
from itertools import product
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
_log = logging.getLogger()
# This file contains util functions for the processing of the tensor (including handling
# of atoms, attributes, etc.)
def startsWithAttr(str):
return str.startswith('Attr:')
class SparseTensor:
CONNECTION_SLICE = 0
def __init__(self, headers, atomIndices, attrIndices):
self.shape = (len(headers), len(headers))
self.data = []
self.headers = list(headers)
self.atomIndices = atomIndices
self.attrIndices = attrIndices
def copy(self):
copyTensor = SparseTensor(self.headers, self.atomIndices, self.attrIndices)
for i in range(len(self.data)):
copyTensor.addSliceMatrix(self.data[i], i)
return copyTensor
def getMatrixShape(self):
return self.shape
def getSliceMatrix(self, slice):
return self.data[slice].copy()
def getSliceMatrixList(self):
list = [slice.copy() for slice in self.data]
return list
def addSliceMatrix(self, matrix, slice):
if self.shape != matrix.shape:
raise Exception("Bad shape of added slices of tensor, is (%d,%d) but should be (%d,%d)!" %
(matrix.shape[0], matrix.shape[1], self.shape[0], self.shape[1]))
self.data.insert(slice, csr_matrix(matrix))
def getHeaders(self):
return list(self.headers)
def getArrayFromSliceMatrix(self, slice, indices):
return matrix_to_array(self.data[slice], indices)
# return a list of indices which refer to rows/columns of atoms in the tensor
def getAtomIndices(self):
return self.atomIndices
#atoms = [i for i in range(0, len(self.getHeaders())) if (self.getHeaders()[i].startswith('Atom:'))]
#return atoms
# return a list of indices which refer to rows/columns of attributes in the tensor
def getAttributeIndices(self):
return self.attrIndices
#attrs = [i for i in range(0, len(self.getHeaders())) if (self.getHeaders()[i].startswith('Attr:'))]
#return attrs
def getAtomLabel(self, atom):
return self.getHeaders()[atom][6:]
def getAttributesForAtom(self, atom, slice):
attr = self.data[slice][atom,].nonzero()[1]
attr = [self.getHeaders()[i][6:] for i in attr]
return attr
def hasConnection(self, atom1, atom2):
return (self.data[SparseTensor.CONNECTION_SLICE][atom1,atom2] != 0)
# return the "atom x atom" matrix and their connections between them without attributes for the extension of
# the rescal algorithm extrescal
def getPureAtomConnectionMatrix(self):
return self.getSliceMatrix(SparseTensor.CONNECTION_SLICE)
# return the "atom x attribute" matrix D for the extension of the rescal algorithm extrescal
def getAtomAttributeMatrix(self):
D = self.getSliceMatrix(1)
for i in range(2, len(self.data)):
D = D + self.getSliceMatrix(i)
attrIndices = self.getAttributeIndices()
D = D.tocsc()[:, attrIndices]
return D.tocsr()
# read the input tensor data (e.g. data-0.mtx ... ) and the headers file (e.g. headers.txt)
# if adjustDim is True then the dimensions of the slice matrix
# files are automatically adjusted to fit to biggest dimensions of all slices
def read_input_tensor(headers_filename, atom_indices_filename, data_file_names, adjustDim=False):
#load the header file
_log.info("Read header input file: " + headers_filename)
input = codecs.open(headers_filename,'r',encoding='utf8')
headers = input.read().splitlines()
input.close()
# load the atom indices file and calculate the attr indices from that
_log.info("Read the atom indices file: " + atom_indices_filename)
indicesFile = codecs.open(atom_indices_filename,'r',encoding='utf8')
atomIndices = map(int, indicesFile.read().splitlines())
indicesFile.close()
attrIndices = list(set(range(len(headers))) - set(atomIndices))
# get the largest dimension of all slices
if adjustDim:
maxDim = 0
for data_file in data_file_names:
matrix = mmread(data_file)
if maxDim < matrix.shape[0]:
maxDim = matrix.shape[0]
if maxDim < matrix.shape[1]:
maxDim = matrix.shape[1]
# load the data files
slice = 1
tensor = SparseTensor(headers, atomIndices, attrIndices)
for data_file in data_file_names:
if adjustDim:
adjusted = adjust_mm_dimension(data_file, maxDim)
if adjusted:
_log.warn("Adujst dimension to (%d,%d) of matrix file: %s" % (maxDim, maxDim, data_file))
if data_file.endswith("connection.mtx"):
_log.info("Read as slice %d the data input file: %s" % (0, data_file))
matrix = mmread(data_file)
tensor.addSliceMatrix(matrix, 0)
else:
_log.info("Read as slice %d the data input file: %s" % (slice, data_file))
matrix = mmread(data_file)
tensor.addSliceMatrix(matrix, slice)
slice = slice + 1
return tensor
# adjust (increase) the dimension of an mm matrix file
def adjust_mm_dimension(data_file, dim):
file = codecs.open(data_file,'r',encoding='utf8')
lines = file.read().splitlines()
file.close()
for line in lines:
if not line.startswith('%'):
vals = line.split(' ')
if (int(vals[0]) == dim and int(vals[1]) == dim):
return False
file = codecs.open(data_file,'w+',encoding='utf8')
found = False
for line in lines:
if not line.startswith('%') and not found:
vals = line.split(' ')
newLine = str(dim) + " " + str(dim) + " " + vals[2]
file.write(newLine + "\n")
found = True
else:
file.write(line + "\n")
file.close()
return True
def execute_extrescal(input_tensor, rank, init='nvecs', conv=1e-4, lmbda=0.0):
temp_tensor = [input_tensor.getPureAtomConnectionMatrix()]
D = input_tensor.getAtomAttributeMatrix()
_log.info('start extrescal processing ...')
_log.info('config: init=%s, conv=%f, lmbda=%f' % (init, conv, lmbda))
_log.info('Tensor: %d x %d x %d | Attribute Matrix: %d x %d | Rank: %d' % (
temp_tensor[0].shape + (len(temp_tensor),) + D.shape + (rank,))
)
result = rescal(temp_tensor, D, rank, init=init, conv=conv, lmbda=lmbda)
_log.info('extrescal stopped processing')
A = result[0]
R = result[1]
return A, R
# create a similarity matrix of atoms (and attributes)
def similarity_ranking(A):
dist = squareform(pdist(A, metric='cosine'))
return dist
# return the specified indices from a sparse matrix as an numpy array
def matrix_to_array(m, indices):
return np.array(m[indices])[0]
# predict hints from the rescal algorithm output
# HINT: the performance of this prediction process could be increased by parallelization, currently only one
# thread/cpu is used to perform this computation
# Parameters:
# - A, R: result matrices of rescal algorithm
# - threshold: write out only those predictions that are above the threshold
# - input_tensor: tensor for which the predictions are computed
# - symmetric: are connections between atoms symmentric? then only the half of the predictions have to be computed
# - keepConnections: if true keep the predictions between the atoms where a connection existed before
def predict_rescal_hints_by_threshold(A, R, threshold, input_tensor, symmetric=True, keepConnections=False):
rows = []
cols = []
data = []
A_T = A.T
rounds = 0
for j in input_tensor.getAtomIndices():
if (rounds % 1000 == 0):
_log.debug("Processing predictions ... number of atoms processed: " + str(rounds) + " (out of " + str(len(input_tensor.getAtomIndices())) + ")")
rounds = rounds + 1
colPred = np.dot(R[SparseTensor.CONNECTION_SLICE], A_T[:,j])
for i in input_tensor.getAtomIndices():
if ((not symmetric) or j < i):
x = np.dot(A[i], colPred)
if (x > threshold):
if (keepConnections or (not input_tensor.hasConnection(i,j))):
rows.append(i)
cols.append(j)
data.append(x)
predictions = coo_matrix((data, (rows, cols)), shape = (input_tensor.getMatrixShape()[0], input_tensor.getMatrixShape()[0]))
return predictions
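# Illustrative usage sketch (editorial addition; the file names are hypothetical):
#
#   tensor = read_input_tensor('headers.txt', 'atom_indices.txt',
#                              ['connection.mtx', 'attr-0.mtx'])
#   A, R = execute_extrescal(tensor, rank=50)
#   hints = predict_rescal_hints_by_threshold(A, R, threshold=0.1,
#                                             input_tensor=tensor)
#
# 'hints' is then a sparse coo_matrix holding the predicted connection scores
# above the threshold between pairs of atoms.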
# TESTING METHOD: predict hints from the rescal algorithm output
# PLEASE NOTE: this matrix can only practically be build for small and medium datasets e.g. < 1000 atoms
# Parameters:
# - A, R: result matrices of rescal algorithm
# - threshold: write out only those predictions that are above the threshold
# - mask_matrix: matrix with binary entries, 1 specifies where predictions should be calculated
# - keepScore: if true keep the original score of the predictions, otherwise set all above the threshold to 1
def test_predict_rescal_hints_by_threshold(A, R, threshold, mask_matrix, keepScore=True):
# compute prediction array with scores
hint_prediction_matrix = np.dot(A, np.dot(R[SparseTensor.CONNECTION_SLICE], A.T))
# choose indices above threshold to keep
hint_indices = hint_prediction_matrix > threshold
if not keepScore:
hint_prediction_matrix[hint_indices] = 1
hint_mask_matrix = np.zeros(hint_prediction_matrix.shape)
hint_mask_matrix[hint_indices] = 1
# return the calculated predictions
hint_mask_matrix = mask_matrix.multiply(coo_matrix(hint_mask_matrix))
hint_prediction_matrix = hint_mask_matrix.multiply(coo_matrix(hint_prediction_matrix))
return hint_prediction_matrix
# TESTING METHOD: create a binary mask matrix for hint prediction, 1 specifies where predictions should be calculated.
# the mask contains by default entries between atoms of atom types that match each other and removes
# entries for connections of the tensor that were already available
# PLEASE NOTE: this matrix can only practically be build for small and medium datasets e.g. < 1000 atoms
# Parameters:
# - tensor: tensor for which the predictions are computed
# - symmetric: create a symmetric mask
# - keepConnections: if true keep the predictions between the atoms where a connection existed before
def test_create_hint_mask_matrix(tensor, symmetric=False, keepConnections=False):
# use only atom to atom indices for hint connection prediction
atom_indices = np.zeros(tensor.getMatrixShape()[0])
atom_indices[tensor.getAtomIndices()] = 1
atom_vector = atom_indices[np.newaxis]
atom_vector = lil_matrix(atom_vector)
mask_matrix = atom_vector.multiply(atom_vector.T).tolil()
mask_matrix.setdiag(0)
# optionally exclude already existing connections from prediction
if not keepConnections:
connection_array = np.asarray(tensor.getSliceMatrix(SparseTensor.CONNECTION_SLICE).toarray())
connection_indices = connection_array > 0.0
mask_matrix[connection_indices] = 0
# symmetric mask needed?
if not symmetric:
mask_matrix = tril(mask_matrix)
return mask_matrix
| {
"content_hash": "0b5971d0ce0d7b00c737acdaae8dbcb9",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 156,
"avg_line_length": 40.78767123287671,
"alnum_prop": 0.6502099076406381,
"repo_name": "researchstudio-sat/webofneeds",
"id": "ad39ac484fecdd55199103272b74b40ad632422f",
"size": "11910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webofneeds/won-matcher-rescal/src/main/python/tools/tensor_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6244"
},
{
"name": "Dockerfile",
"bytes": "15930"
},
{
"name": "EJS",
"bytes": "684"
},
{
"name": "HTML",
"bytes": "16097"
},
{
"name": "Java",
"bytes": "5108851"
},
{
"name": "JavaScript",
"bytes": "1884810"
},
{
"name": "PLpgSQL",
"bytes": "20620"
},
{
"name": "Perl",
"bytes": "2060"
},
{
"name": "PowerShell",
"bytes": "416"
},
{
"name": "Python",
"bytes": "52265"
},
{
"name": "SCSS",
"bytes": "244767"
},
{
"name": "Shell",
"bytes": "70847"
},
{
"name": "TypeScript",
"bytes": "5534"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db.models import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views import generic
from plata.discount.models import Discount
from plata.shop import forms as shop_forms
from plata.shop.views import Shop
from plata.shop.models import Order
from shop.models import Contact, Product
class CheckoutForm(shop_forms.BaseCheckoutForm):
class Meta:
fields = ['email'] + ['billing_%s' % f for f in Contact.ADDRESS_FIELDS]
model = Order
def __init__(self, *args, **kwargs):
shop = kwargs.get('shop')
request = kwargs.get('request')
contact = shop.contact_from_user(request.user)
if contact:
initial = {}
for f in contact.ADDRESS_FIELDS:
initial['billing_%s' % f] = getattr(contact, f)
kwargs['initial'] = initial
initial['email'] = contact.user.email
super(CheckoutForm, self).__init__(*args, **kwargs)
if not contact:
self.fields['create_account'] = forms.BooleanField(
label=_('create account'),
required=False, initial=True)
class CustomShop(Shop):
def checkout_form(self, request, order):
return CheckoutForm
shop = CustomShop(Contact, Order, Discount)
product_list = generic.ListView.as_view(
queryset=Product.objects.filter(is_active=True),
template_name='product/product_list.html',
)
class OrderItemForm(forms.Form):
quantity = forms.IntegerField(label=_('quantity'), initial=1,
min_value=1, max_value=100)
def product_detail(request, object_id):
product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)
if request.method == 'POST':
form = OrderItemForm(request.POST)
if form.is_valid():
order = shop.order_from_request(request, create=True)
try:
order.modify_item(product, form.cleaned_data.get('quantity'))
messages.success(request, _('The cart has been updated.'))
except ValidationError, e:
if e.code == 'order_sealed':
[messages.error(request, msg) for msg in e.messages]
return redirect('plata_product_list')
else:
form = OrderItemForm()
return render_to_response('product/product_detail.html', {
'object': product,
'form': form,
}, context_instance=RequestContext(request))
| {
"content_hash": "01f6abfa23cd217e21740b1882c768bd",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 85,
"avg_line_length": 30.75609756097561,
"alnum_prop": 0.6954797779540047,
"repo_name": "miraculixx/plata-restapi",
"id": "21b293fb8f72c425ba66421baaa461365b0fcb82",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2530"
},
{
"name": "Python",
"bytes": "54333"
}
],
"symlink_target": ""
} |
from encdec import RNNEncoderDecoder
from encdec import get_batch_iterator
from encdec import parse_input
from encdec import create_padded_batch
from state import\
prototype_state,\
prototype_phrase_state,\
prototype_encdec_state,\
prototype_search_state,\
prototype_search_with_coverage_state
| {
"content_hash": "fbb72e5b688287e422ca47c28604ae6a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.7777777777777778,
"repo_name": "tuzhaopeng/NMT-Coverage",
"id": "3dfd8b7a9a534e78d7a9a1413339131638d072c5",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lib/experiments/nmt/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Prolog",
"bytes": "29017"
},
{
"name": "Python",
"bytes": "877411"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
import os
import shlex
import sys
from resources.lib.utils.kodipathtools import translatepath
def process_cmdline(cmd):
posspaths = []
cmds = cmd.encode('utf-8')
partss = shlex.split(cmds, posix= not sys.platform.lower().startswith('win'))
parts = []
for part in partss:
parts.append(unicode(part, encoding='utf-8'))
for i in xrange(0, len(parts)):
found=-1
for j in xrange(i+1, len(parts)+1):
t = u' '.join(parts[i:j])
t = translatepath(t)
t = t.strip(u'"')
if os.path.exists(t):
if j > found:
found = j
if found != -1:
posspaths.append([i, found])
paths = []
args = []
if len(posspaths) > 0:
for i, path in enumerate(posspaths): # Check for overlaps
if i > 0:
if path[0] < posspaths[i-1][1]:
pass # If possible paths overlap, treat the first as a path and treat the rest of the overlap as non-path
else:
paths.append(path)
else:
paths.append(path)
for i in xrange(0, len(parts)):
for j in xrange(0, len(paths)):
if i == paths[j][0]:
t = u' '.join(parts[i:paths[j][1]])
t = translatepath(t)
t = t.strip(u'"')
parts[i] = t
for k in xrange(i+1, paths[j][1]):
parts[k]=u''
for i in xrange(0, len(parts)):
if parts[i] != u'':
args.append(parts[i])
else:
args = parts
return args
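# Illustrative sketch (editorial addition; the path is hypothetical and is only
# re-joined if it exists on disk): process_cmdline merges shell-split tokens that
# together form an existing path, so
#   process_cmdline(u'player /home/user/My Videos/clip.mp4 --fullscreen')
# would return
#   [u'player', u'/home/user/My Videos/clip.mp4', u'--fullscreen']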
def fsencode(s):
if sys.platform.lower().startswith('win'):
try:
import ctypes
import ctypes.wintypes
except ImportError:
return s
ctypes.windll.kernel32.GetShortPathNameW.argtypes = [
ctypes.wintypes.LPCWSTR, # lpszLongPath
ctypes.wintypes.LPWSTR, # lpszShortPath
ctypes.wintypes.DWORD # cchBuffer
]
ctypes.windll.kernel32.GetShortPathNameW.restype = ctypes.wintypes.DWORD
buf = ctypes.create_unicode_buffer(1024) # adjust buffer size, if necessary
ctypes.windll.kernel32.GetShortPathNameW(s, buf, len(buf))
short_path = buf.value
return short_path
else:
return s
| {
"content_hash": "7f444e92fc478f0a5cec277962356ef2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 126,
"avg_line_length": 33.88732394366197,
"alnum_prop": 0.5170407315045719,
"repo_name": "TidalPaladin/Superliminal-resin",
"id": "eb43370e37820a67c1cb1ce5431329507958cf15",
"size": "3150",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "app/script.service.kodi.callbacks/resources/lib/utils/detectPath.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11"
},
{
"name": "Python",
"bytes": "874230"
},
{
"name": "Shell",
"bytes": "2757"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import re
import os
import random
from hashlib import sha256
from twisted.web import client, http_headers
from ooni.utils.net import hasRawSocketPermission
client._HTTP11ClientFactory.noisy = False
from twisted.internet import reactor, defer
from ooni.utils import log
from ooni import errors
try:
from pygeoip import GeoIP
except ImportError:
try:
import GeoIP as CGeoIP
def GeoIP(database_path, *args, **kwargs):
return CGeoIP.open(database_path, CGeoIP.GEOIP_STANDARD)
except ImportError:
log.err("Unable to import pygeoip. We will not be able to run geo IP related measurements")
class GeoIPDataFilesNotFound(Exception):
pass
def IPToLocation(ipaddr):
from ooni.settings import config
city_file = config.get_data_file_path('GeoIP/GeoLiteCity.dat')
country_file = config.get_data_file_path('GeoIP/GeoIP.dat')
asn_file = config.get_data_file_path('GeoIP/GeoIPASNum.dat')
location = {'city': None, 'countrycode': 'ZZ', 'asn': 'AS0'}
def error():
log.err("Could not find GeoIP data file in %s."
"Try running ooniresources --update-geoip or"
" edit your ooniprobe.conf" % config.advanced.geoip_data_dir)
try:
country_dat = GeoIP(country_file)
location['countrycode'] = country_dat.country_code_by_addr(ipaddr)
if not location['countrycode']:
location['countrycode'] = 'ZZ'
except IOError:
error()
try:
city_dat = GeoIP(city_file)
location['city'] = city_dat.record_by_addr(ipaddr)['city']
except:
error()
try:
asn_dat = GeoIP(asn_file)
location['asn'] = asn_dat.org_by_addr(ipaddr).split(' ')[0]
except:
error()
return location
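# Usage sketch (assumes the GeoIP .dat files referenced above are present in
# config.advanced.geoip_data_dir; the values shown are placeholders, not real output):
#
#   location = IPToLocation('93.184.216.34')
#   # -> {'city': <city or None>, 'countrycode': <ISO code or 'ZZ'>, 'asn': 'AS<number>'}
#
# When a data file is missing, error() is logged and the corresponding default
# ('ZZ' / 'AS0' / None) is kept.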
def database_version():
from ooni.settings import config
version = {
'GeoIP': {
'sha256': None,
'timestamp': None,
},
'GeoIPASNum': {
'sha256': None,
'timestamp': None
},
'GeoLiteCity': {
'sha256': None,
'timestamp': None
}
}
for key in version.keys():
geoip_file = config.get_data_file_path("GeoIP/" + key + ".dat")
if not geoip_file or not os.path.isfile(geoip_file):
continue
timestamp = os.stat(geoip_file).st_mtime
sha256hash = sha256()
with open(geoip_file) as f:
while True:
chunk = f.read(8192)
if not chunk:
break
sha256hash.update(chunk)
version[key]['timestamp'] = timestamp
version[key]['sha256'] = sha256hash.hexdigest()
return version
class HTTPGeoIPLookupper(object):
url = None
_agent = client.Agent
def __init__(self):
self.agent = self._agent(reactor)
def _response(self, response):
from ooni.utils.net import BodyReceiver
content_length = response.headers.getRawHeaders('content-length')
finished = defer.Deferred()
response.deliverBody(BodyReceiver(finished, content_length))
finished.addCallback(self.parseResponse)
return finished
def parseResponse(self, response_body):
"""
Override this with the logic for parsing the response.
Should return the IP address of the probe.
"""
pass
def failed(self, failure):
log.err("Failed to lookup via %s" % self.url)
log.exception(failure)
return failure
def lookup(self):
from ooni.utils.net import userAgents
headers = {}
headers['User-Agent'] = [random.choice(userAgents)]
d = self.agent.request("GET", self.url, http_headers.Headers(headers))
d.addCallback(self._response)
d.addErrback(self.failed)
return d
class UbuntuGeoIP(HTTPGeoIPLookupper):
url = "http://geoip.ubuntu.com/lookup"
def parseResponse(self, response_body):
m = re.match(".*<Ip>(.*)</Ip>.*", response_body)
probe_ip = m.group(1)
return probe_ip
class TorProjectGeoIP(HTTPGeoIPLookupper):
url = "https://check.torproject.org/"
def parseResponse(self, response_body):
regexp = "Your IP address appears to be: <strong>((\d+\.)+(\d+))"
probe_ip = re.search(regexp, response_body).group(1)
return probe_ip
class ProbeIP(object):
strategy = None
address = None
def __init__(self):
self.geoIPServices = {
'ubuntu': UbuntuGeoIP,
'torproject': TorProjectGeoIP
}
self.geodata = {
'asn': 'AS0',
'city': None,
'countrycode': 'ZZ',
'ip': '127.0.0.1'
}
def resolveGeodata(self):
from ooni.settings import config
self.geodata = IPToLocation(self.address)
self.geodata['ip'] = self.address
if not config.privacy.includeasn:
self.geodata['asn'] = 'AS0'
if not config.privacy.includecity:
self.geodata['city'] = None
if not config.privacy.includecountry:
self.geodata['countrycode'] = 'ZZ'
if not config.privacy.includeip:
self.geodata['ip'] = '127.0.0.1'
@defer.inlineCallbacks
def lookup(self):
if self.address:
defer.returnValue(self.address)
else:
try:
yield self.askTor()
log.msg("Found your IP via Tor %s" % self.address)
self.resolveGeodata()
defer.returnValue(self.address)
except errors.TorStateNotFound:
log.debug("Tor is not running. Skipping IP lookup via Tor.")
except Exception:
log.msg("Unable to lookup the probe IP via Tor.")
try:
yield self.askTraceroute()
log.msg("Found your IP via Traceroute %s" % self.address)
self.resolveGeodata()
defer.returnValue(self.address)
except errors.InsufficientPrivileges:
log.debug("Cannot determine the probe IP address with a traceroute, becase of insufficient priviledges")
except:
log.msg("Unable to lookup the probe IP via traceroute")
try:
yield self.askGeoIPService()
log.msg("Found your IP via a GeoIP service: %s" % self.address)
self.resolveGeodata()
defer.returnValue(self.address)
except Exception:
log.msg("Unable to lookup the probe IP via GeoIPService")
raise
@defer.inlineCallbacks
def askGeoIPService(self):
# Shuffle the order in which we test the geoip services.
services = self.geoIPServices.items()
random.shuffle(services)
for service_name, service in services:
s = service()
log.msg("Looking up your IP address via %s" % service_name)
try:
self.address = yield s.lookup()
self.strategy = 'geo_ip_service-' + service_name
break
except Exception:
log.msg("Failed to lookup your IP via %s" % service_name)
if not self.address:
raise errors.ProbeIPUnknown
    def askTraceroute(self):
        """
        Perform a UDP traceroute to determine the probe's IP address.
        """
        if not hasRawSocketPermission():
            raise errors.InsufficientPrivileges
        raise NotImplementedError
def askTor(self):
"""
        Obtain the probe's IP address by asking the Tor Control port via GET INFO
address.
XXX this lookup method is currently broken when there are cached descriptors or consensus documents
see: https://trac.torproject.org/projects/tor/ticket/8214
"""
from ooni.settings import config
if config.tor_state:
d = config.tor_state.protocol.get_info("address")
@d.addCallback
def cb(result):
self.strategy = 'tor_get_info_address'
self.address = result.values()[0]
return d
else:
raise errors.TorStateNotFound
| {
"content_hash": "14ff44d68cc00dadba85faff81373cb2",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 120,
"avg_line_length": 30.71851851851852,
"alnum_prop": 0.5853629129491198,
"repo_name": "0xPoly/ooni-probe",
"id": "11800b6d840ac95d81735fcf295956a16a227904",
"size": "8294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooni/geoip.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "392"
},
{
"name": "Groff",
"bytes": "38425"
},
{
"name": "HTML",
"bytes": "3963"
},
{
"name": "JavaScript",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "3786"
},
{
"name": "Python",
"bytes": "518736"
},
{
"name": "Shell",
"bytes": "77958"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import errno
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import File
from resource_management.libraries.functions import check_process_status, format
# Note: Phoenix Query Server is only applicable to stack versions that include Phoenix.
def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
# Note: params/status_params should already be imported before calling phoenix_service()
pid_file = format("{pid_dir}/phoenix-hbase-queryserver.pid")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
if action == "status":
check_process_status(pid_file)
else:
env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")}
daemon_cmd = format("{phx_daemon_script} {action}")
if action == 'start':
Execute(daemon_cmd,
user=format("{hbase_user}"),
environment=env)
elif action == 'stop':
Execute(daemon_cmd,
user=format("{hbase_user}"),
environment=env
)
try:
File(pid_file, action = "delete")
except OSError as exc:
# OSError: [Errno 2] No such file or directory
if exc.errno == errno.ENOENT:
Logger.info("Did not remove '{0}' as it did not exist".format(pid_file))
else:
raise
| {
"content_hash": "f4d5bbb8e71b6221e5b6fe5097a90956",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 98,
"avg_line_length": 42.528301886792455,
"alnum_prop": 0.6827861579414375,
"repo_name": "arenadata/ambari",
"id": "b2cdc42ce5a3354cdd34728da1edabb8a2441b74",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/ADH/1.6/services/HBASE/package/scripts/phoenix_service.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
"""
FISSPy' Analysis module provides various tools for data analysis.
"""
from __future__ import absolute_import
__author__ = "Juhyung Kang"
__email__ = "jhkang@astro.snu.ac.kr"
from fisspy.analysis.tdmap import TDmap
from fisspy.analysis.wavelet import *
from fisspy.analysis.doppler import *
from fisspy.analysis.forecast import *
| {
"content_hash": "185f442deb30a45510bfa51e8c3d4cc2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.7529761904761905,
"repo_name": "SNU-sunday/fisspy",
"id": "678df2c528a3f831042f11776476b7a92fcd078f",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fisspy/analysis/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "182841"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/default-route-distance/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the default route
distance
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__external_route_distance",
"__internal_route_distance",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__external_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="external-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
self.__internal_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="internal-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"default-route-distance",
"config",
]
def _get_external_route_distance(self):
"""
Getter method for external_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/external_route_distance (uint8)
YANG Description: Administrative distance for routes learned from external
BGP (eBGP).
"""
return self.__external_route_distance
def _set_external_route_distance(self, v, load=False):
"""
Setter method for external_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/external_route_distance (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_route_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_route_distance() directly.
YANG Description: Administrative distance for routes learned from external
BGP (eBGP).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="external-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_route_distance must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="external-route-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
}
)
self.__external_route_distance = t
if hasattr(self, "_set"):
self._set()
def _unset_external_route_distance(self):
self.__external_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="external-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
def _get_internal_route_distance(self):
"""
Getter method for internal_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/internal_route_distance (uint8)
YANG Description: Administrative distance for routes learned from internal
BGP (iBGP).
"""
return self.__internal_route_distance
def _set_internal_route_distance(self, v, load=False):
"""
Setter method for internal_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/internal_route_distance (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_internal_route_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_internal_route_distance() directly.
YANG Description: Administrative distance for routes learned from internal
BGP (iBGP).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="internal-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """internal_route_distance must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="internal-route-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
}
)
self.__internal_route_distance = t
if hasattr(self, "_set"):
self._set()
def _unset_internal_route_distance(self):
self.__internal_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="internal-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
external_route_distance = __builtin__.property(
_get_external_route_distance, _set_external_route_distance
)
internal_route_distance = __builtin__.property(
_get_internal_route_distance, _set_internal_route_distance
)
_pyangbind_elements = OrderedDict(
[
("external_route_distance", external_route_distance),
("internal_route_distance", internal_route_distance),
]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/default-route-distance/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the default route
distance
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__external_route_distance",
"__internal_route_distance",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__external_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="external-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
self.__internal_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="internal-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"default-route-distance",
"config",
]
def _get_external_route_distance(self):
"""
Getter method for external_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/external_route_distance (uint8)
YANG Description: Administrative distance for routes learned from external
BGP (eBGP).
"""
return self.__external_route_distance
def _set_external_route_distance(self, v, load=False):
"""
Setter method for external_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/external_route_distance (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_route_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_route_distance() directly.
YANG Description: Administrative distance for routes learned from external
BGP (eBGP).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="external-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_route_distance must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="external-route-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
}
)
self.__external_route_distance = t
if hasattr(self, "_set"):
self._set()
def _unset_external_route_distance(self):
self.__external_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="external-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
def _get_internal_route_distance(self):
"""
Getter method for internal_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/internal_route_distance (uint8)
YANG Description: Administrative distance for routes learned from internal
BGP (iBGP).
"""
return self.__internal_route_distance
def _set_internal_route_distance(self, v, load=False):
"""
Setter method for internal_route_distance, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/default_route_distance/config/internal_route_distance (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_internal_route_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_internal_route_distance() directly.
YANG Description: Administrative distance for routes learned from internal
BGP (iBGP).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="internal-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """internal_route_distance must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="internal-route-distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
}
)
self.__internal_route_distance = t
if hasattr(self, "_set"):
self._set()
def _unset_internal_route_distance(self):
self.__internal_route_distance = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="internal-route-distance",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
external_route_distance = __builtin__.property(
_get_external_route_distance, _set_external_route_distance
)
internal_route_distance = __builtin__.property(
_get_internal_route_distance, _set_internal_route_distance
)
_pyangbind_elements = OrderedDict(
[
("external_route_distance", external_route_distance),
("internal_route_distance", internal_route_distance),
]
)
| {
"content_hash": "2af0588370cf9462dcce230a5a8fa3a9",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 505,
"avg_line_length": 41.52423698384201,
"alnum_prop": 0.5733927104500843,
"repo_name": "napalm-automation/napalm-yang",
"id": "420241d5fbebd6344d9c81327d098f8037839cd0",
"size": "23153",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/default_route_distance/config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import argparse
import re
import subprocess
import sys
import os
from enum import Enum
import time
import urllib.request, urllib.error, urllib.parse
class Version(object):
def __init__(self, major, minor, bugfix, prerelease):
self.major = major
self.minor = minor
self.bugfix = bugfix
self.prerelease = prerelease
self.previous_dot_matcher = self.make_previous_matcher()
self.dot_no_v = '%d.%d.%d' % (self.major, self.minor, self.bugfix)
self.dot = 'v%s' % (self.dot_no_v)
self.constant = 'SOLR_OPERATOR_%d_%d_%d' % (self.major, self.minor, self.bugfix)
@classmethod
def parse(cls, value):
        match = re.search(r'(?:v)?(\d+)\.(\d+)\.(\d+)(\.1|\.2)?', value)
if match is None:
raise argparse.ArgumentTypeError('Version argument must be of format vX.Y.Z(.1|.2)?')
parts = [int(v) for v in match.groups()[:-1]]
parts.append({ None: 0, '.1': 1, '.2': 2 }[match.groups()[-1]])
return Version(*parts)
def __str__(self):
return self.dot
def make_previous_matcher(self, prefix='', suffix='', sep='\\.'):
if self.is_bugfix_release():
pattern = '%s%s%s%s%d' % (self.major, sep, self.minor, sep, self.bugfix - 1)
elif self.is_minor_release():
pattern = '%s%s%d%s\\d+' % (self.major, sep, self.minor - 1, sep)
else:
pattern = '%d%s\\d+%s\\d+' % (self.major - 1, sep, sep)
return re.compile(prefix + '(' + pattern + ')' + suffix)
def is_bugfix_release(self):
return self.bugfix != 0
def is_minor_release(self):
return self.bugfix == 0 and self.minor != 0
def is_major_release(self):
return self.bugfix == 0 and self.minor == 0
def no_major_release(self):
return self.major == 0
def on_or_after(self, other):
return (self.major > other.major or self.major == other.major and
(self.minor > other.minor or self.minor == other.minor and
(self.bugfix > other.bugfix or self.bugfix == other.bugfix and
self.prerelease >= other.prerelease)))
def gt(self, other):
return (self.major > other.major or
(self.major == other.major and self.minor > other.minor) or
(self.major == other.major and self.minor == other.minor and self.bugfix > other.bugfix))
def is_back_compat_with(self, other):
if not self.on_or_after(other):
raise Exception('Back compat check disallowed for newer version: %s < %s' % (self, other))
return other.major + 1 >= self.major
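# Illustrative parsing example (not in the upstream file); Version.parse accepts
# an optional leading 'v' and an optional '.1'/'.2' prerelease suffix:
#
#   v = Version.parse('v0.5.1')
#   # v.dot == 'v0.5.1', v.constant == 'SOLR_OPERATOR_0_5_1', v.prerelease == 0
#   # Version.parse('0.6.0.1').prerelease == 1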
def run(cmd, cwd=None):
try:
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, cwd=cwd)
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
return output.decode('utf-8')
def update_file(filename, line_re, edit):
infile = open(filename, 'r')
buffer = []
changed = False
for line in infile:
if not changed:
match = line_re.search(line)
if match:
changed = edit(buffer, match, line)
if changed is None:
return False
continue
buffer.append(line)
if not changed:
raise Exception('Could not find %s in %s' % (line_re, filename))
with open(filename, 'w') as f:
f.write(''.join(buffer))
return True
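# Hypothetical usage sketch for update_file (the names below are illustrative):
# the `edit` callback receives the output buffer, the regex match and the matched
# line; it appends the replacement line(s) to the buffer and returns True when
# done (returning None aborts the rewrite and leaves the file untouched).
#
#   def bump(buffer, match, line):
#       buffer.append(line.replace(match.group(2), '0.6.0'))
#       return True
#   update_file('version/version.go', version_prop_re, bump)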
# branch types are "release", "stable" and "unstable"
class BranchType(Enum):
unstable = 1
stable = 2
release = 3
def find_branch_type():
output = subprocess.check_output('git status', shell=True)
for line in output.split(b'\n'):
if line.startswith(b'On branch '):
branchName = line.split(b' ')[-1]
break
else:
raise Exception('git status missing branch name')
if branchName == b'main':
return BranchType.unstable
if re.match(r'^release-(\d+)$', branchName.decode('UTF-8')):
return BranchType.stable
if re.match(r'^release-(\d+)\.(\d+)$', branchName.decode('UTF-8')):
return BranchType.release
raise Exception('Cannot run %s on feature branch' % sys.argv[0].rsplit('/', 1)[-1])
def download(name, urlString, tmpDir, quiet=False, force_clean=True):
if not quiet:
print("Downloading %s" % urlString)
startTime = time.time()
fileName = '%s/%s' % (tmpDir, name)
if not force_clean and os.path.exists(fileName):
if not quiet and fileName.find('.asc') == -1:
print(' already done: %.1f MB' % (os.path.getsize(fileName)/1024./1024.))
return
try:
attemptDownload(urlString, fileName)
except Exception as e:
print('Retrying download of url %s after exception: %s' % (urlString, e))
try:
attemptDownload(urlString, fileName)
except Exception as e:
raise RuntimeError('failed to download url "%s"' % urlString) from e
if not quiet and fileName.find('.asc') == -1:
t = time.time()-startTime
sizeMB = os.path.getsize(fileName)/1024./1024.
print(' %.1f MB in %.2f sec (%.1f MB/sec)' % (sizeMB, t, sizeMB/t))
def attemptDownload(urlString, fileName):
fIn = urllib.request.urlopen(urlString)
fOut = open(fileName, 'wb')
success = False
try:
while True:
s = fIn.read(65536)
if s == b'':
break
fOut.write(s)
fOut.close()
fIn.close()
success = True
finally:
fIn.close()
fOut.close()
if not success:
os.remove(fileName)
version_prop_re = re.compile(r'Version\s*=\s*([\'"])(.*)\1')
def find_current_version():
script_path = os.path.dirname(os.path.realpath(__file__))
top_level_dir = os.path.join(os.path.abspath("%s/" % script_path), os.path.pardir, os.path.pardir, os.path.pardir)
return version_prop_re.search(open('%s/version/version.go' % top_level_dir).read()).group(2).strip()
if __name__ == '__main__':
print('This is only a support module, it cannot be run')
sys.exit(1)
| {
"content_hash": "f1dc94aaabec84cd68e036da74959688",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 116,
"avg_line_length": 32.565714285714286,
"alnum_prop": 0.6323916476574838,
"repo_name": "apache/solr-operator",
"id": "c453dabb8e98780422e10b6bef28d78fcff5aacd",
"size": "6481",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "hack/release/wizard/scriptutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2430"
},
{
"name": "Go",
"bytes": "884367"
},
{
"name": "Makefile",
"bytes": "13382"
},
{
"name": "Python",
"bytes": "85048"
},
{
"name": "Shell",
"bytes": "84463"
},
{
"name": "Smarty",
"bytes": "17797"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="scattersmith", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| {
"content_hash": "75cee921584d0bb8b56e0a8ba348db97",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 84,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.6240601503759399,
"repo_name": "plotly/plotly.py",
"id": "2ce9eacdffe562b939b4733e23eb759cbcabd0f1",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattersmith/_textsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import inspect
import os
import unittest
from django.test.runner import DebugSQLTextTestResult, DiscoverRunner
class SensibleTextTestResult(unittest.TextTestResult):
def get_vi_command(self, test):
code_object = inspect.unwrap(getattr(test, test._testMethodName)).__code__
return 'vi +{} {}'.format(code_object.co_firstlineno, code_object.co_filename)
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
description = '{}.{}.{}'.format(test.__class__.__module__, test.__class__.__qualname__, test._testMethodName)
if os.environ.get('SHOW_VI', False):
description += '\n\n' + self.get_vi_command(test)
return description
class SensibleTextTestRunner(DiscoverRunner):
def __init__(self, tb_locals=False, **kwargs):
self.tb_locals = tb_locals
super().__init__(**kwargs)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else SensibleTextTestResult
def run_suite(self, suite, **kwargs):
resultclass = self.get_resultclass()
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
tb_locals=self.tb_locals,
resultclass=resultclass,
).run(suite)
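# Usage sketch (assumptions flagged): with this package on the Python path, point
# Django at the runner in settings.py and optionally export SHOW_VI=1 to get a
# ready-made `vi +<line> <file>` command in each failing test's description:
#
#   TEST_RUNNER = 'sensible_text_test_runner.SensibleTextTestRunner'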
| {
"content_hash": "8f44d91243440f0282a4336a64c96312",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 121,
"avg_line_length": 36.56410256410256,
"alnum_prop": 0.638148667601683,
"repo_name": "rapilabs/sensible-text-test-runner",
"id": "56f6b5c5de33b0954c4b25844e2f7222a403680a",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensible_text_test_runner/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2284"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'stateMap.html') | {
"content_hash": "00357f0fec9e9f2f2dcf12484e4d2e79",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 43,
"avg_line_length": 25.2,
"alnum_prop": 0.7619047619047619,
"repo_name": "SavenR/Map_-_Energy_Production",
"id": "ab71de823dbe06077687566d7ef0e1c40d398cbf",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsframework/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2363"
},
{
"name": "HTML",
"bytes": "2422"
},
{
"name": "JavaScript",
"bytes": "41866"
},
{
"name": "Python",
"bytes": "19233"
}
],
"symlink_target": ""
} |
"""
Simple flask-based API to access FreeLing functionalities.
"""
__author__ = "Víctor Peinado"
__email__ = "vitojph@gmail.com"
__date__ = "26/06/2013"
import freeling
from flask import Flask, Response, request
from flask.ext.restful import Api, Resource
import json
# #################################################################
# FreeLing settings (borrowed from freeling-3.0/APIs/python/sample.py)
PUNCTUATION = u""".,;:!? """
## Modify this line to be your FreeLing installation directory
FREELINGDIR = "/usr/local/"
DATA = FREELINGDIR + "share/freeling/"
LANG = "ca"
freeling.util_init_locale("default");
# Create language analyzer
#la=freeling.lang_ident(DATA + "common/lang_ident/ident.dat")
# Create options set for maco analyzer. Default values are Ok, except for data files.
op = freeling.maco_options(LANG)
op.set_active_modules(0,1,1,1,1,1,1,1,1,1,0)
op.set_data_files("",
DATA + LANG + "/locucions.dat",
DATA + LANG + "/quantities.dat",
DATA + LANG + "/afixos.dat",
DATA + LANG + "/probabilitats.dat",
DATA + LANG + "/dicc.src",
DATA + LANG + "/np.dat",
DATA + "common/punct.dat",
DATA + LANG + "/corrector/corrector.dat")
# Create analyzers
tk = freeling.tokenizer(DATA + LANG + "/tokenizer.dat")
sp = freeling.splitter(DATA + LANG + "/splitter.dat")
mf = freeling.maco(op)
tg = freeling.hmm_tagger(LANG, DATA + LANG + "/tagger.dat", 1, 2)
sen = freeling.senses(DATA+LANG+"/senses.dat")
parser = freeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")
dep = freeling.dep_txala(DATA + LANG+ "/dep/dependences.dat", parser.get_start_symbol())
# #################################################################
# flask API
app = Flask(__name__)
api = Api(app)
# ##############################################################################
def handleParsedTreeAsFL(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
nch = tree.num_children()
# if node is head and has no children
if nch == 0:
if node.is_head():
w = node.get_word()
output.append("+(%s %s %s)" % (w.get_form(), w.get_lemma(), w.get_tag()))
else:
# if node is head and has children
if node.is_head():
output.append("+%s_[" % (node.get_label()))
else:
# if node has children but isn't head
output.append("%s_[" % (node.get_label()))
# for each children, repeat process
for i in range(nch):
child = tree.nth_child_ref(i)
handleParsedTreeAsFL(child, depth+1, output)
# close node
output.append("]")
return output
# ##############################################################################
def handleParsedTreeAsString(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
nch = tree.num_children()
parent = tree.get_parent()
# if node is head and has no children
if nch == 0:
if node.is_head():
w = node.get_word()
output.append(u"%s/%s/%s" % (w.get_form(), w.get_lemma(), w.get_tag()))
else:
if depth > 0:
output.append(u"%s(" % node.get_label())
# for each children, repeat process
for i in range(nch):
child = tree.nth_child_ref(i)
handleParsedTreeAsString(child, depth+1, output)
if depth > 0:
output.append(u")")
return output
# ##############################################################################
def handleParsedTreeAsJSON(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
nch = tree.num_children()
parent = tree.get_parent()
# if node is head and has no children
if nch == 0:
if node.is_head():
w = node.get_word()
output.append(dict(text=w.get_form(), lemma=w.get_lemma(), tag=w.get_tag(), parent=parent.get_info().get_label(), level=depth))
else:
if depth > 0:
output.append(dict(tag=node.get_label(), parent=parent.get_info().get_label(), level=depth))
# for each children, repeat process
for i in range(nch):
child = tree.nth_child_ref(i)
handleParsedTreeAsJSON(child, depth+1, output)
return output
# #############################################################################
def handleDepTree(tree, depth, output):
"""Handles a parsed tree"""
node = tree.get_info()
link = node.get_link()
linfo = link.get_info()
parentLabel = None
if depth > 0:
parentLabel = tree.get_parent().get_info().get_label()
w = tree.get_info().get_word()
output.append(dict(parent=parentLabel, rel=node.get_label(), label=link.get_info().get_label(), text=w.get_form(), lemma=w.get_lemma(), tag=w.get_tag()))
nch = tree.num_children()
if nch > 0:
for i in range(nch):
d = tree.nth_child_ref(i)
if not d.get_info().is_chunk():
handleDepTree(d, depth+1, output)
ch = {}
for i in range(nch):
d = tree.nth_child_ref(i)
if d.get_info().is_chunk():
ch[d.get_info().get_chunk_ord()] = d
for i in sorted(ch.keys()):
handleDepTree(ch[i], depth+1, output)
return output
# ##############################################################################
class Splitter(Resource):
"""Splits an input text into sentences."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
# output list of sentences
outputSentences = []
for sentence in sentences:
outputTokens = []
for w in sentence.get_words():
outputTokens.append(w.get_form())
outputSentences.append(dict(oracion=" ".join(outputTokens)))
return Response(json.dumps(outputSentences), mimetype="application/json")
class TokenizerSplitter(Resource):
"""Splits an input text into tokenized sentences."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
# output list of sentences
outputSentences = []
for sentence in sentences:
outputTokens = []
for w in sentence.get_words():
outputTokens.append(w.get_form())
outputSentences.append(dict(oracion=outputTokens))
return Response(json.dumps(outputSentences), mimetype="application/json")
# ##############################################################################
class NERecognizer(Resource):
"""Recognizes Named Entities from an input text."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
# Person (NP00SP0), Geographical location (NP00G00), Organization (NP00O00), and Others (NP00V00)
if word.get_tag() in "NP00SP0 NP00G00 NP00000 NP00V00".split():
entities = []
entities.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))
output.append(dict(palabra=word.get_form(), entidades=entities))
return Response(json.dumps(output), mimetype="application/json")
# ##############################################################################
class DatesQuatitiesRecognizer(Resource):
"""Recognizes dates, currencies, and quatities from an input text."""
def post(self):
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
# dates
tag = word.get_tag()
if tag[0] in "W Z".split():
expression = []
if tag == "W":
expression.append(dict(lema=word.get_lemma(), categoria="temporal"))
else:
if tag == "Z":
category = "numero"
elif tag == "Zd":
category = "partitivo"
elif tag == "Zm":
category = "moneda"
elif tag == "Zp":
category = "porcentaje"
elif tag == "Zu":
category = "magnitud"
else:
category = "numero"
expression.append(dict(lema=word.get_lemma(), categoria=category))
output.append(dict(expresion=word.get_form(), entidades=expression))
return Response(json.dumps(output), mimetype="application/json")
# ##############################################################################
class Tagger(Resource):
"""Performs POS tagging from an input text."""
def post(self):
"""docstring for post"""
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
lemmas = []
lemmas.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))
output.append(dict(palabra=word.get_form(), lemas=lemmas))
return Response(json.dumps(output), mimetype="application/json")
# ##############################################################################
class WSDTagger(Resource):
"""Performs POS tagging and WSD from an input text."""
def post(self):
"""docstring for post"""
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
sentences = sen.analyze(sentences)
output = []
for sentence in sentences:
words = sentence.get_words()
for word in words:
lemmas = []
lemmas.append(dict(lema=word.get_lemma(), categoria=word.get_tag()))
# split the senses and get just the synset ID
synsets = []
[synsets.append(synsetID.split(":")[0]) for synsetID in word.get_senses_string().split("/")]
output.append(dict(palabra=word.get_form(), lemas=lemmas, synsets=synsets))
return Response(json.dumps(output), mimetype="application/json")
# ##############################################################################
class Parser(Resource):
"""FreeLing parser with three output formats: freeling-like, stanford-like and jsonified"""
def post(self):
"""docstring for post"""
text = request.json["texto"]
try:
format = request.json["format"]
except KeyError:
format = "json"
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
sentences = sen.analyze(sentences)
sentences = parser.analyze(sentences)
# set up the output format
parsedtree = []
for sentence in sentences:
tree = sentence.get_parse_tree()
if format == "fl":
parsedtree = handleParsedTreeAsFL(tree.begin(), 0, parsedtree)
elif format == "string":
# add the S(entence) tag
parsedtree.append("S(")
parsedtree = handleParsedTreeAsString(tree.begin(), 0, parsedtree)
# close the (S)entence
parsedtree.append(")")
elif format == "json":
# add the S tag with parent ROOT
parsedtree.append(dict(tag="S", parent="ROOT", level=0))
parsedtree = handleParsedTreeAsJSON(tree.begin(), 0, parsedtree)
# format the output accordingly
if format == "fl" or format == "string":
return Response(json.dumps(dict(tree=" ".join(parsedtree))), mimetype="application/json")
elif format == "json":
return Response(json.dumps(parsedtree), mimetype="application/json")
# ###############################################################################
class DependencyParser(Resource):
"""FreeLing Dependency Parser"""
def post(self):
"""docstring for post"""
text = request.json["texto"]
if text[-1] not in PUNCTUATION:
text = text + "."
tokens = tk.tokenize(text)
sentences = sp.split(tokens, 0)
sentences = mf.analyze(sentences)
sentences = tg.analyze(sentences)
sentences = sen.analyze(sentences)
sentences = parser.analyze(sentences)
sentences = dep.analyze(sentences)
# set up the output format
deptree = []
for sentence in sentences:
tree = sentence.get_dep_tree()
deptree = handleDepTree(tree.begin(), 0, deptree)
return Response(json.dumps(deptree), mimetype="application/json")
# #############################################################################
# Api resource routing
# split a text into sentences
api.add_resource(Splitter, "/splitter")
# split a text into tokenized sentences
api.add_resource(TokenizerSplitter, "/tokenizersplitter")
# perform PoS tagging from an input text
api.add_resource(Tagger, "/tagger")
# perform PoS tagging and WSD from an input text
api.add_resource(WSDTagger, "/wsdtagger")
# perform NE recognition from an input text
api.add_resource(NERecognizer, "/ner")
# recognizes dates, currencies and quantities
api.add_resource(DatesQuatitiesRecognizer, "/datesquantities")
# returns a parsed tree
api.add_resource(Parser, "/parser")
# returns a dependency tree
api.add_resource(DependencyParser, "/dep")
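# Example client call (a sketch, not part of the service; assumes the server is
# already running on the host/port configured below and that `requests` is
# installed in the client environment; the sentence is illustrative):
#
#   import requests
#   r = requests.post("http://localhost:9999/tagger",
#                     json={"texto": "Barcelona és la capital de Catalunya."})
#   print(r.json())
#
# Every endpoint expects a JSON body with a "texto" key and returns JSON.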
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0", port=9999)
| {
"content_hash": "ce5adb5b3df0799bc2819d346dc6c3d2",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 157,
"avg_line_length": 33.3646288209607,
"alnum_prop": 0.5241149139454224,
"repo_name": "vitojph/flws",
"id": "dfc78a18ecabac75198f2462a26b8bcb35decde7",
"size": "15329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flws/flwsca.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "124594"
}
],
"symlink_target": ""
} |
import datetime
class Signature(object):
"""
Returns ChatterBot's signature.
"""
def __init__(self, name):
self.name = name
self.time = self.create_timestamp()
def create_timestamp(self, fmt="%Y-%m-%d-%H-%M-%S"):
"""
Returns a string formatted timestamp of the current time.
"""
return datetime.datetime.now().strftime(fmt)
def serialize(self):
signature = {}
signature['name'] = self.name
signature['time'] = self.time
return signature
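# Illustrative example (the timestamp value is a placeholder in the default
# "%Y-%m-%d-%H-%M-%S" format):
#
#   sig = Signature('ChatterBot')
#   sig.serialize()
#   # -> {'name': 'ChatterBot', 'time': '2015-01-01-12-00-00'}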
| {
"content_hash": "546a00bf5505d694fb682b8d9f3b3c20",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 21.92,
"alnum_prop": 0.5693430656934306,
"repo_name": "imminent-tuba/thesis",
"id": "044767e53d0c65286aaab94f098e10ab11f968e3",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/chatterbot/chatterbot/conversation/signature.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "203080"
},
{
"name": "HTML",
"bytes": "642"
},
{
"name": "JavaScript",
"bytes": "47372"
},
{
"name": "Python",
"bytes": "71261"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
if TYPE_CHECKING:
from boxsdk import NetworkResponse
class BoxResponse:
"""Represents a response to a Box API request."""
def __init__(self, network_response):
self._network_response = network_response
def json(self) -> Any:
"""
Return the parsed JSON response.
"""
return self._network_response.json()
@property
def content(self) -> Any:
"""
Return the content of the response body.
"""
return self._network_response.content
@property
def ok(self) -> bool:
"""
Return whether or not the request was successful.
"""
# pylint:disable=invalid-name
return self._network_response.ok
@property
def status_code(self) -> int:
"""
Return the HTTP status code of the response.
"""
return self._network_response.status_code
@property
def headers(self) -> dict:
"""
Get the response headers.
"""
return self._network_response.headers
@property
def network_response(self) -> 'NetworkResponse':
"""
Return the underlying network response.
"""
return self._network_response
def __repr__(self) -> str:
return f'<Box Response[{self.status_code}]>'
| {
"content_hash": "967bfd3f60b5216a9e1009061efe5e6b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 57,
"avg_line_length": 24.232142857142858,
"alnum_prop": 0.58069270449521,
"repo_name": "box/box-python-sdk",
"id": "9b9e83dfa5f8384f28eecacf1c0cbfd3d80222d3",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "boxsdk/session/box_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1036959"
},
{
"name": "Smarty",
"bytes": "527"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from gluon import current, URL
from gluon.storage import Storage
def config(settings):
"""
SHARE settings for Sri Lanka
@ToDo: Setting for single set of Sectors / Sector Leads Nationally
"""
T = current.T
# PrePopulate data
settings.base.prepopulate += ("SHARE/LK",)
settings.base.prepopulate_demo += ("SHARE/Demo",)
# -------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en-gb", "English"),
("si", "Sinhala"),
("ta", "Tamil"),
])
# Default Language
settings.L10n.default_language = "en-gb"
# Finance settings
settings.fin.currencies = {
#"EUR" : "Euros",
#"GBP" : "Great British Pounds",
"LKR" : "Sri Lanka Rupees",
"USD" : "United States Dollars",
}
settings.fin.currency_default = "USD"
# -------------------------------------------------------------------------
def customise_event_event_resource(r, tablename):
s3db = current.s3db
s3db.event_event.name.label = T("Disaster Title")
# Custom Components
s3db.add_components(tablename,
event_event_name = (# Sinhala
{"name": "name_si",
"joinby": "event_id",
"filterby": {"language": "si",
},
"multiple": False,
},
# Tamil
{"name": "name_ta",
"joinby": "event_id",
"filterby": {"language": "ta",
},
"multiple": False,
},
),
need_need = {"link": "need_event",
"joinby": "event_id",
"key": "need_id",
"actuate": "hide",
"autodelete": False,
},
)
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("name",
S3SQLInlineComponent("name_si",
label = T("Title in Sinhala"),
multiple = False,
fields = [("", "name_l10n")],
),
S3SQLInlineComponent("name_ta",
label = T("Title in Tamil"),
multiple = False,
fields = [("", "name_l10n")],
),
"event_type_id",
"start_date",
"closed",
"end_date",
S3SQLInlineComponent("event_location",
label = T("Locations"),
multiple = False,
fields = [("", "location_id")],
),
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_event_event_resource = customise_event_event_resource
# END =========================================================================
| {
"content_hash": "431d91987aafa60361a284d3cb1ab961",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 88,
"avg_line_length": 43.53465346534654,
"alnum_prop": 0.2974755515123948,
"repo_name": "flavour/eden",
"id": "73bed3949836015bea0a712908fe42ad2a1281c5",
"size": "4422",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "modules/templates/SHARE/LK/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3351335"
},
{
"name": "HTML",
"bytes": "1367727"
},
{
"name": "JavaScript",
"bytes": "20109418"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31407527"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3274119"
}
],
"symlink_target": ""
} |
from json import loads
from tenable_io.api.base import BaseApi
from tenable_io.api.base import BaseRequest
from tenable_io.api.models import AccessGroup, AccessGroupList, AssetRule, AssetRuleFilter, AssetRulePrincipal, Filters
class AccessGroupsApi(BaseApi):
def list(self, f=None, ft='and', w=None, wf=None, limit=None, offset=0, sort=None):
"""Return the access groups without associated rules.
:param f: A list of :class:`tenable_io.api.models.AssetFilter` instances.
:param ft: The action to apply if multiple 'f' parameters are provided. Supported values are **and** and **or**.
:param w: The search value to be applied across wildcard fields specified with the 'wf' parameter.
:param wf: The list of fields where the search values specified in the 'w' parameter are applied.
:param limit: The maximum number of records to be retrieved.
        :param offset: The offset from which records are returned.
:param sort: A list of fields on which the results are sorted.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.AccessGroupList`.
"""
fgen = (i.field + ':' + i.operator + ':' + i.value for i in f) if f is not None else None
response = self._client.get('access-groups',
params={'f': '&'.join(fgen) if fgen is not None else None,
'ft': ft, 'w': w, 'wf': ','.join(wf) if wf is not None else None,
'limit': limit, 'offset': offset,
'sort': ','.join(sort) if sort is not None else None})
return AccessGroupList.from_json(response.text)
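    # Illustrative note: each entry in 'f' is serialized as "field:operator:value"
    # and the entries are joined with '&'. For example, a filter with field='ipv4',
    # operator='eq' and value='192.168.0.0/24' (hypothetical values) is sent as
    # 'ipv4:eq:192.168.0.0/24'.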
def create(self, access_group_request):
"""Create a new access group.
        :param access_group_request: An instance of :class:`AccessGroupRequest`.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.AccessGroup` without the rules information.
"""
response = self._client.post('access-groups', access_group_request)
return AccessGroup.from_json(response.text)
def details(self, id):
"""Returns details for a specific access group
:param id: The id of the access group
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.AccessGroup`.
"""
response = self._client.get('access-groups/%(id)s', path_params={'id': id})
return AccessGroup.from_json(response.text)
def delete(self, id):
"""Delete an access group.
:param id: The id of the access group to delete.
:raise TenableIOApiException: When API error is encountered.
:return: True if successful.
"""
self._client.delete('access-groups/%(id)s', path_params={'id': id})
return True
def edit(self, id, access_group_request):
"""Modifies an access group. This method overwrites the existing data.
:param id: The id of the access group to be edited.
:param access_group_request: An instance of :class:`AccessGroupRequest`.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.AccessGroup`.
"""
response = self._client.put('access-groups/%(id)s', payload=access_group_request, path_params={'id': id})
return AccessGroup.from_json(response.text)
def filters(self):
"""List available filters for access groups.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.Filters`.
"""
response = self._client.get('access-groups/filters')
return Filters.from_json(response.text)
def rule_filters(self):
"""List available filters for asset rules.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.Filters`.
"""
response = self._client.get('access-groups/rules/filters')
return AssetRuleFilter.from_list(loads(response.text).get('rules', {}))
class AccessGroupRequest(BaseRequest):
def __init__(
self,
name=None,
all_assets=False,
all_users=False,
rules=None,
principals=None
):
"""Request for AccessGroupsApi.create and AccessGroupsApi.edit.
:param name: The name of the access group. Must be unique within the container, a maximum of 255 characters, and
alphanumeric, but can include limited special characters (underscore, dash, parenthesis, brackets, colon).
You can add a maximum of 5,000 access groups to an individual container.
:type name: string
:param all_assets: Specifies whether the access group is the All Assets group or a user-defined group. A create
request with this parameter set to 'true' will fail.
Set to 'true' to edit membership in the All Assets access group. In which case, any rules
are ignored, but existing principals are overwritten based on the all_users and principals parameters.
Set to 'false' to edit a user-defined access group. The existing rules are overwritten with the new rules,
and existing principals are overwritten based on the all_users and principals parameters.
:type all_assets: boolean
:param all_users: Specifies whether assets in the access group can be viewed by all or only some users.
Default is 'False'. If 'true', all users in your organization have Can View access to the
assets defined in the rules parameter and any principal parameters is ignored.
If 'false', only specified users have Can View access to the assets defined in the rules parameter.
You define which users or user groups have access in the principals parameter of the request.
:type all_users: boolean
:param rules: An array of asset rules. Tenable.io uses these rules to assign assets to the access group.
You can only add rules to access groups if the all_assets parameter is set to 'false'.
:type rules: list
:param principals: A list of principals. Each representing a user or user group assigned to the access group.
Data in this array is handled based on the all_users parameter. If all_users is 'true',
any principal data is ignored and you can omit this parameter.
If all_users is 'false', the principal data is added to the access group.
:type principals: list
"""
        # rules may be omitted (None); treat that the same as an empty list
        for r in rules or []:
            assert isinstance(r, AssetRule)
        self.name = name
        self.all_assets = all_assets
        self.all_users = all_users
        self.rules = rules or []
        self.principals = principals
def as_payload(self, filter_=None):
payload = super(AccessGroupRequest, self).as_payload(True)
rule_list = []
for r in self.rules:
rule_list.append(r.as_payload())
payload.__setitem__('rules', rule_list)
if not self.all_users and self.principals:
principal_list = []
for p in self.principals:
assert isinstance(p, AssetRulePrincipal)
principal_list.append(p.as_payload())
payload.__setitem__('principals', principal_list)
return payload
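# Illustrative usage sketch (the values below are hypothetical and the AssetRule
# constructor arguments are omitted):
#
#     request = AccessGroupRequest(name='linux-hosts',
#                                  all_assets=False,
#                                  all_users=True,
#                                  rules=[some_asset_rule])
#
# The resulting object is passed to AccessGroupsApi.create() or
# AccessGroupsApi.edit(), which send it as the request payload.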
| {
"content_hash": "86f3cdaedb6d18687e8092e9644764ea",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 120,
"avg_line_length": 50.94,
"alnum_prop": 0.6441565240151813,
"repo_name": "tenable/Tenable.io-SDK-for-Python",
"id": "bf989a614a0e93f0b5f6ac389d4dc632fe667c21",
"size": "7641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tenable_io/api/access_groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "459766"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class VasSubscribeGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.article_code = None
self.nick = None
def getapiname(self):
return 'taobao.vas.subscribe.get'
| {
"content_hash": "31e5a0c1da6b91945b24f6fbf9efd81f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 27.916666666666668,
"alnum_prop": 0.6865671641791045,
"repo_name": "CooperLuan/devops.notes",
"id": "07d6ddb609814f3f5d407342ff2d9b7e0dbbd548",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/top/api/rest/VasSubscribeGetRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "211546"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
import os
import json
import itertools
# experimenting with different popups
def make_rows(headers):
varblock=[]
# makes a list of rows from a given input header
for row in headers:
newrow='\t\t\t\t + "<br>%s:" + feature.properties.place' % (row)
varblock.append(newrow)
return varblock
# the function actually used to make the styles table
def make_rows2(headers):
	varblock=[]
	# makes a list of rows from a given input header
	for row in headers:
		row1=row
		row2=row
		if row==headers[0]:
			newrow=""" var popupText = "<table><tr><th>%s: </th><td>" + feature.properties['%s']+"</td></tr>"; """ % (row1,row2)
		elif row==headers[-1]:
			# the last header also closes the generated table
			newrow=""" var popupText = popupText+ "<tr><th>%s: </th><td>" + feature.properties['%s']+"</td></tr></table>"; """ % (row1,row2)
		else:
			newrow=""" var popupText = popupText+ "<tr><th>%s: </th><td>" + feature.properties['%s']+"</td></tr>"; """ % (row1,row2)
		varblock.append(newrow)
	return varblock
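# Illustrative output: for headers ['name', 'place'] (hypothetical values),
# make_rows2 yields JavaScript lines along the lines of
#   var popupText = "<table><tr><th>name: </th><td>" + feature.properties['name']+"</td></tr>";
#   var popupText = popupText+ "<tr><th>place: </th><td>" + feature.properties['place']+"</td></tr></table>";
# which making_blockstr() then wraps into the onEachFeature popup handler.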
# experimenting with different popups
def make_rows5(headers):
	varblock=[]
	# makes a list of rows from a given input header
	for row in headers:
		row1=row
		row2=row
		newrow=""" var popupText = "%s: " + feature.properties['%s']
		""" % (row1,row2)
		varblock.append(newrow)
	return varblock
# experimenting with different popups
def make_rows3(headers):
varblock=[]
# makes a list of rows from a given input header
for row in headers:
row1='<tr><td>'+row+'<td>'
row2=row
if row==headers[0]:
newrow=""" var popupText = "%s:" + feature.properties['%s']
""" % (row1,row2)
else:
newrow=""" var popupText = popupText+ "%s: <td>" + feature.properties['%s']
""" % (row1,row2)
varblock.append(newrow)
if row==headers[-1]:
pass
return varblock
# experimenting with different popups
def make_rows4(headers):
varblock=[]
# makes a list of rows from a given input header
for row in headers:
varblock=[' var popupText = "<table width=1>"']
row1=row
row2=row
if row==headers[0]:
newrow=""" + "<p>%s:" + feature.properties['%s']+"<.p>"
""" % (row1,row2)
else:
newrow=""" +"<td>%s:</td><td>" + feature.properties['%s']+"</td></tr>"
""" % (row1,row2)
if row==headers[-1]:
newrow=""" +"<td>%s:</td><td>" + feature.properties['%s']+"</td></tr></table>"
""" % (row1,row2)
varblock.append(newrow)
if row==headers[-1]:
pass
return varblock
# makes the block str for all the unique data within the html file
def making_blockstr(varblock):
start="""function addDataToMap(data, map) {
var dataLayer = L.geoJson(data, {
onEachFeature: function(feature, layer) {"""
end="""
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350} ); }
});
dataLayer.addTo(map);\n}"""
total=''
for row in varblock:
total+=row
return start+total+end
# attempting to add real-time updates
def making_blockstr2(varblock):
start=""" function addDataToMap(data, map) {
var dataLayer = L.geoJson(data, {
onEachFeature: function(feature, layer) {"""
end="""
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350} ); }
});
dataLayer.addTo(map);\n}"""
total=''
for row in varblock:
total+=row
return start+total+end
# makes the style bindings for each element
def make_bindings(headers):
varblock=make_rows2(headers)
block=making_blockstr2(varblock)
return block
# make_blockstr with color and element options added (newer)
def making_blockstr2(varblock,count,colorline,element,time):
start="""function addDataToMap%s(data, map) {
var dataLayer = L.geoJson(data, {
onEachFeature: function(feature, layer) {""" % count
end="""
layer.bindPopup(popupText, {autoPan:false, maxHeight:500, maxWidth:350} ); }
});
dataLayer.addTo(map);\n\t\tsetTimeout(function() {\n\t\t\t\tdataLayer.clearLayers();\n\t\t},%s);\n\t}\n}\nsetInterval(add%s,%s)""" % (time,count,time)
total=''
for row in varblock:
total+=row
if element=='Point':
return start+total+colorline+end
else:
return start+total+'\n'+colorline+end
# make bindings after color options were added
def make_bindings2(headers,count,colorline,element,time):
varblock=make_rows2(headers)
block=making_blockstr2(varblock,count,colorline,element,time)
return block
'''
# deprecated function that used to make the html
def make_html_block(headers,filenames):
string="""<!DOCTYPE html>
<html>
<head>
<meta charset=utf-8 />
<title>PipeGeoJSON Demo</title>
<meta name='viewport' content='initial-scale=1,maximum-scale=1,user-scalable=no' />
<script src="https://api.mapbox.com/mapbox.js/v2.2.4/mapbox.js"></script>
<script src="http://code.jquery.com/jquery-1.11.3.min.js"></script>
<link href='https://api.mapbox.com/mapbox.js/v2.2.4/mapbox.css' rel='stylesheet' />
<style>
body { margin:0; padding:0; }
#map { position:absolute; top:0; bottom:0; width:100%; }
</style>
</head>
<body>
<script src='https://api.mapbox.com/mapbox.js/plugins/leaflet-omnivore/v0.2.0/leaflet-omnivore.min.js'></script>
<div id='map'></div>
<script>
L.mapbox.accessToken = 'pk.eyJ1IjoibXVycGh5MjE0IiwiYSI6ImNpam5kb3puZzAwZ2l0aG01ZW1uMTRjbnoifQ.5Znb4MArp7v3Wwrn6WFE6A';
var map = L.mapbox.map('map', 'mapbox.streets',{
center: [1, 100],
zoom: 8
});
// omnivore will AJAX-request this file behind the scenes and parse it:
// note that there are considerations:
// - The file must either be on the same domain as the page that requests it,
// or both the server it is requested from and the user's browser must
// support CORS.
// Internally this uses the toGeoJSON library to decode the KML file
// into GeoJSON
function addDataToMap(data, map) {
var dataLayer = L.geoJson(data);
dataLayer.addTo(map);
}
"""
for row in filenames:
loc="""$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap(data,map); });""" % (row)
string+=loc
return string+make_bindings(headers)+"""\n</script>
</body>
</html>"""
a=make_html_block(headers,'index.html')
'''
#writes text file to given location
def writetxt(data,location):
with open(location,'w') as f:
f.writelines(data)
print 'Wrote text file to location %s' % location
# writes the html file to a document then opens it up in safari (beware it will call a terminal command)
def load(lines,filename):
with open(filename,'w') as f:
f.writelines(lines)
f.close()
os.system('open -a Safari '+filename)
'''
# not used in current iteration of module
def make_all_headertype(header,geojsonlocations):
a=make_html_block(header,geojsonlocations)
writetxt(a,'a.html')
#load(a,'a.html')
'''
# given a list of file names and kwargs carried throughout, returns a string of the function bindings for each element
def make_bindings_type(filenames,color_input,colorkey,time,file_dictionary):
string=''
blocky="""\nfunction addDataToMap(data, map) {
var dataLayer = L.geoJson(data);
dataLayer.addTo(map);
}\n"""
count=0
for row in filenames:
color_input=''
count+=1
filename=row
with open(row) as data_file:
data = json.load(data_file)
#pprint(data)
data=data['features']
data=data[0]
featuretype=data['geometry']
featuretype=featuretype['type']
data=data['properties']
if not file_dictionary==False:
try:
color_input=file_dictionary[filename]
except Exception:
color_input=''
if featuretype=='Point':
colorline=get_colorline_marker(color_input)
else:
colorline=get_colorline_marker2(color_input)
if not colorkey=='':
if featuretype=='Point':
colorline=get_colorline(data[str(colorkey)])
else:
colorline=get_colorline_marker2(data[str(colorkey)])
if featuretype=='Point':
colorline=get_colorline_marker(color_input)
else:
colorline=get_colorline_marker2(color_input)
headers=[]
for row in data:
headers.append(str(row))
blocky= blocky="""\nfunction addDataToMap%s(data, map) {
var dataLayer = L.geoJson(data);
dataLayer.addTo(map);
}\n""" % count
preloc='function add%s() {\n' % (str(count))
loc="""\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
if featuretype=='Point':
string+=blocky+preloc+loc+make_bindings2(headers,count,colorline,featuretype,time)+'\n'
else:
string+=blocky+preloc+loc+make_bindings2(headers,count,colorline,featuretype,time)+'\n'
return string
# given a list of file names and kwargs carried throughout, returns a string of the function bindings for each element
def make_bindings_type2(filenames,color_input,colorkey):
string=''
blocky="""\nfunction addDataToMap(data, map) {
var dataLayer = L.geoJson(data);
dataLayer.addTo(map);
}\n"""
count=0
for row in filenames:
count+=1
filename=row
with open(row) as data_file:
data = json.load(data_file)
#pprint(data)
data=data['features']
data=data[0]
featuretype=data['geometry']
featuretype=featuretype['type']
data=data['properties']
#if a point and no entry for color_input
if featuretype=='Point' and colorkey=='':
colorline=get_colorline_marker(color_input)
elif not colorkey=='' and featuretype=='Point':
colorline=get_colorline_marker(data[str(colorkey)])
elif not colorkey=='':
colorline=get_colorline_marker2(data[str(colorkey)])
elif colorkey=='':
colorline=get_colorline_marker2(str(color_input))
headers=[]
for row in data:
headers.append(str(row))
blocky= blocky="""\nfunction addDataToMap%s(data, map) {
var dataLayer = L.geoJson(data);
dataLayer.addTo(map);
}\n""" % count
start='function add() {'
loc="""\n$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
if featuretype=='Point':
string+=blocky+loc+make_bindings2(headers,count,colorline,featuretype)+'\n'
else:
string+=blocky+loc+make_bindings2(headers,count,colorline,featuretype)+'\n'
return string
# makes the corresponding styled html for the map we're about to load
def make_html2(filenames,color_input,colorkey,apikey,time,file_dictionary):
block="""<html>
<head>
<meta charset=utf-8 />
<title>PipeGeoJSON Demo</title>
<meta name='viewport' content='initial-scale=1,maximum-scale=1,user-scalable=no' />
<script src="https://api.mapbox.com/mapbox.js/v2.2.4/mapbox.js"></script>
<script src="http://code.jquery.com/jquery-1.11.3.min.js"></script>
<link href='https://api.mapbox.com/mapbox.js/v2.2.4/mapbox.css' rel='stylesheet' />
<style>
body { margin:0; padding:0; }
#map { position:absolute; top:0; bottom:0; width:100%; }
</style>
</head>
<body>
<style>
table, th, td {
border: 1px solid black;
}
</style>
<script src='https://api.mapbox.com/mapbox.js/plugins/leaflet-omnivore/v0.2.0/leaflet-omnivore.min.js'></script>
<div id='map'></div>
<script>
L.mapbox.accessToken = 'pk.eyJ1IjoibXVycGh5MjE0IiwiYSI6ImNpam5kb3puZzAwZ2l0aG01ZW1uMTRjbnoifQ.5Znb4MArp7v3Wwrn6WFE6A';
var map = L.mapbox.map('map', 'mapbox.streets',{
center: [38, -102.0],
zoom: 5
});
// omnivore will AJAX-request this file behind the scenes and parse it:
// note that there are considerations:
// - The file must either be on the same domain as the page that requests it,
// or both the server it is requested from and the user's browser must
// support CORS.
// Internally this uses the toGeoJSON library to decode the KML file
// into GeoJSON
\n""".replace('pk.eyJ1IjoibXVycGh5MjE0IiwiYSI6ImNpam5kb3puZzAwZ2l0aG01ZW1uMTRjbnoifQ.5Znb4MArp7v3Wwrn6WFE6A',apikey)+make_bindings_type(filenames,color_input,colorkey,time,file_dictionary)+"""\n</script>
</body>
</html>"""
return block
# get colors for just markers
def get_colors(color_input):
colors=[['light green','#36db04'],
['blue','#1717b5'],
['red','#fb0026'],
['yellow','#f9fb00'],
['light blue','#00f4fb'],
['orange','#dd5a21'],
['purple','#6617b5'],
['green','#1a7e55'],
['brown','#b56617'],
['pink','#F08080'],
['default','#1766B5']]
for row in colors:
if row[0]==color_input:
return row[1]
return '#1766B5'
# get colors for everything else
def get_colors2(color_input):
colors=[['light green','#36db04'],
['blue','#1717b5'],
['red','#fb0026'],
['yellow','#f9fb00'],
['light blue','#00f4fb'],
['orange','#dd5a21'],
['purple','#6617b5'],
['green','#1a7e55'],
['brown','#b56617'],
['pink','#F08080'],
['default','#1766B5']]
for row in colors:
if row[0]==color_input:
return row[1]
return '#1766B5'
# get colorline for marker
def get_colorline_marker(color_input):
colorline=""" layer.setIcon(L.mapbox.marker.icon({'marker-color': '%s','marker-size': 'small'}))""" % get_colors(color_input)
return colorline
# get colorline for non-marker objects
def get_colorline_marker2(color_input):
colorline=""" layer.setStyle({color: '%s', weight: 5, opacity: 1});""" % get_colors2(color_input)
return colorline
# THE FUNCTION YOU ACTUALLY USE WITH THIS MODULE
def loadparsehtmlrealtime(filenames,apikey,**kwargs):
color=''
colorkey=''
frame=False
time=1000
file_dictionary=False
for key,value in kwargs.iteritems():
if key=='color':
color=str(value)
if key=='colorkey':
colorkey=str(value)
if key=='frame':
if value==True:
frame=True
if key=='time':
time=int(value)
if key=='file_dictionary':
file_dictionary=value
block=make_html2(filenames,color,colorkey,apikey,time,file_dictionary)
if frame==True:
with open('index.html','w') as f:
f.write(block)
f.close()
return 'http://localhost:8000/index.html'
else:
load(block,'index.html')
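# Illustrative usage (file names and API key are placeholders):
#   loadparsehtmlrealtime(['points.geojson', 'routes.geojson'],
#                         'your-mapbox-api-key',
#                         colorkey='color',
#                         time=2000)
# This writes index.html and opens it in Safari; the generated page then polls
# http://localhost:8000/<filename> every `time` milliseconds, so the geojson
# files are expected to be served by a local web server on port 8000.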
| {
"content_hash": "39596411f4d93d63e1d385bdd96f6769",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 203,
"avg_line_length": 28.47107438016529,
"alnum_prop": 0.6654571843251088,
"repo_name": "murphy214/berrl",
"id": "c3dd95f568a2fbebbc094c88b52becd409549b22",
"size": "13781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lib/berrl/piperealtime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7910"
},
{
"name": "Python",
"bytes": "440698"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
import __future__
from ModelTracker.models import History
import datetime
from django.apps import apps
def getModel(table_name):
return next((m for m in apps.get_models() if m._meta.db_table==table_name), None)
class Command(BaseCommand):
help = 'Restore Object to old status'
def add_arguments(self, parser):
parser.add_argument('--id', nargs='?', type=str,default=None)
parser.add_argument("--state",type=str,nargs='?',default="new")
def handle(self, *args, **options):
if not options.get("id",None):
print ("Change ID is needed")
exit(1)
print (options)
h = History.objects.get(id=int(options["id"]))
model = getModel(h.table)
if model == None:
print("Can't find the Model")
exit(2)
d=[f.name for f in model._meta.get_fields()]
if options["state"]=="old": state=h.old_state
else: state=h.new_state
keys2del=[]
for key in state:
if (key.startswith("_") and "_cache" in key) or (key not in d and not ("_id" in key and key[:-3] in d)):
keys2del.append(key)
if type(state[key])==type({}):
if state[key].get("_type",None) == "datetime":
state[key]=datetime.datetime.strptime(state[key]["value"],"%Y-%m-%d %H:%M:%S")
elif state[key].get("_type",None) == "date":
state[key]=datetime.datetime.strptime(state[key]["value"],"%Y-%m-%d")
for key in keys2del:
del state[key]
print(state)
m=model(**state)
m.save("CLI",event_name="Restore Record to %s (%s)"%(options["id"],options["state"]))
| {
"content_hash": "5573806d4d9d31363a586dfb417c00ef",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 116,
"avg_line_length": 38.47826086956522,
"alnum_prop": 0.5689265536723164,
"repo_name": "mkalioby/ModelTracker",
"id": "1d7c11fbf09ffcd73700a97499b637e77475199a",
"size": "1770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ModelTracker/management/commands/restoreObject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "18833"
}
],
"symlink_target": ""
} |
"""
3D visualization of activation maps using Mayavi
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD
# Standard library imports
import os
# Standard scientific libraries imports (more specific imports are
# delayed, so that the part module can be used without them).
import numpy as np
from scipy import ndimage
from nipy.io.imageformats import load
# The sform for MNI templates
mni_sform = np.array([[-1, 0, 0, 90],
[ 0, 1, 0, -126],
[ 0, 0, 1, -72],
[ 0, 0, 0, 1]])
mni_sform_inv = np.linalg.inv(mni_sform)
################################################################################
# Caching of the MNI template.
################################################################################
class _AnatCache(object):
""" Class to store the anat array in cache, to avoid reloading it
each time.
"""
anat = None
anat_sform = None
blurred = None
@classmethod
def get_anat(cls):
from nipy.utils.data import templates
if cls.anat is not None:
return cls.anat, cls.anat_sform, cls.anat_max
filename = templates.get_filename(
'ICBM152', '1mm', 'T1_brain.nii.gz')
if not os.path.exists(filename):
raise OSError('Cannot find template file T1_brain.nii.gz'
'required to plot anatomy. Possible path: %s'
% filename)
anat_im = load(filename)
anat = anat_im.get_data()
anat = anat.astype(np.float)
anat_mask = ndimage.morphology.binary_fill_holes(anat > 0)
anat = np.ma.masked_array(anat, np.logical_not(anat_mask))
cls.anat_sform = anat_im.get_affine()
cls.anat = anat
cls.anat_max = anat.max()
return cls.anat, cls.anat_sform, cls.anat_max
@classmethod
def get_blurred(cls):
if cls.blurred is not None:
return cls.blurred
anat, _, _ = cls.get_anat()
cls.blurred = ndimage.gaussian_filter(
(ndimage.morphology.binary_fill_holes(
ndimage.gaussian_filter(
(anat > 4800).astype(np.float), 6)
> 0.5
)).astype(np.float),
2).T.ravel()
return cls.blurred
| {
"content_hash": "2494a61493402658eedd098450501e6d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 31.103896103896105,
"alnum_prop": 0.5235908141962422,
"repo_name": "yarikoptic/NiPy-OLD",
"id": "7e253d5f0a29762f2ce61f0674e6c51c375ae1ef",
"size": "2395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipy/neurospin/viz/anat_cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4411801"
},
{
"name": "Objective-C",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "2617786"
}
],
"symlink_target": ""
} |
import os
import inspect
class Environment(object):
"""
A class representing which environment the client library is using.
Pass in one of the following values as the first argument to
:class:`braintree.Configuration.configure() <braintree.configuration.Configuration>` ::
braintree.Environment.Sandbox
braintree.Environment.Production
"""
def __init__(self, server, port, is_ssl, ssl_certificate):
self.__server = server
self.__port = port
self.is_ssl = is_ssl
self.ssl_certificate = ssl_certificate
@property
def port(self):
return int(self.__port)
@property
def protocol(self):
return self.__port == "443" and "https://" or "http://"
@property
def server(self):
return self.__server
@property
def server_and_port(self):
return self.__server + ":" + self.__port
@staticmethod
def braintree_root():
return os.path.dirname(inspect.getfile(Environment))
Environment.Development = Environment("localhost", os.getenv("GATEWAY_PORT") or "3000", False, None)
Environment.Sandbox = Environment("sandbox.braintreegateway.com", "443", True, Environment.braintree_root() + "/ssl/sandbox_braintreegateway_com.ca.crt")
Environment.Production = Environment("www.braintreegateway.com", "443", True, Environment.braintree_root() + "/ssl/www_braintreegateway_com.ca.crt")
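# Illustrative values derived from the definitions above:
#   Environment.Sandbox.server_and_port -> 'sandbox.braintreegateway.com:443'
#   Environment.Sandbox.protocol        -> 'https://'
#   Environment.Development.protocol    -> 'http://' (with the default port)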
| {
"content_hash": "84cdfe59e8cb577eb991540a79b936ab",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 153,
"avg_line_length": 33.76190476190476,
"alnum_prop": 0.6720733427362482,
"repo_name": "eldarion/braintree_python",
"id": "2f8bcdc27bfee4da440c85b7384aa7f28e08dc1f",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "braintree/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "394920"
},
{
"name": "Ruby",
"bytes": "3033"
}
],
"symlink_target": ""
} |
import fresh_tomatoes
import media
# Initializing Movie Data
shutter_island = media.Movie("Shutter Island",
"In 1954, a U.S. marshal investigates the \
disappearance of a murderess who escaped from \
a hospital for the criminally insane.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTMxMTIyNzMxMV5BMl5BanBnXkFtZTcwOTc4OTI3Mg@@._V1_.jpg", # NOQA
"https://www.youtube.com/watch?v=5iaYLCiq5RM")
rogue_one = media.Movie("Rogue One",
"The Rebel Alliance makes a risky move to steal the \
plans for the Death Star, setting up the epic saga to \
follow.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMjEwMzMxODIzOV5BMl5BanBnXkFtZTgwNzg3OTAzMDI@._V1_SY1000_SX675_AL_.jpg", # NOQA
"https://www.youtube.com/watch?v=frdj1zb9sMY")
prestige = media.Movie("The Prestige",
"Two stage magicians engage in competitive \
one-upmanship in an attempt to create the ultimate \
stage illusion.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMjA4NDI0MTIxNF5BMl5BanBnXkFtZTYwNTM0MzY2._V1_.jpg", # NOQA
"https://www.youtube.com/watch?v=o4gHCmTQDVI")
thor = media.Movie("Thor",
"The powerful but arrogant god Thor is cast out of Asgard \
to live amongst humans in Midgard (Earth).",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYxMjA5NDMzNV5BMl5BanBnXkFtZTcwOTk2Mjk3NA@@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # NOQA
"https://www.youtube.com/watch?v=JOddp-nlNvQ")
moana = media.Movie("Moana",
"When a terrible curse incurred by the Demigod Maui reaches an \
impetuous Chieftain's daughter's island, she \
answers the Ocean's call to set things right.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMjI4MzU5NTExNF5BMl5BanBnXkFtZTgwNzY1MTEwMDI@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # NOQA
"https://www.youtube.com/watch?v=LKFuXETZUsI")
assassins_creed = media.Movie("Assassin's Creed",
"When Callum Lynch explores the memories of his ancestor Aguilar \
and gains the skills of a Master Assassin, he \
discovers he is a descendant of the secret \
Assassins society.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BNzE1OTczNTc1OF5BMl5BanBnXkFtZTgwMzgyMDI3MDI@._V1_SY1000_CR0,0,674,1000_AL_.jpg", # NOQA
"https://www.youtube.com/watch?v=gfJVoF5ko1Y")
# Storing movies in an Array to pass
movies = [shutter_island, rogue_one, prestige, thor, moana, assassins_creed]
# Displaying movies in HTML format
fresh_tomatoes.open_movies_page(movies)
| {
"content_hash": "ff409ee1d9a92de3d8a0f357485f9eae",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 172,
"avg_line_length": 60.90384615384615,
"alnum_prop": 0.577202399747395,
"repo_name": "danaguinaldo/Movie-Trailer-Project",
"id": "4310fe3950fac707b26dc735f00e079ebd3f9ba2",
"size": "3167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entertainment_center.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6978"
},
{
"name": "Python",
"bytes": "9313"
}
],
"symlink_target": ""
} |
import os
from werkzeug.utils import import_string
from flask import current_app
from flask.cli import with_appcontext
import click
from .tests import register_test_helpers
def create_shell_context(*paths):
result = {}
for path in paths:
if '.' in path:
name = path.split('.')[-1]
if name == '*':
path = '.'.join(path.split('.')[:-1])
module = import_string(path)
result.update(module.__dict__)
else:
result[name] = import_string(path)
else:
result[path] = import_string(path)
return {k: v for k, v in result.items() if not k.startswith('__')}
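# Illustrative example (module paths are hypothetical):
#   create_shell_context('myapp.models.*', 'datetime.datetime')
# imports every name from myapp.models that does not start with '__',
# plus datetime.datetime, returning a dict such as
#   {'User': <class 'myapp.models.User'>, 'datetime': <class 'datetime.datetime'>}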
def register_shell_context(app, *context_paths, test_helpers=True):
@app.shell_context_processor
def shell_context_processor():
ctx = {}
if test_helpers:
register_test_helpers(app)
ctx['client'] = app.test_client()
ctx.update(create_shell_context(*context_paths))
return ctx
@click.command()
@click.option('--verbose', '-v', is_flag=True)
@click.option('--no-confirm', is_flag=True)
@click.option('--bind', '-b', default=None)
@with_appcontext
def dbreinit(verbose, no_confirm, bind=None):
"""Reinitialize database (temporary before using alembic migrations)"""
from .sqla import db_reinit
if not no_confirm:
click.confirm('This will drop ALL DATA. Do you want to continue?', abort=True)
db = current_app.extensions['sqlalchemy'].db
if verbose:
echo_ = db.engine.echo
db.engine.echo = True
db_reinit(db, bind)
if verbose:
db.engine.echo = echo_
@click.command()
@click.option('--bind', '-b', default=None)
@with_appcontext
def dbshell(bind):
"""Database shell (currently only PostgreSQL supported)."""
db = current_app.extensions['sqlalchemy'].db
engine = db.get_engine(bind=bind)
assert engine.name == 'postgresql'
cmd, url = 'psql', engine.url
url_map = (('U', 'username'), ('h', 'host'), ('p', 'port'), ('d', 'database'))
for psql_key, url_attr in url_map:
if getattr(url, url_attr, None):
cmd += ' -{} {}'.format(psql_key, getattr(url, url_attr))
return os.system(cmd)
| {
"content_hash": "049607f1e9562f5493684e62947d8f35",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 86,
"avg_line_length": 30.767123287671232,
"alnum_prop": 0.6024042742653607,
"repo_name": "vgavro/flask-vgavro-utils",
"id": "7158bc32fde75e5a17bbc5dd36335bdfa1814529",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_vgavro_utils/cli.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "79862"
},
{
"name": "Shell",
"bytes": "2338"
}
],
"symlink_target": ""
} |
import os
import sys
import site
lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib/python2.4/site-packages'))
path = site.addsitedir(lib_path, set())
if path:
sys.path = list(path) + sys.path
os.environ['DJANGO_SETTINGS_MODULE'] = "django_de.settings"
from django.core.management import call_command
from django.core.mail import mail_admins
def main():
"""
    Notifies the admins about the committed revision and deletes the
    cached static files for that revision.
"""
repo, rev = sys.argv[1:3]
mail_admins("SVN revision %s committed!" % rev, "SVN repo: %s\nhttps://www.django-de.org/trac/changeset/%s/" % (repo, rev), fail_silently=True)
call_command('deletestatic', **{'repo': repo, 'rev': rev})
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write("Usage: %s REPOS TXN\n" % (sys.argv[0]))
else:
main()
| {
"content_hash": "c52f7d46cababeb2556531701fcf1798",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 147,
"avg_line_length": 32.48148148148148,
"alnum_prop": 0.6499429874572406,
"repo_name": "django-de/django-de-v2",
"id": "ab9bd842e718743a3e13dc1f560efb7713412869",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extras/post-commit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "172"
},
{
"name": "JavaScript",
"bytes": "21778"
},
{
"name": "Python",
"bytes": "58803"
}
],
"symlink_target": ""
} |
import glob
import os
import sys
from euca2ools.commands.eustore.installimage import *
class InstallUnpackedImage(InstallImage):
def get_tarball(self, workdir):
return None
def bundle_and_register_all(self, dir, tarfile):
names = glob.glob(os.path.join(dir, '*'))
kernel_id = None
ramdisk_id = None
machine_id = None
for name in names:
if os.path.basename(name).startswith('vmlin'):
kernel_id = self._upload_and_register(name, 'kernel', dir)
elif os.path.basename(name).startswith('initr'):
ramdisk_id = self._upload_and_register(name, 'ramdisk', dir)
for name in names:
if os.path.basename(name).endswith('.img'):
machine_id = self._upload_and_register(name, 'machine', dir,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id)
return dict(machine=machine_id, kernel=kernel_id, ramdisk=ramdisk_id)
def _upload_and_register(self, name, image_type, dir, **kwargs):
print "Bundling/uploading {0}".format(image_type)
manifest_loc = self.bundle_and_upload_image(name, image_type, dir,
**kwargs)
# Image name pad algorithm from euca2ools 3.0.2
image_name_pad = '{0:0>8x}-'.format(random.randrange(16**8))
image_name = image_name_pad + os.path.basename(name.split('.')[0])
req = RegisterImage(config=self.config,
service=self._InstallImage__eucalyptus,
ImageLocation=manifest_loc, Name=image_name,
Description=self.args.get('description'),
Architecture=self.args.get('architecture'))
response = req.main()
image_id = response.get('imageId')
if self.args['show_progress']:
print 'Registered {0} image {1}'.format(image_type, image_id)
os.system('euca-modify-image-attribute -l -a all {0}'.format(image_id))
return image_id
if __name__ == '__main__':
InstallUnpackedImage.run()
| {
"content_hash": "9a5e7ccb736f6e920f543af95bcf2383",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 40.79245283018868,
"alnum_prop": 0.5698427382053654,
"repo_name": "eucalyptus/silvereye",
"id": "8db2498ed0bfefc8800c3bcc88f47c84637f6e4f",
"size": "3678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anaconda-updates/6/scripts/install-unpacked-image.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "212181"
},
{
"name": "Shell",
"bytes": "37984"
}
],
"symlink_target": ""
} |
import requests
import time
def action(**kwargs):
''' This method is called to action a reaction '''
redata = kwargs['redata']
jdata = kwargs['jdata']
logger = kwargs['logger']
run = True
# Check for Trigger
if redata['trigger'] > jdata['failcount']:
run = False
# Check for lastrun
checktime = time.time() - float(redata['lastrun'])
if checktime < redata['frequency']:
run = False
if redata['data']['call_on'] not in jdata['check']['status']:
run = False
if run:
return callSalt(redata, jdata, logger)
else:
return None
def callSalt(redata, jdata, logger):
''' Perform actual call '''
url = redata['data']['url']
payload = redata['data']
try:
req = requests.post(url, data=payload, timeout=3.00, verify=False)
except:
return False
if req.status_code == 200:
line = "saltstack-cmdscript: Reqeust to %s sent for monitor %s - Successful" % (url, jdata['cid'])
logger.info(line)
return True
else:
line = "saltstack-cmdscript: Request to %s sent for monitor %s - False" % (url, jdata['cid'])
logger.info(line)
return False
| {
"content_hash": "b8ed978998a861ba40ece70aa6a86030",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 106,
"avg_line_length": 28.761904761904763,
"alnum_prop": 0.5927152317880795,
"repo_name": "madflojo/cloudroutes-service",
"id": "e81683311cfeee17eff674e0bdfb6380d7d32fe0",
"size": "1478",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/actions/actions/saltstack-cmdscript/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17816"
},
{
"name": "HTML",
"bytes": "227943"
},
{
"name": "JavaScript",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "678083"
},
{
"name": "Shell",
"bytes": "5859"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
def __init__(
self,
plotly_name="tickangle",
parent_name="scatterpolargl.marker.colorbar",
**kwargs,
):
super(TickangleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| {
"content_hash": "f866e118b0710613bfbfaf52b4bbec20",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 29,
"alnum_prop": 0.5883620689655172,
"repo_name": "plotly/plotly.py",
"id": "32d32209d7522af61ff45870ba48ebf6f4d7f256",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/_tickangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
def is_palindrome(n):
return str(n) == str(n)[::-1]
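# Reverse-and-add process (Project Euler 55): repeatedly add a number to its
# digit reversal and check for a palindrome, giving up after 50 iterations.
# Worked example: 47 + 74 = 121, a palindrome after one iteration, so 47 is
# not a Lychrel number. Note that the helper below returns True when a
# palindrome is reached (i.e. the number is not Lychrel), despite its name.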
def is_lychrel_number(n, iteration=0):
if iteration >= 50:
return False
n += int(str(n)[::-1])
if is_palindrome(n):
return True
else:
return is_lychrel_number(n, iteration + 1)
# Testcases
assert is_lychrel_number(47), "Testcase failed"
assert is_lychrel_number(349), "Testcase failed"
assert not is_lychrel_number(196), "Testcase failed"
# Solve
solution = 0
for n in range(10*1000):
if not is_lychrel_number(n):
solution = solution + 1
print "Solution:", solution
| {
"content_hash": "304c67c6410939a4f60202b94e93e92c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 52,
"avg_line_length": 23.16,
"alnum_prop": 0.6373056994818653,
"repo_name": "fbcom/project-euler",
"id": "5edabeec76670df9b4c2dc11f9eac936bf11174b",
"size": "828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "055_lychrel_numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10885"
}
],
"symlink_target": ""
} |
import bz2
import gzip
from .basefile import BaseFile
class CompressedFile(BaseFile):
"""
The **CompressedFile** :py:class:`pynion.Multiton` is a file management object
    created directly through the :py:class:`pynion.File` factory.
Extends :py:class:`pynion.filesystem._filetypes.BaseFile`
It specifically manages compressed files.
"""
def __init__(self, file_name, action, ctype):
super(CompressedFile, self).__init__(file_name, action)
self.ctype = ctype
############
# BOOLEANS #
############
@property
def is_gzipped(self):
"""
:return: Check if compression is gzip
:rtype: bool
"""
return self.ctype == 'gzip'
@property
def is_bzipped(self):
"""
:return: Check if compression is bzip
:rtype: bool
"""
return self.ctype == 'bzip'
####################
# METHODS: ON FILE #
####################
def open(self):
"""
Open the file in the previously defined action type.
:rtype: self
"""
if self.is_open:
return self
if self.is_gzipped:
self._fd = gzip.open(self.full, self.action)
elif self.is_bzipped:
self._fd = bz2.BZ2File(self.full, self.action)
return self
def flush(self):
"""
:raise: :py:class:`pynion.errors.fe.FileWrongRequestedActionError` if
opened in read mode.
"""
self._work_action('w')
if self.is_bzipped:
return
self._fd.flush()
| {
"content_hash": "711b1638f14c701c15d9c9180e53a2c5",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 82,
"avg_line_length": 24.584615384615386,
"alnum_prop": 0.5381727158948686,
"repo_name": "sadeghiafshin/rgCrawler",
"id": "14b78dc9849059d2279e5d181c7d1f896ff0d091",
"size": "1598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".eggs/pynion-0.0.4-py2.7.egg/pynion/filesystem/_filetypes/compressedfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91825"
}
],
"symlink_target": ""
} |
"""Exceptions defined within cellpy"""
class Error(Exception):
"""Base class for other exceptions"""
pass
class ConfigFileNotWritten(Error):
"""Raised when the configuration file cannot be written"""
pass
class ConfigFileNotRead(Error):
"""Raised when the configuration file cannot be read"""
pass
class FileNotFound(Error):
"""Raised when the given file is not found"""
pass
class WrongFileVersion(Error):
"""Raised when the file version is wrong"""
pass
class DeprecatedFeature(Error):
"""Raised when the feature is recently deprecated"""
pass
class ExportFailed(Error):
"""Raised when exporting data failed"""
pass
class IOError(Error):
"""Raised when exporting data failed"""
pass
class NullData(Error):
"""Raised when required data is missing (e.g. voltage = None or summary_frames are missing)"""
pass
class NoCellFound(Error):
"""Raised when there are no cells, but a data is needed."""
pass
class UnderDefined(Error):
"""Raised when trying something that requires you to set
a missing prm first"""
| {
"content_hash": "4442f5c27c9596fc3a9e5d7436e42174",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 98,
"avg_line_length": 17.060606060606062,
"alnum_prop": 0.6802841918294849,
"repo_name": "jepegit/cellpy",
"id": "8a02aa5263acd8fed70f065fa571fd01afa816ec",
"size": "1126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cellpy/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1255316"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('browser', '0028_remove_compare_job_id'),
]
operations = [
migrations.AlterField(
model_name='compare',
name='year_range',
field=models.CharField(max_length=12),
),
migrations.AlterField(
model_name='searchsetanalysis',
name='year_range',
field=models.CharField(max_length=12),
),
]
| {
"content_hash": "218ebaefc570ac4d9bbb29ffe62a0e75",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 24.08695652173913,
"alnum_prop": 0.575812274368231,
"repo_name": "MRCIEU/melodi",
"id": "2148d6286fb69eec1852f4289e2f5dc38b98e936",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browser/migrations/0029_auto_20160628_1223.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44467"
},
{
"name": "CoffeeScript",
"bytes": "59265"
},
{
"name": "HTML",
"bytes": "253995"
},
{
"name": "JavaScript",
"bytes": "78844"
},
{
"name": "Python",
"bytes": "250291"
},
{
"name": "Shell",
"bytes": "6079"
}
],
"symlink_target": ""
} |
from decimal import Decimal as D
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_class, get_model
from . import exceptions
Order = get_model('order', 'Order')
Line = get_model('order', 'Line')
OrderDiscount = get_model('order', 'OrderDiscount')
order_placed = get_class('order.signals', 'order_placed')
class OrderNumberGenerator(object):
"""
Simple object for generating order numbers.
We need this as the order number is often required for payment
which takes place before the order model has been created.
"""
def order_number(self, basket):
"""
Return an order number for a given basket
"""
return 100000 + basket.id
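        # e.g. a basket with id 123 yields order number 100123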
class OrderCreator(object):
"""
Places the order by writing out the various models
"""
def place_order(self, basket, total, # noqa (too complex (12))
shipping_method, shipping_charge, user=None,
shipping_address=None, billing_address=None,
order_number=None, status=None, **kwargs):
"""
Placing an order involves creating all the relevant models based on the
basket and session data.
"""
if basket.is_empty:
raise ValueError(_("Empty baskets cannot be submitted"))
if not order_number:
generator = OrderNumberGenerator()
order_number = generator.order_number(basket)
if not status and hasattr(settings, 'OSCAR_INITIAL_ORDER_STATUS'):
status = getattr(settings, 'OSCAR_INITIAL_ORDER_STATUS')
try:
Order._default_manager.get(number=order_number)
except Order.DoesNotExist:
pass
else:
raise ValueError(_("There is already an order with number %s")
% order_number)
# Ok - everything seems to be in order, let's place the order
order = self.create_order_model(
user, basket, shipping_address, shipping_method, shipping_charge,
billing_address, total, order_number, status, **kwargs)
for line in basket.all_lines():
self.create_line_models(order, line)
self.update_stock_records(line)
# Record any discounts associated with this order
for application in basket.offer_applications:
# Trigger any deferred benefits from offers and capture the
# resulting message
application['message'] \
= application['offer'].apply_deferred_benefit(basket, order,
application)
# Record offer application results
if application['result'].affects_shipping:
# Skip zero shipping discounts
shipping_discount = shipping_method.discount(basket)
if shipping_discount <= D('0.00'):
continue
# If a shipping offer, we need to grab the actual discount off
# the shipping method instance, which should be wrapped in an
# OfferDiscount instance.
application['discount'] = shipping_discount
self.create_discount_model(order, application)
self.record_discount(application)
for voucher in basket.vouchers.all():
self.record_voucher_usage(order, voucher, user)
# Send signal for analytics to pick up
order_placed.send(sender=self, order=order, user=user)
return order
def create_order_model(self, user, basket, shipping_address,
shipping_method, shipping_charge, billing_address,
total, order_number, status, **extra_order_fields):
"""
Create an order model.
"""
order_data = {'basket': basket,
'number': order_number,
'site': Site._default_manager.get_current(),
'currency': total.currency,
'total_incl_tax': total.incl_tax,
'total_excl_tax': total.excl_tax,
'shipping_incl_tax': shipping_charge.incl_tax,
'shipping_excl_tax': shipping_charge.excl_tax,
'shipping_method': shipping_method.name,
'shipping_code': shipping_method.code}
if shipping_address:
order_data['shipping_address'] = shipping_address
if billing_address:
order_data['billing_address'] = billing_address
if user and user.is_authenticated():
order_data['user_id'] = user.id
if status:
order_data['status'] = status
if extra_order_fields:
order_data.update(extra_order_fields)
order = Order(**order_data)
order.save()
return order
def create_line_models(self, order, basket_line, extra_line_fields=None):
"""
Create the batch line model.
You can set extra fields by passing a dictionary as the
extra_line_fields value
"""
product = basket_line.product
stockrecord = None
if basket_line.product.get_product_class().track_stock:
stockrecord = basket_line.stockrecord
if not stockrecord:
raise exceptions.UnableToPlaceOrder(
"Baket line #%d has no stockrecord" % basket_line.id)
line_data = {
'order': order
}
if stockrecord:
partner = stockrecord.partner
line_data.update({
# Partner details
'partner': partner,
'partner_name': partner.name,
'partner_sku': stockrecord.partner_sku,
# Reporting details
'unit_cost_price': stockrecord.cost_price,
'unit_retail_price': stockrecord.price_retail,
})
line_data.update({
# Product details
'product': product,
'title': product.get_title(),
'upc': product.upc,
'quantity': basket_line.quantity,
# Price details
'line_price_excl_tax':
basket_line.line_price_excl_tax_incl_discounts,
'line_price_incl_tax':
basket_line.line_price_incl_tax_incl_discounts,
'line_price_before_discounts_excl_tax':
basket_line.line_price_excl_tax,
'line_price_before_discounts_incl_tax':
basket_line.line_price_incl_tax,
# Reporting details
'unit_price_incl_tax': basket_line.unit_price_incl_tax,
'unit_price_excl_tax': basket_line.unit_price_excl_tax,
# Shipping details
'est_dispatch_date':
basket_line.purchase_info.availability.dispatch_date
})
extra_line_fields = extra_line_fields or {}
if hasattr(settings, 'OSCAR_INITIAL_LINE_STATUS'):
if not (extra_line_fields and 'status' in extra_line_fields):
extra_line_fields['status'] = getattr(
settings, 'OSCAR_INITIAL_LINE_STATUS')
if extra_line_fields:
line_data.update(extra_line_fields)
order_line = Line._default_manager.create(**line_data)
self.create_line_price_models(order, order_line, basket_line)
self.create_line_attributes(order, order_line, basket_line)
self.create_additional_line_models(order, order_line, basket_line)
return order_line
def update_stock_records(self, line):
"""
Update any relevant stock records for this order line
"""
if line.product.get_product_class().track_stock:
line.stockrecord.allocate(line.quantity)
def create_additional_line_models(self, order, order_line, basket_line):
"""
Empty method designed to be overridden.
        Some applications require additional information about lines; this
method provides a clean place to create additional models that
relate to a given line.
"""
pass
def create_line_price_models(self, order, order_line, basket_line):
"""
Creates the batch line price models
"""
breakdown = basket_line.get_price_breakdown()
for price_incl_tax, price_excl_tax, quantity in breakdown:
order_line.prices.create(
order=order,
quantity=quantity,
price_incl_tax=price_incl_tax,
price_excl_tax=price_excl_tax)
def create_line_attributes(self, order, order_line, basket_line):
"""
Creates the batch line attributes.
"""
for attr in basket_line.attributes.all():
order_line.attributes.create(
option=attr.option,
type=attr.option.code,
value=attr.value)
def create_discount_model(self, order, discount):
"""
Create an order discount model for each offer application attached to
the basket.
"""
order_discount = OrderDiscount(
order=order,
message=discount['message'] or '',
offer_id=discount['offer'].id,
frequency=discount['freq'],
amount=discount['discount'])
result = discount['result']
if result.affects_shipping:
order_discount.category = OrderDiscount.SHIPPING
elif result.affects_post_order:
order_discount.category = OrderDiscount.DEFERRED
voucher = discount.get('voucher', None)
if voucher:
order_discount.voucher_id = voucher.id
order_discount.voucher_code = voucher.code
order_discount.save()
def record_discount(self, discount):
discount['offer'].record_usage(discount)
if 'voucher' in discount and discount['voucher']:
discount['voucher'].record_discount(discount)
def record_voucher_usage(self, order, voucher, user):
"""
Updates the models that care about this voucher.
"""
voucher.record_usage(order, user)
| {
"content_hash": "9ab5da3615cf53294f964f610325ac92",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 79,
"avg_line_length": 38.406716417910445,
"alnum_prop": 0.5840862722238415,
"repo_name": "michaelkuty/django-oscar",
"id": "3c874f0eac02ff39bfa694053bf295ba07124da5",
"size": "10293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/oscar/apps/order/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "544608"
},
{
"name": "HTML",
"bytes": "494989"
},
{
"name": "JavaScript",
"bytes": "434036"
},
{
"name": "Makefile",
"bytes": "2421"
},
{
"name": "Python",
"bytes": "1692176"
},
{
"name": "Shell",
"bytes": "3047"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
} |
import sklearn.datasets
import numpy as np
from action.linear.perceptron import Perceptron
from action.linear.adalineGD import AdalineGD
import pandas as pd
iris = sklearn.datasets.load_iris()
X = iris.data
y = iris.target
df1 = pd.DataFrame(X)
df2 = pd.DataFrame(y)
df = pd.concat([df1, df2], axis=1)
size = X.shape[0]
X_train, y_train = df.iloc[:100, [0,2]].values, df.iloc[:100, 4].values
y_train = np.where(y_train == 0, -1, 1)
def test_perceptron(X_train, y_train):
p = Perceptron()
p.fit(X_train, y_train)
# print("train ", X_train.shape, "test ", X_test.shape)
print(p.error_)
test_perceptron(X_train, y_train)
def test_adaline(X_train, y_train):
ad = AdalineGD(eta=0.01)
X_train = (X_train - X_train.mean())/X_train.std()
ad.fit(X_train, y_train)
print(ad.cost_)
# test_adaline(X_train, y_train)
| {
"content_hash": "ef7aa1e1059faaea248b793a50cec82b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 24.823529411764707,
"alnum_prop": 0.6670616113744076,
"repo_name": "jt120/start-ml",
"id": "22915a2998c73d1ef55319d56f77961f31780686",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "action/linear/linearTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3223"
},
{
"name": "Jupyter Notebook",
"bytes": "1307290"
},
{
"name": "Python",
"bytes": "32655"
}
],
"symlink_target": ""
} |
"""
.. module:: zeroMQInterface
:synopsis: Wraps ZeroMQ library with a simplified publish/subscribe
    interface. The serialization protocol is MessagePack. The Python
    dictionary is used as the standardized message format. A subscriber
    receives not only the contents of each message but also the publisher
    address and the topic.
"""
import os
import sys
scriptDir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(scriptDir)
import zmq
import json
#import msgpack
import importlib
import pdb
import processNodeUtils
import logMessageAdapter
import time
PUB_BUFF_SIZE = 10000000
# static functions
def _extractProcessConfig(processList, processName):
"""
Tries to find specific process dictionary settings at supplied
process path.
:param processList: list of processes dictionaries
:type processList: list
:return: dictionary of process settings
:raises: ValueError
"""
processDict = {}
for process in processList:
if (process['processName'] == processName):
processDict = process
break
if (not(processDict)):
raise ValueError("Process configuration not found in config file")
return processDict
def _extractConfig(configFilePath, publisherName):
"""
Extracts the endpoint address and the dictionary that contains other connection
information
:param configFilePath: path to the network configuration file
:type configFilePath: str
:param publisherName: name of publisher
:type publisherName: str
:returns: endPointAddress (str), processConfigDict (dict)
"""
masterProcessConfig = processNodeUtils.importConfigJson(configFilePath)
processConfigDict = _extractProcessConfig(masterProcessConfig['processList'],
publisherName)
endPointIdsList = masterProcessConfig['endPointsIds']
endPointID = processConfigDict['endPoint']
endPointAddress = _convertIDToAddress(endPointID, endPointIdsList)
return endPointAddress, processConfigDict, endPointIdsList
def _convertIDToAddress(endPointID, endPointIdsList):
endPointFound = False
for item in endPointIdsList:
if (item['id'] == endPointID):
endPointAddress = item['address']
endPointFound = True
if (not(endPointFound)):
print('endPointID: ' + str(endPointID))
print('endPointIdsList: ' + str(endPointIdsList))
raise ValueError("can't match 'endPoint' in 'endPointIds'")
return endPointAddress
class ZeroMQPublisher():
def __init__(self, endPointAddress=None):
"""
Constructor. Sets up ZeroMQ publisher socket.
        :param endPointAddress: optional endpoint address to bind to immediately
            (e.g., 'tcp://127.0.0.1:5555'); if None, call bind() later
"""
self.context = zmq.Context()
self.publisher = self.context.socket(zmq.PUB)
self.publisher.set_hwm(PUB_BUFF_SIZE)
if (endPointAddress is not(None)):
self.bind(endPointAddress)
def __del__(self):
"""
Destructor. Closes sockets
"""
self.publisher.close()
self.context.term()
def bind(self, endPointAddress):
"""
Binds the publisher to the endpoint address
:param endPointAddress: endpoint address (e.g., 'tcp://127.0.0.1:5555')
:type endPointAddress: str
"""
self.endPointAddress = endPointAddress
self.publisher.bind(endPointAddress)
def importProcessConfig(self, configFilePath,
publisherName=processNodeUtils.getModuleName(os.path.realpath(__file__))):
"""
Registers publisher settings based off config file
:param configFilePath: full config file path
:type configFilePath: str
        :param publisherName: name of the publisher process (defaults to this module's name)
        :type publisherName: str
:raises: ValueError
"""
self.endPointAddress, self.processConfigDict, endPointIdsList = _extractConfig(configFilePath,
publisherName)
self.bind(self.endPointAddress)
self.publisherName = publisherName
self.logAdapter = logMessageAdapter.LogMessageAdapter(publisherName)
def logPubConnections(self):
"""
Method that logs the publisher connection information
"""
logMsg = 'Binding to address ' + str(self.endPointAddress)
self.send('log', self.logAdapter.genLogMessage(logLevel=1, message=logMsg))
def send(self, topic, inDict):
"""
Main send function over ZeroMQ socket. Input dictionary gets
serialized and sent over wire.
:param str topic: string representing the message topic
:param dictionary dict: data payload input
"""
#serialDict = msgpack.dumps(dict)
sendDict = {}
sendDict['endPointAddress'] = self.endPointAddress
sendDict['contents'] = inDict
serialDict = json.dumps(sendDict)
self.publisher.send_multipart([str.encode(topic),
#str.encode(self.endPointAddress),
str.encode(serialDict)])
class ZeroMQSubscriber():
def __init__(self, publisherRef=None):
"""
Constructor. Sets up ZeroMQ subscriber socket and poller object
:return:
"""
self.context = zmq.Context()
self.subscriberList = []
self.poller = zmq.Poller()
if (publisherRef is not(None)):
self.logPublisher = publisherRef
def __del__(self):
"""
Destructor. Closes ZeroMQ connections.
"""
for item in self.subscriberList:
item['socket'].close()
self.context.term()
def setPublisherRef(self, publisherRef):
"""
Sets the publisher handle so this class can publish log messages
:param publisherRef: publisher handle (passed by reference)
:type: ZeroMQPublisher()
"""
self.logPublisher = publisherRef
def importProcessConfig(self, configFilePath, subscriberName=processNodeUtils.getModuleName(os.path.realpath(__file__))):
"""
Registers subscriber settings based off config file
:param configFilePath: full config file path
:type configFilePath: str
        :param subscriberName: name of the subscriber process (defaults to this module's name)
:type subscriberName: str
:raises: ValueError
"""
logMsgsList = []
self.subscriberName = subscriberName
self.endPointAddress, self.processConfigDict, endPointsIdsList = _extractConfig(configFilePath,
subscriberName)
self.logAdapter = logMessageAdapter.LogMessageAdapter(subscriberName)
if ('subscriptions' in self.processConfigDict):
for subDict in self.processConfigDict['subscriptions']:
self.connectSubscriber(_convertIDToAddress(subDict['endPoint'], endPointsIdsList))
for topic in subDict['topics']:
self.subscribeToTopic(topic)
def connectSubscriber(self, endPointAddress):
"""
        Method to create a subscriber connection to a particular publisher
        :param endPointAddress: endpoint address of the publisher to connect to
        :type endPointAddress: str
"""
self.subscriberList.append({'endPoint': endPointAddress,
'socket': self.context.socket(zmq.SUB), 'topics': []})
self.subscriberList[-1]['socket'].connect(endPointAddress)
self.poller.register(self.subscriberList[-1]['socket'], zmq.POLLIN)
def subscribeToTopic(self, topic):
"""
Subscribes class instance to most recently connected subscriber
:param topic: topic to subscriber to (filters other topics if not subscribed)
:type topic: str
"""
self.subscriberList[-1]['topics'].append(topic)
self.subscriberList[-1]['socket'].setsockopt(zmq.SUBSCRIBE, str.encode(topic))
def logSubConnections(self):
"""
Method that logs the connections list.
"""
for sub in self.subscriberList:
topicStr = ''
for topic in sub['topics']:
topicStr += str(topic) + ' '
logMsg = 'Connected to ' + sub['endPoint'] + \
' under the following topics: ' + topicStr
self.logPublisher.send('log', self.logAdapter.genLogMessage(logLevel=1, message=logMsg))
def _byteToString(self, inBytes):
"""
Converts bytes to string if needed
:param inBytes: input bytes
:type inBytes: bytes
"""
if (type(inBytes)==bytes):
return inBytes.decode()
else:
return inBytes
def _convert_keys_to_string(self, inDict):
"""
Converts byte encoded keys to string. Need this because msgpack unpack
doesn't decode all the elements in the serialized data stream
:param dictionary inDict: any non-nested key value dictionary
:return: dictionary
"""
newDict = {}
for key, value in inDict.items():
if (type(value) == dict):
# this might blow up, need to test more
value = self._convert_keys_to_string(value)
newDict[self._byteToString(key)] = self._byteToString(value)
return newDict
def receive(self):
"""
Method that polls all available connections and returns a dictionary. This should
get called continuously to continue receiving messages. Currently, this function
will not block if no messages are available.
:return: list of nested dictionaries
"""
socks = []
try:
socks = dict(self.poller.poll(0.1))
except:
print ('exception occurred on subscribed receive function')
responseList = []
if (len(socks)>0):
for listItem in self.subscriberList:
if listItem['socket'] in socks:
#topic, pubAddress, contents = listItem['socket'].recv_multipart()
topic, contents = listItem['socket'].recv_multipart()
#convertedContents = self._convert_keys_to_string(msgpack.loads(contents))
convertedContents = self._convert_keys_to_string(json.loads(contents.decode()))
#print ('convertedContents: ' + str(convertedContents))
responseList.append({
'topic': topic.decode(),
#'pubAddress': pubAddress.decode(),
'pubAddress': convertedContents['endPointAddress'],
'contents': convertedContents['contents']
})
        return responseList
| {
"content_hash": "f7276e6d775c2c7f7a8f4f061b29c2cf",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 125,
"avg_line_length": 37.16438356164384,
"alnum_prop": 0.6343531146332473,
"repo_name": "ianwcarlson/python-zeromq-pubsub",
"id": "08d5dd714c0641cf44b22ed09faa01b0643dbccf",
"size": "10852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeroMQInterface.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7541"
},
{
"name": "Python",
"bytes": "42267"
}
],
"symlink_target": ""
} |
import math
import operator
class LOF2D:
def __init__(self, points):
# points = [
# [x, y],
# [x, y]
# ]
self.data = points
self.neighbours = {}
self.outliers = []
self.k_distances = {}
self.lrd = {}
self.reach_distance_sum = {}
self.lof = {}
self.sorted_lof = []
def create_distance_dictionary(self):
"""
Creates a dictionary of distances between all points
:return:
"""
self.neighbours = {}
# TODO: Optimize this with the use of dynamic programming,
# TODO: since it calculates same distances twice
        for i in range(len(self.data)):  # iterate over every point (the previous -1 skipped the last one)
point = (int(self.data[i][0]),int(self.data[i][1]))
point_neighbours = {}
            for j in range(len(self.data)):
compared_point = (int(self.data[j][0]), int(self.data[j][1]))
if i != j:
if compared_point not in point_neighbours:
sum = (point[0] - compared_point[0])*(point[0] - compared_point[0]) + (point[1] - compared_point[1]) * (point[1] - compared_point[1])
result = math.sqrt(sum)
point_neighbours.update({compared_point:result})
if compared_point not in self.neighbours:
compared_point_neighbours = {}
compared_point_neighbours.update({point:result})
self.neighbours.update({compared_point:compared_point_neighbours})
else:
temp_dict = self.neighbours.get(compared_point)
temp_dict.update({point:result})
self.neighbours.update({compared_point:temp_dict})
self.neighbours.update({point:point_neighbours})
def print_neighbours(self):
for key in self.neighbours:
print("\n",key,"\t:\n")
for k in self.neighbours[key]:
print("\t",k,"\t:", self.neighbours[key][k])
def get_knn(self, k=3):
"""
        Limits the previously created distances dictionary so that it contains
        only neighbours whose distance is at most the k-th nearest neighbour's distance.
:param k: number that specifies which neighbour distance should designate the threshold
:return:
"""
for key in self.neighbours:
k_closest = []
temp_values = []
for subkey in self.neighbours[key]:
temp_values.append(self.neighbours[key][subkey])
temp_values.sort()
threshold_value = temp_values[k-1]
for subkey in self.neighbours[key]:
if self.neighbours[key][subkey] <= threshold_value:
k_closest.append(subkey)
selected_dictionary = {}
for k_closest_neighbour in k_closest:
for subkey in self.neighbours[key]:
if k_closest_neighbour == subkey:
selected_dictionary.update({subkey:self.neighbours[key][subkey]})
self.k_distances.update({key:selected_dictionary})
def calculate_lrd(self):
"""
Calculates local reachability density for each point.
Updates LRD dictionary with results.
:return:
"""
for point in self.k_distances:
neighbours_count = len(self.k_distances[point])
sum = 0
for neighbour in self.k_distances[point]:
biggest = -1
for inner_neighbour in self.k_distances[neighbour]:
if self.k_distances[neighbour][inner_neighbour] > biggest:
biggest = self.k_distances[neighbour][inner_neighbour]
dist = self.neighbours[neighbour][point]
if biggest > dist:
sum += biggest
else:
sum += dist
self.lrd.update({point: neighbours_count/sum})
self.reach_distance_sum.update({point:sum})
def calculate_lof(self):
"""
Calculates LOF for all points.
Updates local LOF dictionary with results.
:return:
"""
for point in self.k_distances:
lrd_sum = 0
for neighbour in self.k_distances[point]:
lrd_sum += self.lrd[neighbour]
self.lof.update({point:lrd_sum * self.reach_distance_sum[point]})
def sort_lof(self):
"""
        Sorts LOF data by LOF value in descending order
:return: list of tuples ( (x, y), lof_value )
"""
self.sorted_lof = sorted(self.lof.items(), key=operator.itemgetter(1), reverse=True)
def print_lof(self):
for ele in self.sorted_lof:
print("point:", ele[0], "lof:", ele[1])
def print_k_distances(self):
for key in self.k_distances:
print("\n",key,"\t:\n")
for subkey in self.k_distances[key]:
print("\t",subkey,"\t:", self.k_distances[key][subkey])
def get_top(self, number):
result = []
for i in range(number):
result.append(self.sorted_lof[i][0])
return result
def main():
neighbours = 3
points = [
[1, 2], [2, 4], [3, 6], [4, 37], [5, 2],
[6, 5], [7, 3], [8, 4], [9, 8]
]
lof = LOF2D(points)
# 1. Calculate all the distances
lof.create_distance_dictionary()
#lof.print_neighbours()
# 2. Get all neighbours that are closer or equal to k neighbour
lof.get_knn(neighbours)
#lof.print_k_distances()
# 3. Calculate local reachability density for all points
lof.calculate_lrd()
# 4. Calculate LOF
lof.calculate_lof()
# 5. Sort
lof.sort_lof()
# 6. Show
#lof.print_lof()
# 7. Get top 3
print(lof.get_top(3))
import pydoc
help(lof)
if __name__ == "__main__":
main()
| {
"content_hash": "9d84a25ac68544f65ffbee37652f4025",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 139,
"avg_line_length": 25.50261780104712,
"alnum_prop": 0.6534592486142475,
"repo_name": "mnmnc/campephilus",
"id": "f9d5da3236f48482c47075b5d97cfa8209eab31e",
"size": "4871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/detector/lof2d.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32648"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from datetime import date
GENDER_CHOICES = (
('u', _('undefined')),
('M', _('Male')),
('F', _('Female')),
)
LOOKFOR_CHOICES = (
('a', _('any')),
('M', _('Man')),
    ('F', _('Woman')),
)
class ProfileManager(models.Manager):
"""
.. class:: ProfileManager
Django Manager class for :mod:`question.models.Profile` objects.
It provides simple statistic methods.
"""
def count(self):
"""
.. classmethod:: count(self)
Returns the number of all profiles in database.
:rtype: count of all profiles.
"""
return super(ProfileManager, self).get_queryset().count()
def female_count(self):
"""
.. classmethod:: female_count(self)
Returns the number of all female profiles in database.
:rtype: count of female profiles.
"""
return super(ProfileManager, self).get_queryset().filter(
gender='F'
).count()
def male_count(self):
"""
.. classmethod:: male_count(self)
Returns the number of all male profiles in database.
:rtype: count of male profiles.
"""
return super(ProfileManager, self).get_queryset().filter(
gender='M'
).count()
def female_percent(self):
"""
.. classmethod:: female_percent(self)
        Returns the proportion (0-1) of female profiles in the database.
        :rtype: float fraction of female profiles.
"""
return self.female_count() / self.count()
def male_percent(self):
"""
.. classmethod:: male_percent(self)
        Returns the proportion (0-1) of male profiles in the database.
        :rtype: float fraction of male profiles.
"""
return self.male_count() / self.count()
def get_by_natural_key(self, username):
return self.get(username=username)
class Profile(models.Model):
"""
The actual Profile to describe a user in the context of matchmaking.
"""
user = models.OneToOneField(User)
"""Reference to :mod:`django.contrib.auth.models.User`"""
is_public = models.BooleanField(default=False)
"""Describes whether the profile shall be visible publically."""
gender = models.CharField(
max_length=1,
choices=GENDER_CHOICES,
default=GENDER_CHOICES[0][0]
)
"""Describe the gender of the user."""
lookfor = models.CharField(
max_length=1,
choices=LOOKFOR_CHOICES,
default=LOOKFOR_CHOICES[0][0]
)
"""Describe what gender the user is looking for."""
dob = models.DateField(blank=True, null=True)
"""Date of Birth."""
objects = ProfileManager()
"""Use :mod:`question.models.ProfileManager` for Profile.objects."""
@property
def age(self):
"""
Calculate a users age in years.
"""
if self.dob:
return (date.today() - self.dob).days / 365
else:
return 0
def __unicode__(self):
"""
Unicode representation of self
"""
return u'%s (%s, %s)' % (self.user.username, self.gender, self.age)
@models.permalink
def get_absolute_url(self):
return ('user:profile-view', [str(self.id)])
| {
"content_hash": "68897b31f9d8988eff6eba6bfda943e2",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 75,
"avg_line_length": 24.51824817518248,
"alnum_prop": 0.5829115808276273,
"repo_name": "aneumeier/userprofile",
"id": "41236063286368b9206b7937d863cdc060a5b40e",
"size": "3359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "593"
},
{
"name": "HTML",
"bytes": "16346"
},
{
"name": "JavaScript",
"bytes": "2937"
},
{
"name": "Python",
"bytes": "29921"
}
],
"symlink_target": ""
} |
"""add geom column to pol shop table
Revision ID: 539cf8248905
Revises: 3fd9b2d19fb9
Create Date: 2015-12-06 15:48:43.979639
"""
# revision identifiers, used by Alembic.
revision = '539cf8248905'
down_revision = '3fd9b2d19fb9'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute('''
ALTER TABLE pol_shop
ADD COLUMN geog geography(POINT,4326)
''')
op.execute('UPDATE pol_shop set geog=CAST(ST_SetSRID(ST_Point(lon, lat), 4326) As geography);')
def downgrade():
    op.execute('ALTER TABLE pol_shop DROP COLUMN geog')
| {
"content_hash": "c6f453b18b13e1bf0f01762cae3e02aa",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 99,
"avg_line_length": 20.82758620689655,
"alnum_prop": 0.7135761589403974,
"repo_name": "atlefren/beerdatabase",
"id": "dd1e85658ff582062da463e5cb36eff43f131a49",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/539cf8248905_add_geom_column_to_pol_shop_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155908"
},
{
"name": "HTML",
"bytes": "89680"
},
{
"name": "JavaScript",
"bytes": "3333115"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "120748"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
"""
Class that permits log messages to be sent back to the REST API by other Resources
"""
import json
import logging
from flask import request
from flask_restful import Resource
class RequestLogger(Resource):
"""
RequestLogger Resource class for flask_restful
"""
def __init__(self):
self.logger = logging.getLogger('esz.RequestLogger')
self.keymap = {
'DEBUG': self.logger.debug,
'INFO': self.logger.info,
'WARNING': self.logger.warning,
'ERROR': self.logger.error,
'CRITICAL': self.logger.critical,
}
def post(self, loglevel):
"""POST method"""
if request.data != b'':
# Must decode to 'utf-8' for older versions of Python
json_data = json.loads(request.data.decode('utf-8'))
# If multiple keys were posted, log them all as specified
for k in json_data:
self.keymap[loglevel.upper()]('{0}: {1}'.format(k, json_data[k]))
else:
self.keymap[loglevel.upper()]('Received no POST data')
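# Example request against this resource (the /api/logger/<loglevel> route below is
# illustrative -- the actual URL depends on how the resource is registered):
#   curl -X POST -d '{"event": "backup finished"}' http://localhost:5000/api/logger/info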
| {
"content_hash": "ef1b5e1d1b408bed2325ce02d07f29f1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 33.06060606060606,
"alnum_prop": 0.5921173235563703,
"repo_name": "untergeek/es_stats_zabbix",
"id": "2b6565e9f9dfd98fd0cbc91b0c3f0d6044ca7fc8",
"size": "1091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "es_stats_zabbix/backend/requestlogger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93484"
},
{
"name": "Shell",
"bytes": "2396"
}
],
"symlink_target": ""
} |
"""Test the quantum amplitude estimation algorithm."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
import numpy as np
from ddt import ddt, idata, data, unpack
from qiskit import QuantumRegister, QuantumCircuit, BasicAer
from qiskit.circuit.library import QFT, GroverOperator
from qiskit.utils import QuantumInstance
from qiskit.algorithms import (
AmplitudeEstimation,
MaximumLikelihoodAmplitudeEstimation,
IterativeAmplitudeEstimation,
FasterAmplitudeEstimation,
EstimationProblem,
)
from qiskit.quantum_info import Operator, Statevector
from qiskit.primitives import Sampler
class BernoulliStateIn(QuantumCircuit):
"""A circuit preparing sqrt(1 - p)|0> + sqrt(p)|1>."""
def __init__(self, probability):
super().__init__(1)
angle = 2 * np.arcsin(np.sqrt(probability))
self.ry(angle, 0)
class BernoulliGrover(QuantumCircuit):
"""The Grover operator corresponding to the Bernoulli A operator."""
def __init__(self, probability):
super().__init__(1, global_phase=np.pi)
self.angle = 2 * np.arcsin(np.sqrt(probability))
self.ry(2 * self.angle, 0)
def power(self, power, matrix_power=False):
if matrix_power:
return super().power(power, True)
powered = QuantumCircuit(1)
powered.ry(power * 2 * self.angle, 0)
return powered
class SineIntegral(QuantumCircuit):
r"""Construct the A operator to approximate the integral
\int_0^1 \sin^2(x) d x
with a specified number of qubits.
"""
def __init__(self, num_qubits):
qr_state = QuantumRegister(num_qubits, "state")
qr_objective = QuantumRegister(1, "obj")
super().__init__(qr_state, qr_objective)
# prepare 1/sqrt{2^n} sum_x |x>_n
self.h(qr_state)
# apply the sine/cosine term
self.ry(2 * 1 / 2 / 2**num_qubits, qr_objective[0])
for i, qubit in enumerate(qr_state):
self.cry(2 * 2**i / 2**num_qubits, qubit, qr_objective[0])
@ddt
class TestBernoulli(QiskitAlgorithmsTestCase):
"""Tests based on the Bernoulli A operator.
This class tests
* the estimation result
* the constructed circuits
"""
def setUp(self):
super().setUp()
self._statevector = QuantumInstance(
backend=BasicAer.get_backend("statevector_simulator"),
seed_simulator=2,
seed_transpiler=2,
)
self._sampler = Sampler(options={"seed": 2})
def qasm(shots=100):
return QuantumInstance(
backend=BasicAer.get_backend("qasm_simulator"),
shots=shots,
seed_simulator=2,
seed_transpiler=2,
)
self._qasm = qasm
def sampler_shots(shots=100):
return Sampler(options={"shots": shots, "seed": 2})
self._sampler_shots = sampler_shots
@idata(
[
[0.2, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.2}],
[0.49, AmplitudeEstimation(3), {"estimation": 0.5, "mle": 0.49}],
[0.2, MaximumLikelihoodAmplitudeEstimation([0, 1, 2]), {"estimation": 0.2}],
[0.49, MaximumLikelihoodAmplitudeEstimation(3), {"estimation": 0.49}],
[0.2, IterativeAmplitudeEstimation(0.1, 0.1), {"estimation": 0.2}],
[0.49, IterativeAmplitudeEstimation(0.001, 0.01), {"estimation": 0.49}],
[0.2, FasterAmplitudeEstimation(0.1, 3, rescale=False), {"estimation": 0.2}],
[0.12, FasterAmplitudeEstimation(0.1, 2, rescale=False), {"estimation": 0.12}],
]
)
@unpack
def test_statevector(self, prob, qae, expect):
"""statevector test"""
qae.quantum_instance = self._statevector
problem = EstimationProblem(BernoulliStateIn(prob), 0, BernoulliGrover(prob))
result = qae.estimate(problem)
self._statevector.reset_execution_results()
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[0.2, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.2}],
[0.49, AmplitudeEstimation(3), {"estimation": 0.5, "mle": 0.49}],
[0.2, MaximumLikelihoodAmplitudeEstimation([0, 1, 2]), {"estimation": 0.2}],
[0.49, MaximumLikelihoodAmplitudeEstimation(3), {"estimation": 0.49}],
[0.2, IterativeAmplitudeEstimation(0.1, 0.1), {"estimation": 0.2}],
[0.49, IterativeAmplitudeEstimation(0.001, 0.01), {"estimation": 0.49}],
[0.2, FasterAmplitudeEstimation(0.1, 3, rescale=False), {"estimation": 0.199}],
[0.12, FasterAmplitudeEstimation(0.1, 2, rescale=False), {"estimation": 0.12}],
]
)
@unpack
def test_sampler(self, prob, qae, expect):
"""sampler test"""
qae.sampler = self._sampler
problem = EstimationProblem(BernoulliStateIn(prob), 0, BernoulliGrover(prob))
result = qae.estimate(problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[0.2, 100, AmplitudeEstimation(4), {"estimation": 0.14644, "mle": 0.193888}],
[0.0, 1000, AmplitudeEstimation(2), {"estimation": 0.0, "mle": 0.0}],
[
0.2,
100,
MaximumLikelihoodAmplitudeEstimation([0, 1, 2, 4, 8]),
{"estimation": 0.199606},
],
[0.8, 10, IterativeAmplitudeEstimation(0.1, 0.05), {"estimation": 0.811711}],
[0.2, 1000, FasterAmplitudeEstimation(0.1, 3, rescale=False), {"estimation": 0.198640}],
[
0.12,
100,
FasterAmplitudeEstimation(0.01, 3, rescale=False),
{"estimation": 0.119037},
],
]
)
@unpack
def test_qasm(self, prob, shots, qae, expect):
"""qasm test"""
qae.quantum_instance = self._qasm(shots)
problem = EstimationProblem(BernoulliStateIn(prob), [0], BernoulliGrover(prob))
result = qae.estimate(problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[0.2, 100, AmplitudeEstimation(4), {"estimation": 0.500000, "mle": 0.562783}],
[0.0, 1000, AmplitudeEstimation(2), {"estimation": 0.0, "mle": 0.0}],
[
0.2,
100,
MaximumLikelihoodAmplitudeEstimation([0, 1, 2, 4, 8]),
{"estimation": 0.474790},
],
[0.8, 10, IterativeAmplitudeEstimation(0.1, 0.05), {"estimation": 0.811711}],
[0.2, 1000, FasterAmplitudeEstimation(0.1, 3, rescale=False), {"estimation": 0.199073}],
[
0.12,
100,
FasterAmplitudeEstimation(0.01, 3, rescale=False),
{"estimation": 0.120016},
],
]
)
@unpack
def test_sampler_with_shots(self, prob, shots, qae, expect):
"""sampler with shots test"""
qae.sampler = self._sampler_shots(shots)
problem = EstimationProblem(BernoulliStateIn(prob), [0], BernoulliGrover(prob))
result = qae.estimate(problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@data(True, False)
def test_qae_circuit(self, efficient_circuit):
"""Test circuits resulting from canonical amplitude estimation.
Build the circuit manually and from the algorithm and compare the resulting unitaries.
"""
prob = 0.5
problem = EstimationProblem(BernoulliStateIn(prob), objective_qubits=[0])
for m in [2, 5]:
qae = AmplitudeEstimation(m)
angle = 2 * np.arcsin(np.sqrt(prob))
# manually set up the inefficient AE circuit
qr_eval = QuantumRegister(m, "a")
qr_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(qr_eval, qr_objective)
# initial Hadamard gates
for i in range(m):
circuit.h(qr_eval[i])
# A operator
circuit.ry(angle, qr_objective)
if efficient_circuit:
qae.grover_operator = BernoulliGrover(prob)
for power in range(m):
circuit.cry(2 * 2**power * angle, qr_eval[power], qr_objective[0])
else:
oracle = QuantumCircuit(1)
oracle.z(0)
state_preparation = QuantumCircuit(1)
state_preparation.ry(angle, 0)
grover_op = GroverOperator(oracle, state_preparation)
for power in range(m):
circuit.compose(
grover_op.power(2**power).control(),
qubits=[qr_eval[power], qr_objective[0]],
inplace=True,
)
# fourier transform
iqft = QFT(m, do_swaps=False).inverse().reverse_bits()
circuit.append(iqft.to_instruction(), qr_eval)
actual_circuit = qae.construct_circuit(problem, measurement=False)
self.assertEqual(Operator(circuit), Operator(actual_circuit))
@data(True, False)
def test_iqae_circuits(self, efficient_circuit):
"""Test circuits resulting from iterative amplitude estimation.
Build the circuit manually and from the algorithm and compare the resulting unitaries.
"""
prob = 0.5
problem = EstimationProblem(BernoulliStateIn(prob), objective_qubits=[0])
for k in [2, 5]:
qae = IterativeAmplitudeEstimation(0.01, 0.05)
angle = 2 * np.arcsin(np.sqrt(prob))
# manually set up the inefficient AE circuit
q_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(q_objective)
# A operator
circuit.ry(angle, q_objective)
if efficient_circuit:
qae.grover_operator = BernoulliGrover(prob)
circuit.ry(2 * k * angle, q_objective[0])
else:
oracle = QuantumCircuit(1)
oracle.z(0)
state_preparation = QuantumCircuit(1)
state_preparation.ry(angle, 0)
grover_op = GroverOperator(oracle, state_preparation)
for _ in range(k):
circuit.compose(grover_op, inplace=True)
actual_circuit = qae.construct_circuit(problem, k, measurement=False)
self.assertEqual(Operator(circuit), Operator(actual_circuit))
@data(True, False)
def test_mlae_circuits(self, efficient_circuit):
"""Test the circuits constructed for MLAE"""
prob = 0.5
problem = EstimationProblem(BernoulliStateIn(prob), objective_qubits=[0])
for k in [2, 5]:
qae = MaximumLikelihoodAmplitudeEstimation(k)
angle = 2 * np.arcsin(np.sqrt(prob))
# compute all the circuits used for MLAE
circuits = []
# 0th power
q_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(q_objective)
circuit.ry(angle, q_objective)
circuits += [circuit]
# powers of 2
for power in range(k):
q_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(q_objective)
# A operator
circuit.ry(angle, q_objective)
# Q^(2^j) operator
if efficient_circuit:
qae.grover_operator = BernoulliGrover(prob)
circuit.ry(2 * 2**power * angle, q_objective[0])
else:
oracle = QuantumCircuit(1)
oracle.z(0)
state_preparation = QuantumCircuit(1)
state_preparation.ry(angle, 0)
grover_op = GroverOperator(oracle, state_preparation)
for _ in range(2**power):
circuit.compose(grover_op, inplace=True)
circuits += [circuit]
actual_circuits = qae.construct_circuits(problem, measurement=False)
for actual, expected in zip(actual_circuits, circuits):
self.assertEqual(Operator(actual), Operator(expected))
@ddt
class TestSineIntegral(QiskitAlgorithmsTestCase):
"""Tests based on the A operator to integrate sin^2(x).
This class tests
* the estimation result
* the confidence intervals
"""
def setUp(self):
super().setUp()
self._statevector = QuantumInstance(
backend=BasicAer.get_backend("statevector_simulator"),
seed_simulator=123,
seed_transpiler=41,
)
self._sampler = Sampler(options={"seed": 123})
def qasm(shots=100):
return QuantumInstance(
backend=BasicAer.get_backend("qasm_simulator"),
shots=shots,
seed_simulator=7192,
seed_transpiler=90000,
)
self._qasm = qasm
def sampler_shots(shots=100):
return Sampler(options={"shots": shots, "seed": 7192})
self._sampler_shots = sampler_shots
@idata(
[
[2, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.270290}],
[4, MaximumLikelihoodAmplitudeEstimation(4), {"estimation": 0.272675}],
[3, IterativeAmplitudeEstimation(0.1, 0.1), {"estimation": 0.272082}],
[3, FasterAmplitudeEstimation(0.01, 1), {"estimation": 0.272082}],
]
)
@unpack
def test_statevector(self, n, qae, expect):
"""Statevector end-to-end test"""
# construct factories for A and Q
# qae.state_preparation = SineIntegral(n)
qae.quantum_instance = self._statevector
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
# result = qae.run(self._statevector)
result = qae.estimate(estimation_problem)
self._statevector.reset_execution_results()
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[2, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.270290}],
[4, MaximumLikelihoodAmplitudeEstimation(4), {"estimation": 0.0}],
[3, IterativeAmplitudeEstimation(0.1, 0.1), {"estimation": 0.0}],
[3, FasterAmplitudeEstimation(0.01, 1), {"estimation": 0.017687}],
]
)
@unpack
def test_sampler(self, n, qae, expect):
"""sampler end-to-end test"""
# construct factories for A and Q
# qae.state_preparation = SineIntegral(n)
qae.sampler = self._sampler
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
result = qae.estimate(estimation_problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[4, 10, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.333333}],
[3, 10, MaximumLikelihoodAmplitudeEstimation(2), {"estimation": 0.256878}],
[3, 1000, IterativeAmplitudeEstimation(0.01, 0.01), {"estimation": 0.271790}],
[3, 1000, FasterAmplitudeEstimation(0.1, 4), {"estimation": 0.274168}],
]
)
@unpack
def test_qasm(self, n, shots, qae, expect):
"""QASM simulator end-to-end test."""
# construct factories for A and Q
qae.quantum_instance = self._qasm(shots)
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
result = qae.estimate(estimation_problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[4, 10, AmplitudeEstimation(2), {"estimation": 0.0, "mle": 0.0}],
[3, 10, MaximumLikelihoodAmplitudeEstimation(2), {"estimation": 0.0}],
[3, 1000, IterativeAmplitudeEstimation(0.01, 0.01), {"estimation": 0.0}],
[3, 1000, FasterAmplitudeEstimation(0.1, 4), {"estimation": 0.000551}],
]
)
@unpack
def test_sampler_with_shots(self, n, shots, qae, expect):
"""Sampler with shots end-to-end test."""
# construct factories for A and Q
qae.sampler = self._sampler_shots(shots)
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
result = qae.estimate(estimation_problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[
AmplitudeEstimation(3),
"mle",
{
"likelihood_ratio": (0.2494734, 0.3003771),
"fisher": (0.2486176, 0.2999286),
"observed_fisher": (0.2484562, 0.3000900),
},
],
[
MaximumLikelihoodAmplitudeEstimation(3),
"estimation",
{
"likelihood_ratio": (0.2598794, 0.2798536),
"fisher": (0.2584889, 0.2797018),
"observed_fisher": (0.2659279, 0.2722627),
},
],
]
)
@unpack
def test_confidence_intervals(self, qae, key, expect):
"""End-to-end test for all confidence intervals."""
n = 3
qae.quantum_instance = self._statevector
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
# statevector simulator
result = qae.estimate(estimation_problem)
self._statevector.reset_execution_results()
methods = ["lr", "fi", "oi"] # short for likelihood_ratio, fisher, observed_fisher
alphas = [0.1, 0.00001, 0.9] # alpha shouldn't matter in statevector
for alpha, method in zip(alphas, methods):
confint = qae.compute_confidence_interval(result, alpha, method)
# confidence interval based on statevector should be empty, as we are sure of the result
self.assertAlmostEqual(confint[1] - confint[0], 0.0)
self.assertAlmostEqual(confint[0], getattr(result, key))
# qasm simulator
shots = 100
alpha = 0.01
qae.quantum_instance = self._qasm(shots)
result = qae.estimate(estimation_problem)
for method, expected_confint in expect.items():
confint = qae.compute_confidence_interval(result, alpha, method)
np.testing.assert_array_almost_equal(confint, expected_confint)
self.assertTrue(confint[0] <= getattr(result, key) <= confint[1])
def test_iqae_confidence_intervals(self):
"""End-to-end test for the IQAE confidence interval."""
n = 3
qae = IterativeAmplitudeEstimation(0.1, 0.01, quantum_instance=self._statevector)
expected_confint = (0.1984050, 0.3511015)
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
# statevector simulator
result = qae.estimate(estimation_problem)
self._statevector.reset_execution_results()
confint = result.confidence_interval
# confidence interval based on statevector should be empty, as we are sure of the result
self.assertAlmostEqual(confint[1] - confint[0], 0.0)
self.assertAlmostEqual(confint[0], result.estimation)
# qasm simulator
shots = 100
qae.quantum_instance = self._qasm(shots)
result = qae.estimate(estimation_problem)
confint = result.confidence_interval
np.testing.assert_array_almost_equal(confint, expected_confint)
self.assertTrue(confint[0] <= result.estimation <= confint[1])
@ddt
class TestFasterAmplitudeEstimation(QiskitAlgorithmsTestCase):
"""Specific tests for Faster AE."""
def setUp(self):
super().setUp()
self._sampler = Sampler(options={"seed": 2})
def test_rescaling(self):
"""Test the rescaling."""
amplitude = 0.8
scaling = 0.25
circuit = QuantumCircuit(1)
circuit.ry(2 * np.arcsin(amplitude), 0)
problem = EstimationProblem(circuit, objective_qubits=[0])
rescaled = problem.rescale(scaling)
rescaled_amplitude = Statevector.from_instruction(rescaled.state_preparation).data[3]
self.assertAlmostEqual(scaling * amplitude, rescaled_amplitude)
def test_run_without_rescaling(self):
"""Run Faster AE without rescaling if the amplitude is in [0, 1/4]."""
# construct estimation problem
prob = 0.11
a_op = QuantumCircuit(1)
a_op.ry(2 * np.arcsin(np.sqrt(prob)), 0)
problem = EstimationProblem(a_op, objective_qubits=[0])
# construct algo without rescaling
backend = BasicAer.get_backend("statevector_simulator")
fae = FasterAmplitudeEstimation(0.1, 1, rescale=False, quantum_instance=backend)
# run the algo
result = fae.estimate(problem)
# assert the result is correct
self.assertAlmostEqual(result.estimation, prob)
# assert no rescaling was used
theta = np.mean(result.theta_intervals[-1])
value_without_scaling = np.sin(theta) ** 2
self.assertAlmostEqual(result.estimation, value_without_scaling)
def test_sampler_run_without_rescaling(self):
"""Run Faster AE without rescaling if the amplitude is in [0, 1/4]."""
# construct estimation problem
prob = 0.11
a_op = QuantumCircuit(1)
a_op.ry(2 * np.arcsin(np.sqrt(prob)), 0)
problem = EstimationProblem(a_op, objective_qubits=[0])
# construct algo without rescaling
fae = FasterAmplitudeEstimation(0.1, 1, rescale=False, sampler=self._sampler)
# run the algo
result = fae.estimate(problem)
# assert the result is correct
self.assertAlmostEqual(result.estimation, prob, places=2)
# assert no rescaling was used
theta = np.mean(result.theta_intervals[-1])
value_without_scaling = np.sin(theta) ** 2
self.assertAlmostEqual(result.estimation, value_without_scaling)
def test_rescaling_with_custom_grover_raises(self):
"""Test that the rescaling option fails if a custom Grover operator is used."""
prob = 0.8
a_op = BernoulliStateIn(prob)
q_op = BernoulliGrover(prob)
problem = EstimationProblem(a_op, objective_qubits=[0], grover_operator=q_op)
# construct algo without rescaling
backend = BasicAer.get_backend("statevector_simulator")
fae = FasterAmplitudeEstimation(0.1, 1, quantum_instance=backend)
# run the algo
with self.assertWarns(Warning):
_ = fae.estimate(problem)
@data(("statevector_simulator", 0.2), ("qasm_simulator", 0.199440))
@unpack
def test_good_state(self, backend_str, expect):
"""Test with a good state function."""
def is_good_state(bitstr):
return bitstr[1] == "1"
# construct the estimation problem where the second qubit is ignored
a_op = QuantumCircuit(2)
a_op.ry(2 * np.arcsin(np.sqrt(0.2)), 0)
# oracle only affects first qubit
oracle = QuantumCircuit(2)
oracle.z(0)
# reflect only on first qubit
q_op = GroverOperator(oracle, a_op, reflection_qubits=[0])
# but we measure both qubits (hence both are objective qubits)
problem = EstimationProblem(
a_op, objective_qubits=[0, 1], grover_operator=q_op, is_good_state=is_good_state
)
# construct algo
backend = QuantumInstance(
BasicAer.get_backend(backend_str), seed_simulator=2, seed_transpiler=2
)
# cannot use rescaling with a custom grover operator
fae = FasterAmplitudeEstimation(0.01, 5, rescale=False, quantum_instance=backend)
# run the algo
result = fae.estimate(problem)
# assert the result is correct
self.assertAlmostEqual(result.estimation, expect, places=5)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "4fe6e8235e1de92346eefc755471f4c6",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 100,
"avg_line_length": 37.38714499252616,
"alnum_prop": 0.5820806013113705,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "8360ff4fdd9137098a838ceb23bfa986e27a6912",
"size": "25496",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/python/algorithms/test_amplitude_estimators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
from base64 import b64decode, b64encode
from PIL import Image
from io import BytesIO
from os import stat
from IPython.nbformat import read, write
def compress(filename, output_filename=None, img_width=2048, img_format='png'):
"""Compress images in IPython notebooks.
Parameters
----------
filename : string
Notebook to compress. Will take any notebook format.
output_filename : string
If you do not want to overwrite your existing notebook, supply an
filename for the new compressed notebook.
img_width : int
Which width images should be resized to.
img_format : string
Which compression to use on the images, valid options are
*png* and *jpeg* (**requires libjpeg**).
Returns
-------
int
Size of new notebook in bytes.
"""
orig_filesize = stat(filename).st_size
# compress images
nb = read(filename, as_version=4)
outputs = [cell.get('outputs', []) for cell in nb['cells']]
# omit empty outputs
outputs = [o for o in outputs if len(o)]
# flatten
outputs = [o for lines in outputs for o in lines]
for output in outputs:
data = output.get('data', {})
if not data:
continue
keys = data.copy().keys()
for key in keys:
if 'image' in key:
string = ''.join(data[key])
bytes_img = b64decode(string)
io_img = BytesIO(bytes_img)
img = Image.open(io_img)
factor = float(img_width) / img.size[0]
if factor < 1:
# only resize large images
new_size = [int(s*factor+0.5) for s in img.size]
img = img.resize(new_size)
out = BytesIO()
img.save(out, img_format)
out.seek(0)
mime = 'image/' + img_format
del data[key]
data[mime] = b64encode(out.read()).decode('ascii')
# save notebook
if not output_filename:
output_filename = filename
try:
output_format = nb.metadata.orig_nbformat
except AttributeError:
output_format = 4
write(nb, output_filename, version=output_format)
# calculate bytes saved
bytes_saved = orig_filesize - stat(output_filename).st_size
if bytes_saved <= 0:
print('%s: warning: no compression - %s bytes gained' % (filename, -bytes_saved))
return bytes_saved
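# Example call (hypothetical filenames, shown for illustration only):
#   compress('analysis.ipynb', output_filename='analysis_small.ipynb',
#            img_width=1024, img_format='jpeg')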
| {
"content_hash": "3608efc5adb12e2934340f4ba0548bc8",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 89,
"avg_line_length": 33.527027027027025,
"alnum_prop": 0.5783958081418783,
"repo_name": "arve0/ipynbcompress",
"id": "f484ef031b3c034c588cb8a1e4317bd1d9bb6275",
"size": "2497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipynbcompress/ipynbcompress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12540790"
},
{
"name": "Makefile",
"bytes": "1407"
},
{
"name": "Python",
"bytes": "7243"
}
],
"symlink_target": ""
} |
"""
Utilities for local system calls, everything here is cross-platform.
become_daemon was originally taken from Django:
https://github.com/django/django/commit/5836a5771f2aefca83349b111f4191d6485af1d5#diff-f7d80be2ccf77f4f009d08dcac4b7736
We might want to refactor this into:
system/__init__.py
system/posix.py
system/windows.py
etc..
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import signal
import sys
import tempfile
import time
import six
from django.db import connections
from .conf import KOLIBRI_HOME
from .conf import OPTIONS
from kolibri.utils.android import on_android
logger = logging.getLogger(__name__)
def _posix_pid_exists(pid):
"""Check whether PID exists in the current process table."""
import errno
if pid < 0:
return False
try:
# Send signal 0, this is harmless
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
def _kill_pid(pid, softkill_signal_number):
"""Kill a PID by sending a signal, starting with a softer one and then escalating as needed"""
logger.info("Initiating shutdown of Kolibri")
try:
logger.debug("Attempting to soft kill process with pid %d..." % pid)
os.kill(pid, softkill_signal_number)
logger.debug("Soft kill signal sent without error.")
# process does not exist
except OSError:
logger.debug(
"Soft kill signal could not be sent (OSError); process may not exist?"
)
return
if pid_exists(pid):
logger.info("Waiting for Kolibri to finish shutting down")
# give some time for the process to clean itself up gracefully before we force anything
i = 0
while pid_exists(pid) and i < 60:
time.sleep(0.5)
i += 1
# if process didn't exit cleanly, make one last effort to kill it
if pid_exists(pid):
logger.debug(
"Process wth pid %s still exists after soft kill signal; attempting a SIGKILL."
% pid
)
os.kill(pid, signal.SIGKILL)
logger.debug("SIGKILL signal sent without error.")
def _posix_kill_pid(pid):
"""Kill a PID by sending a posix-specific soft-kill signal"""
_kill_pid(pid, signal.SIGTERM)
def _windows_pid_exists(pid):
import ctypes
kernel32 = ctypes.windll.kernel32
SYNCHRONIZE = 0x100000
process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
if process != 0:
kernel32.CloseHandle(process)
return True
else:
return False
def _windows_kill_pid(pid):
"""Kill a PID by sending a windows-specific soft-kill signal"""
_kill_pid(pid, signal.CTRL_C_EVENT)
buffering = int(six.PY3) # No unbuffered text I/O on Python 3 (#20815).
def _posix_become_daemon(
our_home_dir=".", out_log="/dev/null", err_log="/dev/null", umask=0o022
):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
try:
if os.fork() > 0:
sys.exit(0) # kill off parent
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid()
os.chdir(our_home_dir)
os.umask(umask)
# Second fork
try:
if os.fork() > 0:
os._exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
os._exit(1)
if sys.platform != "darwin": # This block breaks on OS X
# Fix courtesy of https://github.com/serverdensity/python-daemon/blob/master/daemon.py#L94
si = open("/dev/null", "r")
so = open(out_log, "a+", buffering)
se = open(err_log, "a+", buffering)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
def _windows_become_daemon(our_home_dir=".", out_log=None, err_log=None, umask=0o022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(umask)
sys.stdin.close()
old_stderr = sys.stderr
old_stdout = sys.stdout
if err_log:
sys.stderr = open(err_log, "a", buffering)
else:
sys.stderr = _WindowsNullDevice()
if out_log:
sys.stdout = open(out_log, "a", buffering)
else:
sys.stdout = _WindowsNullDevice()
# Redirect stderr and stdout
os.dup2(sys.stderr.fileno(), old_stderr.fileno())
os.dup2(sys.stdout.fileno(), old_stdout.fileno())
old_stderr.flush()
old_stdout.flush()
class _WindowsNullDevice:
"A writeable object that writes to nowhere -- like /dev/null."
def write(self, s):
pass
def get_free_space(path=KOLIBRI_HOME):
while path and not os.path.exists(path):
path = os.path.dirname(path) # look to parent if it doesn't exist
if not path:
raise Exception("Could not calculate free space")
if sys.platform.startswith("win"):
import ctypes
free = ctypes.c_ulonglong(0)
check = ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(path), None, None, ctypes.pointer(free)
)
if check == 0:
raise ctypes.winError()
result = free.value
elif on_android():
# This is meant for android, which needs to interact with android API to understand free
# space. If we're somehow getting here on non-android, we've got a problem.
try:
from jnius import autoclass
StatFs = autoclass("android.os.StatFs")
AndroidString = autoclass("java.lang.String")
st = StatFs(AndroidString(path))
try:
# for api version 18+
result = st.getFreeBlocksLong() * st.getBlockSizeLong()
except Exception:
# for api versions < 18
result = st.getFreeBlocks() * st.getBlockSize()
except Exception as e:
raise e
else:
st = os.statvfs(os.path.realpath(path))
result = st.f_bavail * st.f_frsize
return result
_become_daemon_function = None
def become_daemon(**kwargs):
# close all connections before forking, to avoid SQLite corruption:
# https://www.sqlite.org/howtocorrupt.html#_carrying_an_open_database_connection_across_a_fork_
connections.close_all()
_become_daemon_function(**kwargs)
# Utility functions for pinging or killing PIDs
if os.name == "posix":
pid_exists = _posix_pid_exists
kill_pid = _posix_kill_pid
_become_daemon_function = _posix_become_daemon
else:
pid_exists = _windows_pid_exists
kill_pid = _windows_kill_pid
_become_daemon_function = _windows_become_daemon
def _symlink_capability_check():
"""
Function to try to establish a symlink
return True if it succeeds, return False otherwise.
"""
# If STATIC_USE_SYMLINKS has been set to False, return False directly
if not OPTIONS["Deployment"]["STATIC_USE_SYMLINKS"]:
return False
fd, temp_target = tempfile.mkstemp()
temp_pathname = temp_target + ".lnk"
can_do = True
try:
os.symlink(temp_target, temp_pathname)
os.remove(temp_pathname)
except OSError:
can_do = False
# Explicitly close the file so that we can remove it on windows
os.close(fd)
os.remove(temp_target)
return can_do
CAN_USE_SYMLINKS = _symlink_capability_check()
| {
"content_hash": "ff155ae59501a01b9e32c589eaa2e088",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 118,
"avg_line_length": 29.067924528301887,
"alnum_prop": 0.636764896793457,
"repo_name": "mrpau/kolibri",
"id": "324c44da47e8d8e70ee99a0f8e6f51c34ad745e5",
"size": "7703",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/utils/system.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
} |
__version__ = '0.0.1'
__author__ = 'Cashiuus'
__license__ = 'MIT'
__copyright__ = 'Copyright (C) 2022 Cashiuus'
## =======[ IMPORTS ]========= ##
import argparse
import errno
import logging
from logging import handlers
import os
import platform
import subprocess
import sys
from pathlib import Path
from random import randrange
from time import sleep, strftime
try: from colorama import init, Fore
except ImportError: pass
# if using, these must go below `import sys`
if sys.version > '3':
import urllib.parse as urlparse
import urllib.parse as urllib
else:
import urlparse
import urllib
# Logging Cookbook: https://docs.python.org/3/howto/logging-cookbook.html
# This first line must be at top of the file, outside of any functions, so it's global
logger = logging.getLogger(__name__)
## =========[ TEXT COLORS ]============= ##
class Colors(object):
""" Access these via 'Colors.GREEN' """
GREEN = '\033[32;1m' # Green
YELLOW = '\033[01;33m' # Warnings/Information
RED = '\033[31m' # Error or '\033[91m'
ORANGE = '\033[33m' # Debug
BLUE = '\033[01;34m' # Heading
PURPLE = '\033[01;35m' # Other
    GREY = '\033[90m'        # Subdued Text
BOLD = '\033[01;01m' # Highlight
RESET = '\033[00m' # Normal/White
BACKBLUE = '\033[44m' # Blue background
BACKCYAN = '\033[46m' # Cyan background
BACKRED = '\033[41m' # Red background
BACKWHITE = '\033[47m' # White background
## =======[ Constants & Settings ]========= ##
VERBOSE = True
DEBUG = True
#MY_SETTINGS = 'settings.conf'
USER_HOME = os.environ.get('HOME')
ACTIVE_SHELL = os.environ['SHELL']
BASE_DIR = Path(__file__).resolve(strict=True).parent # one parent means dir of this file
#BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
SAVE_DIR = BASE_DIR / 'saved'
LOG_FILE = BASE_DIR / 'debug.log'
#FILE_NAME_WITH_DATE_EXAMPLE = "data_output-" + strftime('%Y%m%d') + ".txt"
# ==========================[ BEGIN APPLICATION ]========================== #
# ---------------------
# SHUTDOWN
# ---------------------
def shutdown_app():
#logger.debug("shutdown_app :: Application shutdown function executing")
print("Application shutting down -- Goodbye!")
exit(0)
# ---------------------
# main
# ---------------------
def main():
"""
Main function of the script
"""
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
# FileHandler accepts string or Path object for filename; mode 'w' truncates log, 'a' appends
fh = logging.FileHandler(LOG_FILE, mode='w')
# Or you can use a rotating file handler: https://docs.python.org/3/howto/logging-cookbook.html#cookbook-rotator-namer
#fh = handlers.RotatingFileHandler(LOG_FILE, max_bytes=104857600, backupCount=4)
if DEBUG:
ch.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
fh.setLevel(logging.INFO)
# Message Format - See here: https://docs.python.org/3/library/logging.html#logrecord-attributes
formatter = logging.Formatter('%(funcName)s : %(levelname)-8s %(message)s')
ch.setFormatter(formatter)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s : %(message)s')
fh.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logger.debug('Logger initialized')
# ----- Levels
#logger.debug('msg')
#logger.info('msg')
#logger.warning('msg')
#logger.error('msg')
#logger.error('foo', exc_info=True)
#logger.critical('msg')
# --------------------------
print("[TEMPLATE] BASE_DIR is: {}".format(BASE_DIR))
# Quick 'n dirty args if not using argparse
args = sys.argv[1:]
if not args:
print('Usage: [--flags options] [inputs] ')
sys.exit(1)
# -- arg parsing --
    parser = argparse.ArgumentParser(description="Description of this tool")
parser.add_argument('target', help='IP/CIDR/URL of target') # positional arg
parser.add_argument("-i", "--input-file", dest='input', nargs='*',
help="Specify a file containing the output of an nmap "
"scan in xml format.")
parser.add_argument("-o", "--output-file", dest='output',
help="Specify output file name")
parser.add_argument('--url', action='store', default=None, dest='url',
help='Pass URL to request')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument("-d", "--debug", action="store_true",
help="Display error information")
args = parser.parse_args()
# If we have a mandatory arg, use it here; if not given, display usage
if not args.target:
parser.print_help()
exit(1)
# Now store our args into variables for use
# NOTE: infile will be a list of files, bc args.filename accepts multiple input files
infile = args.input
outfile = args.output
url = args.url
# -- Config File parsing --
#config = ConfigParser()
#try:
#config.read(MY_SETTINGS)
#config_value_format = config.get('youtube', 'format')
# Split a list into strings from a config file
#config_value_urls = config.get('youtube', 'urls')
#urls = shlex.split(config_value_urls)
#print("[DEBUG] urls: {0}".format(urls))
try:
# main application flow
pass
except KeyboardInterrupt:
shutdown_app()
return
if __name__ == '__main__':
main()
# =========================================================================== #
# ================================[ RECIPES ]================================ #
#
#
## Old path constants
#BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#SAVE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'saved')
#LOG_FILE = os.path.join(BASE_DIR, 'debug.log')
# =======================[ CORE UTILITY FUNCTIONS ]======================== #
# Check - Root user
# TODO: If not root, run with sudo
def root_check():
if not (os.geteuid() == 0):
print("[-] Not currently root user. Please fix.")
sys.exit(1)
return
def delay(max=10):
"""Generate random number for sleep function
Usage: time.sleep(delay(max=30))
"""
return randrange(2, max, 1)
def install_pkg(package):
    # NOTE: pip.main() was removed in pip 10+; prefer
    #   subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
    import os, pip, platform
    pip.main(['install', package])
    if platform.system() == 'Linux':
        if os.geteuid() != 0:
            print('\n' + Colors.RED + '[-]' + Colors.YELLOW
                  + ' Please Run as Root!' + Colors.RESET + '\n')
            sys.exit()
    return
def make_dirs(path):
"""
Make all directories en route to the full provided path.
"""
# If 'path' is a single directory path, create it, else treat as a list of paths
# for i in path:
if not os.path.isdir(path):
try:
os.makedirs(path)
logger.debug("Directory created: {}".format(path))
except OSError:
print("[ERROR] Error creating directory: {}".format(str(path)))
logger.error("Error creating directory: {}".format(str(path)))
sys.exit(1)
return
def create_file(path):
# Use os.open to create a file if it doesn't already exist
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
try:
file_handle = os.open(path, flags)
except OSError as e:
if e.errno == errno.EEXIST:
# The file already exists
return False
else:
# Something went wrong, troubleshoot error
raise
else:
# No exception, so file was hopefully created.
with os.fdopen(file_handle, 'w') as file_obj:
file_obj.write("### Log File")
return True
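# ----- Illustrative usage (sketch, not part of the original template) -----
# create_file() returns True only when it actually created the file, so the
# caller can log accordingly. LOG_FILE here is assumed to be the path
# constant defined near the top of the template.
#   if create_file(LOG_FILE):
#       logger.debug("Created new log file: {}".format(LOG_FILE))
#   else:
#       logger.debug("Log file already present: {}".format(LOG_FILE))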
# -------------------------
# Git helper functions
# -------------------------
def locate_directories(pattern, root='/'):
"""
Locate all occurrences of a directory, such as all .git repositories,
creating a list in order to update them all.
Usage: for i in locate_directories('.git'):
dir_repo, tail = os.path.split(i)
# Build a list of git clones on filesystem, absolute paths
my_apps.append(dir_repo)
# Or do something with each one in the loop
git_update(dir_repo)
"""
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(dirs, pattern):
yield os.path.join(path, filename)
def git_owner(ap):
"""
Get the remote owner URL for an existing cloned Git repo.
The .git/config file has a line starting with 'url' that contains the remote URL.
Unfortunately, there doesn't appear to be any way to get the upstream owner for forks :(
:param ap: absolute path to the root of the cloned repo
:return: the remote URL string
"""
with open(os.path.join(ap, '.git', 'config'), 'r') as fgit:
#for line in fgit.readlines():
# if line.strip().startswith('url'):
# owner_string = line.strip().split(' ')[2]
owner_string = [x.strip().split(' ')[2] for x in fgit.readlines() if x.strip().startswith('url')]
return owner_string[0]
def git_update(git_path):
"""
Update an existing git cloned repository.
:param git_path:
:return:
"""
if os.path.isdir(os.path.join(git_path, '.git')):
# The path is the root level of a git clone, proceed
try:
os.chdir(git_path)
subprocess.call('git pull', shell=True)
sleep(3) # Sleep 3s
except Exception:
print("[ERROR] Failed to update git repo at {0}".format(git_path))
return
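# ----- Illustrative usage (sketch, not part of the original template) -----
# A minimal example tying the helpers above together: locate every .git
# directory under a search root and pull each clone. The '~/git' root and
# the function name update_all_repos are hypothetical placeholders.
def update_all_repos(root=os.path.expanduser('~/git')):
    """Locate every .git directory under 'root' and pull each clone."""
    my_apps = []
    for i in locate_directories('.git', root=root):
        dir_repo, tail = os.path.split(i)
        my_apps.append(dir_repo)
        logger.debug("Repo {0} remote: {1}".format(dir_repo, git_owner(dir_repo)))
        git_update(dir_repo)
    return my_apps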
def printer(msg, color=Colors.RESET):
"""
A print helper with colors for console output. Not for logging purposes.
Usage: printer("\n[*] Installing Repository: {}".format(app), color=Colors.GREEN)
"""
if DO_DEBUG and color == Colors.ORANGE:
print("{0}[DEBUG] {1!s}{2}".format(Colors.ORANGE, msg, Colors.RESET))
else:
print("{0}{1!s}{2}".format(color, msg, Colors.RESET))
return
# Enable install of pip requirements within same script file
import subprocess
import sys
requirements = [
"requests",
"colorama",
"xi==1.15.0",
]
def install(packages):
for package in packages:
# Update this to use .run instead?
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
return
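# ----- Illustrative usage (sketch, not part of the original recipe) -----
# One way this helper might be wired up: try the imports first and only fall
# back to pip when something is missing. The package names mirror the
# 'requirements' list above.
try:
    import requests
    import colorama
except ImportError:
    install(requirements)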
#
#
# =========================================================================== #
| {
"content_hash": "7ed8fe7074e8b54fe5294a399d0a9a2a",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 122,
"avg_line_length": 30.716763005780347,
"alnum_prop": 0.5761196838539706,
"repo_name": "Cashiuus/penprep",
"id": "ad7b0c21fcc17b55ae446b0aed72ba0a27c458f5",
"size": "11067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/geany/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "5990"
},
{
"name": "Python",
"bytes": "51388"
},
{
"name": "Shell",
"bytes": "755384"
}
],
"symlink_target": ""
} |
import subprocess
# Use GenerateEncryptedString() locally - DO NOT include in the script!
# The 'Encrypted String' will become a parameter for the script in the JSS
# The unique 'Salt' and 'Passphrase' values will be present in your script
def GenerateEncryptedString(inputString):
'''Usage >>> GenerateEncryptedString("String")'''
salt = subprocess.check_output(['/usr/bin/openssl', 'rand', '-hex', '8']).rstrip()
passphrase = subprocess.check_output(['/usr/bin/openssl', 'rand', '-hex', '12']).rstrip()
p = subprocess.Popen(['/usr/bin/openssl', 'enc', '-aes256', '-md', 'md5', '-a', '-A', '-S', salt, '-k', passphrase], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
encrypted = p.communicate(inputString)[0]
print("Encrypted String: %s" % encrypted)
print("Salt: %s | Passphrase: %s" % (salt, passphrase))
# Include DecryptString() with your script to decrypt the password sent by the JSS
# The 'Salt' and 'Passphrase' values would be present in the script
def DecryptString(inputString, salt, passphrase):
'''Usage: >>> DecryptString("Encrypted String", "Salt", "Passphrase")'''
p = subprocess.Popen(['/usr/bin/openssl', 'enc', '-aes256', '-md', 'md5', '-d', '-a', '-A', '-S', salt, '-k', passphrase], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
return p.communicate(inputString)[0]
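# Illustrative round-trip (values below are hypothetical placeholders, not real output):
#   >>> GenerateEncryptedString("s3cretPassw0rd")
#   Encrypted String: U2FsdGVkX1...   (base64 output from openssl)
#   Salt: 1a2b3c4d5e6f7a8b | Passphrase: 0f1e2d3c4b5a69788796a5b4
#   >>> DecryptString("U2FsdGVkX1...", "1a2b3c4d5e6f7a8b", "0f1e2d3c4b5a69788796a5b4")
#   's3cretPassw0rd'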
# Alternative format for DecryptString function
def DecryptString(inputString):
'''Usage: >>> DecryptString("Encrypted String")'''
salt = ""
passphrase = ""
p = subprocess.Popen(['/usr/bin/openssl', 'enc', '-aes256', '-md', 'md5', '-d', '-a', '-A', '-S', salt, '-k', passphrase], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
return p.communicate(inputString)[0] | {
"content_hash": "e5cc2e51e0e895dc0894a295a4df2c51",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 177,
"avg_line_length": 61.75,
"alnum_prop": 0.664545980335454,
"repo_name": "brysontyrrell/EncryptedStrings",
"id": "8b914b11a790fa75b61b81868ce0dc989454bdb2",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EncryptedStrings_Python.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1929"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Party'
db.create_table('distribution_party', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('member_id', self.gf('django.db.models.fields.CharField')(max_length=12, blank=True)),
('short_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32)),
('long_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('contact', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('phone', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('cell', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('fax', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('address', self.gf('django.db.models.fields.TextField')(blank=True)),
('email_address', self.gf('django.db.models.fields.EmailField')(max_length=96, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('storage_capacity', self.gf('django.db.models.fields.TextField')(blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True)),
))
db.send_create_signal('distribution', ['Party'])
# Adding model 'PartyUser'
db.create_table('distribution_partyuser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('party', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users', to=orm['distribution.Party'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='parties', to=orm['auth.User'])),
))
db.send_create_signal('distribution', ['PartyUser'])
# Adding model 'EmailIntro'
db.create_table('distribution_emailintro', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('message', self.gf('django.db.models.fields.TextField')(blank=True)),
('notice_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='email_intro', to=orm['notification.NoticeType'])),
))
db.send_create_signal('distribution', ['EmailIntro'])
# Adding model 'FoodNetwork'
db.create_table('distribution_foodnetwork', (
('party_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.Party'], unique=True, primary_key=True)),
('billing_contact', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('billing_phone', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('billing_address', self.gf('django.db.models.fields.TextField')(blank=True)),
('billing_email_address', self.gf('django.db.models.fields.EmailField')(max_length=96, null=True, blank=True)),
('customer_terms', self.gf('django.db.models.fields.IntegerField')(default=0)),
('member_terms', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('customer_fee', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=3, decimal_places=2)),
('producer_fee', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=3, decimal_places=2)),
('transportation_fee', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('next_delivery_date', self.gf('django.db.models.fields.DateField')(default=datetime.date.today)),
('order_by_lot', self.gf('django.db.models.fields.BooleanField')(default=False)),
('use_plans_for_ordering', self.gf('django.db.models.fields.BooleanField')(default=False)),
('default_product_expiration_days', self.gf('django.db.models.fields.IntegerField')(default=6)),
))
db.send_create_signal('distribution', ['FoodNetwork'])
# Adding model 'Producer'
db.create_table('distribution_producer', (
('party_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.Party'], unique=True, primary_key=True)),
('delivers', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('distribution', ['Producer'])
# Adding model 'Processor'
db.create_table('distribution_processor', (
('party_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.Party'], unique=True, primary_key=True)),
))
db.send_create_signal('distribution', ['Processor'])
# Adding model 'Distributor'
db.create_table('distribution_distributor', (
('party_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.Party'], unique=True, primary_key=True)),
('transportation_fee', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
))
db.send_create_signal('distribution', ['Distributor'])
# Adding model 'Customer'
db.create_table('distribution_customer', (
('party_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.Party'], unique=True, primary_key=True)),
('customer_transportation_fee', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('apply_transportation_fee', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('distribution', ['Customer'])
# Adding model 'CustomerContact'
db.create_table('distribution_customercontact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='contacts', to=orm['distribution.Customer'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=96, null=True, blank=True)),
('phone', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('cell', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('login_user', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='customer_contact', unique=True, null=True, to=orm['auth.User'])),
))
db.send_create_signal('distribution', ['CustomerContact'])
# Adding model 'ProducerContact'
db.create_table('distribution_producercontact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('producer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='contacts', to=orm['distribution.Producer'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=96, null=True, blank=True)),
('phone', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('cell', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, blank=True)),
('login_user', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='producer_contact', unique=True, null=True, to=orm['auth.User'])),
))
db.send_create_signal('distribution', ['ProducerContact'])
# Adding model 'Product'
db.create_table('distribution_product', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['distribution.Product'])),
('short_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32)),
('long_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('growing_method', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('sellable', self.gf('django.db.models.fields.BooleanField')(default=True)),
('plannable', self.gf('django.db.models.fields.BooleanField')(default=True)),
('stockable', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_parent', self.gf('django.db.models.fields.BooleanField')(default=False)),
('price', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('customer_fee_override', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=2, blank=True)),
('pay_producer', self.gf('django.db.models.fields.BooleanField')(default=True)),
('pay_producer_on_terms', self.gf('django.db.models.fields.BooleanField')(default=False)),
('expiration_days', self.gf('django.db.models.fields.IntegerField')(default=6)),
))
db.send_create_signal('distribution', ['Product'])
# Adding model 'Special'
db.create_table('distribution_special', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='specials', to=orm['distribution.Product'])),
('price', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('headline', self.gf('django.db.models.fields.CharField')(max_length=128)),
('description', self.gf('django.db.models.fields.TextField')()),
('from_date', self.gf('django.db.models.fields.DateField')()),
('to_date', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('distribution', ['Special'])
# Adding model 'ProductPlan'
db.create_table('distribution_productplan', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('member', self.gf('django.db.models.fields.related.ForeignKey')(related_name='product_plans', to=orm['distribution.Party'])),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Product'])),
('from_date', self.gf('django.db.models.fields.DateField')()),
('to_date', self.gf('django.db.models.fields.DateField')()),
('quantity', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('role', self.gf('django.db.models.fields.CharField')(default='producer', max_length=12)),
('inventoried', self.gf('django.db.models.fields.BooleanField')(default=True)),
('distributor', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='plan_distributors', null=True, to=orm['distribution.Party'])),
))
db.send_create_signal('distribution', ['ProductPlan'])
# Adding model 'ProducerProduct'
db.create_table('distribution_producerproduct', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('producer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='producer_products', to=orm['distribution.Party'])),
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='product_producers', to=orm['distribution.Product'])),
('default_quantity', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('default_avail_qty', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('inventoried', self.gf('django.db.models.fields.BooleanField')(default=True)),
('planned', self.gf('django.db.models.fields.BooleanField')(default=True)),
('distributor', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='producer_distributors', null=True, to=orm['distribution.Party'])),
))
db.send_create_signal('distribution', ['ProducerProduct'])
# Adding unique constraint on 'ProducerProduct', fields ['producer', 'product']
db.create_unique('distribution_producerproduct', ['producer_id', 'product_id'])
# Adding model 'MemberProductList'
db.create_table('distribution_memberproductlist', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('member', self.gf('django.db.models.fields.related.ForeignKey')(related_name='product_lists', to=orm['distribution.Party'])),
('list_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('distribution', ['MemberProductList'])
# Adding model 'CustomerProduct'
db.create_table('distribution_customerproduct', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='customer_products', to=orm['distribution.Party'])),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Product'])),
('product_list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.MemberProductList'], null=True, blank=True)),
('default_quantity', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('planned', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('distribution', ['CustomerProduct'])
# Adding unique constraint on 'CustomerProduct', fields ['customer', 'product', 'product_list']
db.create_unique('distribution_customerproduct', ['customer_id', 'product_id', 'product_list_id'])
# Adding model 'InventoryItem'
db.create_table('distribution_inventoryitem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('freeform_lot_id', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('producer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventory_items', to=orm['distribution.Party'])),
('field_id', self.gf('django.db.models.fields.CharField')(max_length=12, blank=True)),
('custodian', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='custody_items', null=True, to=orm['distribution.Party'])),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Product'])),
('inventory_date', self.gf('django.db.models.fields.DateField')()),
('expiration_date', self.gf('django.db.models.fields.DateField')()),
('planned', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('remaining', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('received', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('onhand', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('notes', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
))
db.send_create_signal('distribution', ['InventoryItem'])
# Adding model 'EconomicEvent'
db.create_table('distribution_economicevent', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('transaction_date', self.gf('django.db.models.fields.DateField')()),
('from_whom', self.gf('django.db.models.fields.related.ForeignKey')(related_name='given_events', to=orm['distribution.Party'])),
('to_whom', self.gf('django.db.models.fields.related.ForeignKey')(related_name='taken_events', to=orm['distribution.Party'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
('notes', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True)),
))
db.send_create_signal('distribution', ['EconomicEvent'])
# Adding model 'Payment'
db.create_table('distribution_payment', (
('economicevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.EconomicEvent'], unique=True, primary_key=True)),
('reference', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
))
db.send_create_signal('distribution', ['Payment'])
# Adding model 'TransactionPayment'
db.create_table('distribution_transactionpayment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('paid_event', self.gf('django.db.models.fields.related.ForeignKey')(related_name='transaction_payments', to=orm['distribution.EconomicEvent'])),
('payment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='paid_events', to=orm['distribution.Payment'])),
('amount_paid', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
))
db.send_create_signal('distribution', ['TransactionPayment'])
# Adding model 'Order'
db.create_table('distribution_order', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Customer'])),
('purchase_order', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('order_date', self.gf('django.db.models.fields.DateField')()),
('delivery_date', self.gf('django.db.models.fields.DateField')()),
('distributor', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='orders', null=True, to=orm['distribution.Party'])),
('paid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('state', self.gf('django.db.models.fields.CharField')(default='Submitted', max_length=16, blank=True)),
('product_list', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='orders', null=True, to=orm['distribution.MemberProductList'])),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='orders_created', null=True, to=orm['auth.User'])),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='orders_changed', null=True, to=orm['auth.User'])),
))
db.send_create_signal('distribution', ['Order'])
# Adding model 'CustomerPayment'
db.create_table('distribution_customerpayment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('paid_order', self.gf('django.db.models.fields.related.ForeignKey')(related_name='customer_payments', to=orm['distribution.Order'])),
('payment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='paid_orders', to=orm['distribution.Payment'])),
('amount_paid', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
))
db.send_create_signal('distribution', ['CustomerPayment'])
# Adding model 'OrderItem'
db.create_table('distribution_orderitem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Order'])),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Product'])),
('quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
('unit_price', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
('fee', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=3, decimal_places=2)),
('notes', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
))
db.send_create_signal('distribution', ['OrderItem'])
# Adding model 'OrderItemChange'
db.create_table('distribution_orderitemchange', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('action', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length='1')),
('reason', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length='1')),
('when_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='order_items_changed', null=True, to=orm['auth.User'])),
('order', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='order_changes', null=True, to=orm['distribution.Order'])),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='order_changes', to=orm['distribution.Customer'])),
('order_item', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='order_item_changes', null=True, to=orm['distribution.OrderItem'])),
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='order_item_changes', to=orm['distribution.Product'])),
('prev_qty', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('new_qty', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=8, decimal_places=2)),
('prev_notes', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('new_notes', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
))
db.send_create_signal('distribution', ['OrderItemChange'])
# Adding model 'ServiceType'
db.create_table('distribution_servicetype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('invoiced_separately', self.gf('django.db.models.fields.BooleanField')(default=False)),
('pay_provider_on_terms', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('distribution', ['ServiceType'])
# Adding model 'ProcessType'
db.create_table('distribution_processtype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('input_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='input_types', to=orm['distribution.Product'])),
('use_existing_input_lot', self.gf('django.db.models.fields.BooleanField')(default=True)),
('number_of_processing_steps', self.gf('django.db.models.fields.IntegerField')(default=1)),
('output_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='output_types', to=orm['distribution.Product'])),
('number_of_output_lots', self.gf('django.db.models.fields.IntegerField')(default=1)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('distribution', ['ProcessType'])
# Adding model 'Process'
db.create_table('distribution_process', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('process_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.ProcessType'])),
('process_date', self.gf('django.db.models.fields.DateField')()),
('managed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='managed_processes', null=True, to=orm['distribution.Party'])),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('distribution', ['Process'])
# Adding model 'InventoryTransaction'
db.create_table('distribution_inventorytransaction', (
('economicevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.EconomicEvent'], unique=True, primary_key=True)),
('transaction_type', self.gf('django.db.models.fields.CharField')(default='Delivery', max_length=10)),
('inventory_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.InventoryItem'])),
('process', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='inventory_transactions', null=True, to=orm['distribution.Process'])),
('order_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.OrderItem'], null=True, blank=True)),
('unit_price', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
))
db.send_create_signal('distribution', ['InventoryTransaction'])
# Adding model 'ServiceTransaction'
db.create_table('distribution_servicetransaction', (
('economicevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.EconomicEvent'], unique=True, primary_key=True)),
('service_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.ServiceType'])),
('process', self.gf('django.db.models.fields.related.ForeignKey')(related_name='service_transactions', to=orm['distribution.Process'])),
))
db.send_create_signal('distribution', ['ServiceTransaction'])
# Adding model 'TransportationTransaction'
db.create_table('distribution_transportationtransaction', (
('economicevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['distribution.EconomicEvent'], unique=True, primary_key=True)),
('service_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.ServiceType'])),
('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['distribution.Order'])),
))
db.send_create_signal('distribution', ['TransportationTransaction'])
# Adding model 'DeliveryCycle'
db.create_table('distribution_deliverycycle', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('delivery_day', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length='1')),
('route', self.gf('django.db.models.fields.CharField')(max_length=255)),
('order_closing_day', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length='1')),
('order_closing_time', self.gf('django.db.models.fields.TimeField')()),
))
db.send_create_signal('distribution', ['DeliveryCycle'])
# Adding model 'CustomerDeliveryCycle'
db.create_table('distribution_customerdeliverycycle', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='delivery_cycles', to=orm['distribution.Customer'])),
('delivery_cycle', self.gf('django.db.models.fields.related.ForeignKey')(related_name='delivery_customers', to=orm['distribution.DeliveryCycle'])),
))
db.send_create_signal('distribution', ['CustomerDeliveryCycle'])
# Adding unique constraint on 'CustomerDeliveryCycle', fields ['customer', 'delivery_cycle']
db.create_unique('distribution_customerdeliverycycle', ['customer_id', 'delivery_cycle_id'])
def backwards(self, orm):
# Removing unique constraint on 'CustomerDeliveryCycle', fields ['customer', 'delivery_cycle']
db.delete_unique('distribution_customerdeliverycycle', ['customer_id', 'delivery_cycle_id'])
# Removing unique constraint on 'CustomerProduct', fields ['customer', 'product', 'product_list']
db.delete_unique('distribution_customerproduct', ['customer_id', 'product_id', 'product_list_id'])
# Removing unique constraint on 'ProducerProduct', fields ['producer', 'product']
db.delete_unique('distribution_producerproduct', ['producer_id', 'product_id'])
# Deleting model 'Party'
db.delete_table('distribution_party')
# Deleting model 'PartyUser'
db.delete_table('distribution_partyuser')
# Deleting model 'EmailIntro'
db.delete_table('distribution_emailintro')
# Deleting model 'FoodNetwork'
db.delete_table('distribution_foodnetwork')
# Deleting model 'Producer'
db.delete_table('distribution_producer')
# Deleting model 'Processor'
db.delete_table('distribution_processor')
# Deleting model 'Distributor'
db.delete_table('distribution_distributor')
# Deleting model 'Customer'
db.delete_table('distribution_customer')
# Deleting model 'CustomerContact'
db.delete_table('distribution_customercontact')
# Deleting model 'ProducerContact'
db.delete_table('distribution_producercontact')
# Deleting model 'Product'
db.delete_table('distribution_product')
# Deleting model 'Special'
db.delete_table('distribution_special')
# Deleting model 'ProductPlan'
db.delete_table('distribution_productplan')
# Deleting model 'ProducerProduct'
db.delete_table('distribution_producerproduct')
# Deleting model 'MemberProductList'
db.delete_table('distribution_memberproductlist')
# Deleting model 'CustomerProduct'
db.delete_table('distribution_customerproduct')
# Deleting model 'InventoryItem'
db.delete_table('distribution_inventoryitem')
# Deleting model 'EconomicEvent'
db.delete_table('distribution_economicevent')
# Deleting model 'Payment'
db.delete_table('distribution_payment')
# Deleting model 'TransactionPayment'
db.delete_table('distribution_transactionpayment')
# Deleting model 'Order'
db.delete_table('distribution_order')
# Deleting model 'CustomerPayment'
db.delete_table('distribution_customerpayment')
# Deleting model 'OrderItem'
db.delete_table('distribution_orderitem')
# Deleting model 'OrderItemChange'
db.delete_table('distribution_orderitemchange')
# Deleting model 'ServiceType'
db.delete_table('distribution_servicetype')
# Deleting model 'ProcessType'
db.delete_table('distribution_processtype')
# Deleting model 'Process'
db.delete_table('distribution_process')
# Deleting model 'InventoryTransaction'
db.delete_table('distribution_inventorytransaction')
# Deleting model 'ServiceTransaction'
db.delete_table('distribution_servicetransaction')
# Deleting model 'TransportationTransaction'
db.delete_table('distribution_transportationtransaction')
# Deleting model 'DeliveryCycle'
db.delete_table('distribution_deliverycycle')
# Deleting model 'CustomerDeliveryCycle'
db.delete_table('distribution_customerdeliverycycle')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'distribution.customer': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'Customer', '_ormbases': ['distribution.Party']},
'apply_transportation_fee': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'customer_transportation_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'party_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.Party']", 'unique': 'True', 'primary_key': 'True'})
},
'distribution.customercontact': {
'Meta': {'object_name': 'CustomerContact'},
'cell': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': "orm['distribution.Customer']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '96', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'customer_contact'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'})
},
'distribution.customerdeliverycycle': {
'Meta': {'unique_together': "(('customer', 'delivery_cycle'),)", 'object_name': 'CustomerDeliveryCycle'},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'delivery_cycles'", 'to': "orm['distribution.Customer']"}),
'delivery_cycle': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'delivery_customers'", 'to': "orm['distribution.DeliveryCycle']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'distribution.customerpayment': {
'Meta': {'object_name': 'CustomerPayment'},
'amount_paid': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'customer_payments'", 'to': "orm['distribution.Order']"}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paid_orders'", 'to': "orm['distribution.Payment']"})
},
'distribution.customerproduct': {
'Meta': {'ordering': "('customer', 'product')", 'unique_together': "(('customer', 'product', 'product_list'),)", 'object_name': 'CustomerProduct'},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'customer_products'", 'to': "orm['distribution.Party']"}),
'default_quantity': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Product']"}),
'product_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.MemberProductList']", 'null': 'True', 'blank': 'True'})
},
'distribution.deliverycycle': {
'Meta': {'ordering': "('delivery_day',)", 'object_name': 'DeliveryCycle'},
'customers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['distribution.Customer']", 'through': "orm['distribution.CustomerDeliveryCycle']", 'symmetrical': 'False'}),
'delivery_day': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_closing_day': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': "'1'"}),
'order_closing_time': ('django.db.models.fields.TimeField', [], {}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'distribution.distributor': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'Distributor', '_ormbases': ['distribution.Party']},
'party_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.Party']", 'unique': 'True', 'primary_key': 'True'}),
'transportation_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'})
},
'distribution.economicevent': {
'Meta': {'object_name': 'EconomicEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'from_whom': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'given_events'", 'to': "orm['distribution.Party']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'to_whom': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taken_events'", 'to': "orm['distribution.Party']"}),
'transaction_date': ('django.db.models.fields.DateField', [], {})
},
'distribution.emailintro': {
'Meta': {'object_name': 'EmailIntro'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'email_intro'", 'to': "orm['notification.NoticeType']"})
},
'distribution.foodnetwork': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'FoodNetwork', '_ormbases': ['distribution.Party']},
'billing_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_contact': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'billing_email_address': ('django.db.models.fields.EmailField', [], {'max_length': '96', 'null': 'True', 'blank': 'True'}),
'billing_phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'customer_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '3', 'decimal_places': '2'}),
'customer_terms': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'default_product_expiration_days': ('django.db.models.fields.IntegerField', [], {'default': '6'}),
'member_terms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'next_delivery_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'order_by_lot': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'party_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.Party']", 'unique': 'True', 'primary_key': 'True'}),
'producer_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '3', 'decimal_places': '2'}),
'transportation_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'use_plans_for_ordering': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'distribution.inventoryitem': {
'Meta': {'ordering': "('product', 'producer', 'inventory_date')", 'object_name': 'InventoryItem'},
'custodian': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'custody_items'", 'null': 'True', 'to': "orm['distribution.Party']"}),
'expiration_date': ('django.db.models.fields.DateField', [], {}),
'field_id': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'freeform_lot_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_date': ('django.db.models.fields.DateField', [], {}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'onhand': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'planned': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'producer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_items'", 'to': "orm['distribution.Party']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Product']"}),
'received': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'remaining': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'})
},
'distribution.inventorytransaction': {
'Meta': {'ordering': "('-transaction_date',)", 'object_name': 'InventoryTransaction', '_ormbases': ['distribution.EconomicEvent']},
'economicevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.EconomicEvent']", 'unique': 'True', 'primary_key': 'True'}),
'inventory_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.InventoryItem']"}),
'order_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.OrderItem']", 'null': 'True', 'blank': 'True'}),
'process': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inventory_transactions'", 'null': 'True', 'to': "orm['distribution.Process']"}),
'transaction_type': ('django.db.models.fields.CharField', [], {'default': "'Delivery'", 'max_length': '10'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'})
},
'distribution.memberproductlist': {
'Meta': {'object_name': 'MemberProductList'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_lists'", 'to': "orm['distribution.Party']"})
},
'distribution.order': {
'Meta': {'ordering': "('order_date', 'customer')", 'object_name': 'Order'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders_changed'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders_created'", 'null': 'True', 'to': "orm['auth.User']"}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Customer']"}),
'delivery_date': ('django.db.models.fields.DateField', [], {}),
'distributor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['distribution.Party']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_date': ('django.db.models.fields.DateField', [], {}),
'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'product_list': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['distribution.MemberProductList']"}),
'purchase_order': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'Submitted'", 'max_length': '16', 'blank': 'True'})
},
'distribution.orderitem': {
'Meta': {'ordering': "('order', 'product')", 'object_name': 'OrderItem'},
'fee': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '3', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Product']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'})
},
'distribution.orderitemchange': {
'Meta': {'object_name': 'OrderItemChange'},
'action': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': "'1'"}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_items_changed'", 'null': 'True', 'to': "orm['auth.User']"}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'order_changes'", 'to': "orm['distribution.Customer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_notes': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'new_qty': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_changes'", 'null': 'True', 'to': "orm['distribution.Order']"}),
'order_item': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_item_changes'", 'null': 'True', 'to': "orm['distribution.OrderItem']"}),
'prev_notes': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'prev_qty': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'order_item_changes'", 'to': "orm['distribution.Product']"}),
'reason': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': "'1'"}),
'when_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'distribution.party': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'Party'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cell': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '96', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'member_id': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'storage_capacity': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'distribution.partyuser': {
'Meta': {'object_name': 'PartyUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['distribution.Party']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parties'", 'to': "orm['auth.User']"})
},
'distribution.payment': {
'Meta': {'ordering': "('transaction_date',)", 'object_name': 'Payment', '_ormbases': ['distribution.EconomicEvent']},
'economicevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.EconomicEvent']", 'unique': 'True', 'primary_key': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'distribution.process': {
'Meta': {'ordering': "('process_date',)", 'object_name': 'Process'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'managed_processes'", 'null': 'True', 'to': "orm['distribution.Party']"}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'process_date': ('django.db.models.fields.DateField', [], {}),
'process_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.ProcessType']"})
},
'distribution.processor': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'Processor', '_ormbases': ['distribution.Party']},
'party_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.Party']", 'unique': 'True', 'primary_key': 'True'})
},
'distribution.processtype': {
'Meta': {'object_name': 'ProcessType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'input_types'", 'to': "orm['distribution.Product']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'number_of_output_lots': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'number_of_processing_steps': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'output_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'output_types'", 'to': "orm['distribution.Product']"}),
'use_existing_input_lot': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'distribution.producer': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'Producer', '_ormbases': ['distribution.Party']},
'delivers': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'party_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.Party']", 'unique': 'True', 'primary_key': 'True'})
},
'distribution.producercontact': {
'Meta': {'object_name': 'ProducerContact'},
'cell': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '96', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'producer_contact'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'producer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': "orm['distribution.Producer']"})
},
'distribution.producerproduct': {
'Meta': {'ordering': "('producer', 'product')", 'unique_together': "(('producer', 'product'),)", 'object_name': 'ProducerProduct'},
'default_avail_qty': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'default_quantity': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'distributor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'producer_distributors'", 'null': 'True', 'to': "orm['distribution.Party']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventoried': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'producer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'producer_products'", 'to': "orm['distribution.Party']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_producers'", 'to': "orm['distribution.Product']"})
},
'distribution.product': {
'Meta': {'ordering': "('short_name',)", 'object_name': 'Product'},
'customer_fee_override': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '2', 'blank': 'True'}),
'expiration_days': ('django.db.models.fields.IntegerField', [], {'default': '6'}),
'growing_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_parent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['distribution.Product']"}),
'pay_producer': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'pay_producer_on_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plannable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'sellable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'stockable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'distribution.productplan': {
'Meta': {'ordering': "('product', 'member', 'from_date')", 'object_name': 'ProductPlan'},
'distributor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'plan_distributors'", 'null': 'True', 'to': "orm['distribution.Party']"}),
'from_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventoried': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_plans'", 'to': "orm['distribution.Party']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Product']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'producer'", 'max_length': '12'}),
'to_date': ('django.db.models.fields.DateField', [], {})
},
'distribution.servicetransaction': {
'Meta': {'object_name': 'ServiceTransaction', '_ormbases': ['distribution.EconomicEvent']},
'economicevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.EconomicEvent']", 'unique': 'True', 'primary_key': 'True'}),
'process': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'service_transactions'", 'to': "orm['distribution.Process']"}),
'service_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.ServiceType']"})
},
'distribution.servicetype': {
'Meta': {'object_name': 'ServiceType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoiced_separately': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'pay_provider_on_terms': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'distribution.special': {
'Meta': {'ordering': "('-from_date',)", 'object_name': 'Special'},
'description': ('django.db.models.fields.TextField', [], {}),
'from_date': ('django.db.models.fields.DateField', [], {}),
'headline': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'specials'", 'to': "orm['distribution.Product']"}),
'to_date': ('django.db.models.fields.DateField', [], {})
},
'distribution.transactionpayment': {
'Meta': {'object_name': 'TransactionPayment'},
'amount_paid': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transaction_payments'", 'to': "orm['distribution.EconomicEvent']"}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paid_events'", 'to': "orm['distribution.Payment']"})
},
'distribution.transportationtransaction': {
'Meta': {'object_name': 'TransportationTransaction', '_ormbases': ['distribution.EconomicEvent']},
'economicevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['distribution.EconomicEvent']", 'unique': 'True', 'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.Order']"}),
'service_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['distribution.ServiceType']"})
},
'notification.noticetype': {
'Meta': {'object_name': 'NoticeType'},
'default': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['distribution']
| {
"content_hash": "3bb02ad3930f05bf9e16afd188abc97c",
"timestamp": "",
"source": "github",
"line_count": 834,
"max_line_length": 201,
"avg_line_length": 78.02997601918466,
"alnum_prop": 0.6078645297109578,
"repo_name": "bhaugen/nova",
"id": "092d529841eb20b31eff70cebda0c0d9a3dfe49a",
"size": "65095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distribution/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22759"
},
{
"name": "Python",
"bytes": "1733441"
}
],
"symlink_target": ""
} |
# GOAL: parse the DocLegalDescription to extract the physical address of the associated property
# CHALLENGES:
# 1.) inconsistent format of text in the field
# 2.) not all entries have a physical address listed
# Last Name Conventions: ETAL, LLC, LLC ETAL, MORTGAGE COMPANY, DECEASED, ESTATE
# COMPLETED:
# For those cells that conform to the PHYSICAL STREET ADDRESS xxx PLAT BOOK format
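# Illustrative (hypothetical) example of a conforming DocLegalDescription value:
#   "LOT 5 BLK 2 SUNSET PARK PHYSICAL STREET ADDRESS 123 MAIN ST ORLANDO FL 32801 PLAT BOOK 12 PG 34"
# The address is assumed to sit between the "PHYSICAL STREET ADDRESS" and "PLAT BOOK" markers.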
import pandas as pd
import usaddress
# import numpy as np
file_source_path = 'C:\\Users\\Cameron\\Downloads'
file_destination_path = 'C:\\Users\\Cameron\\Documents\\Python'
# file_source_path = 'C:\\Users\\kacollins\\Downloads'
# file_destination_path = 'C:\\Users\\kacollins\\Downloads'
# creating the df with the lp data from SearchResults.csv export
df = pd.read_csv(file_source_path + '\\SearchResults.csv')
# splitting the IndirectName field into a first and last -- simple at this time, split by first space
df[['Last Name', 'First Name']] = df['IndirectName'].str.split(' ', 1, expand=True)
# creating two new cols -- one for the starting index and one for the ending index
start_bound = "PHYSICAL STREET ADDRESS "
end_bound = "PLAT BOOK"
df['start_index'] = df['DocLegalDescription'].str.find("PHYSICAL STREET ADDRESS")
df['end_index'] = df['DocLegalDescription'].str.find("PLAT BOOK")
# to minimize errors, we only look at rows with the index(es)
df = df[df['start_index'] > 0].reset_index()
df = df[df['end_index'] > df['start_index']].reset_index()
list_of_titles = ['ETAL', 'LLC', 'COMPANY', 'DECEASED', 'ESTATE']
# Line below is from Wes on how to apply the change to the df as a whole
df['Full Address'] = df.apply(lambda x: x['DocLegalDescription'][int(x['start_index']) + len(start_bound) :int(x['end_index'])].strip(),axis=1)
df['Title'] = df.apply(lambda x: x['First Name'].rsplit(' ', 1)[-1],axis=1)
df['Title_A'] = df['Title'].isin(list_of_titles)
df['Title'] = df.apply(lambda x: ' ' if not x['Title_A'] else x['Title'], axis=1)
df['First Name'] = df.apply(lambda x: x['First Name'].replace(x['Title'], '')if x['Title'] in x['First Name'] else x['First Name'], axis=1 )
df['Address Tag'] = df.apply(lambda x: usaddress.tag(x['Full Address']), axis=1)
addr_tag = df['Address Tag'].tolist()
full_address = df['Full Address'].tolist()
first_name = df['First Name'].tolist()
last_name = df['Last Name'].tolist()
title = df['Title'].to_list()
def _get_addy_part(address,part):
result = []
for addy in address:
if str(addy[0]).find(part) == -1:
result.append('')
else:
result.append(str(addy[0])[str(addy[0]).find(part) + len(part): str(addy[0]).find("')", str(addy[0]).find(part))].strip())
return(result)
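# A rough sketch of what _get_addy_part works against (values are hypothetical):
# usaddress.tag() returns a (tagged_parts, address_type) tuple, e.g.
#   (OrderedDict([('AddressNumber', '123'), ('StreetName', 'MAIN'), ('StreetNamePostType', 'ST'),
#                 ('PlaceName', 'ORLANDO'), ('StateName', 'FL'), ('ZipCode', '32801')]), 'Street Address')
# so searching the repr for "'AddressNumber', '" and reading up to the closing "')" yields '123'.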
house_num = _get_addy_part(addr_tag,"'AddressNumber', '")
street = _get_addy_part(addr_tag,"'StreetName', '")
street_post_type = _get_addy_part(addr_tag,"'StreetNamePostType', '")
street_full = [l + ' ' + m + ' ' + str(n) for l,m,n in zip(house_num,street,street_post_type)]
city = _get_addy_part(addr_tag,"'PlaceName', '")
zips = _get_addy_part(addr_tag,"'ZipCode', '")
df2 = pd.DataFrame({'First Name': first_name,
'Last Name': last_name,
# 'Title': title,
'Address': full_address,
'Street': street_full,
'City': city,
'Zip': zips })
print(df2.head())
df2.to_csv(file_destination_path + '\\clean_lp_list.csv', index=False)
quit()
| {
"content_hash": "5e849342ebde9a22cca38fb3cdd0908b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 144,
"avg_line_length": 43.40243902439025,
"alnum_prop": 0.6254565889294745,
"repo_name": "CamFlawless/python_projects",
"id": "7049a1819bf983929fe162d698d62e51fd996ba3",
"size": "3559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "this.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "Python",
"bytes": "32259"
}
],
"symlink_target": ""
} |
import pymel.core as pm
import logging
log = logging.getLogger("ui")
class BaseTemplate(pm.ui.AETemplate):
def addControl(self, control, label=None, **kwargs):
pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)
def beginLayout(self, name, collapse=True):
pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
class AECoatingBSDFTemplate(BaseTemplate):
def __init__(self, nodeName):
BaseTemplate.__init__(self,nodeName)
log.debug("AECoatingBSDFTemplate")
self.thisNode = None
self.node = pm.PyNode(self.nodeName)
pm.mel.AEswatchDisplay(nodeName)
self.beginScrollLayout()
self.buildBody(nodeName)
self.addExtraControls("ExtraControls")
self.endScrollLayout()
def buildBody(self, nodeName):
self.thisNode = pm.PyNode(nodeName)
self.beginLayout("ShaderSettings" ,collapse=0)
self.beginNoOptimize()
#autoAddBegin
self.addControl("normalMapping", label="NormalMapping")
self.addControl("kappa", label="Kappa")
self.addControl("bump", label="Bump")
self.addControl("anisotropy", label="Anisotropy")
self.addControl("reflectanceColor", label="Reflectance Color")
self.addControl("ior", label="Ior")
self.addControl("roughness", label="Roughness")
self.addControl("microRoughnessHeight", label="MicroRoughnessHeight")
self.addControl("coatingAbsorption", label="CoatingAbsorption")
self.addControl("microRoughness", label="MicroRoughness")
self.addControl("rotation", label="Rotation")
self.addControl("thickness", label="Thickness")
self.addControl("microRoughnessWidth", label="MicroRoughnessWidth")
self.addSeparator()
#autoAddEnd
self.endNoOptimize()
self.endLayout()
| {
"content_hash": "257d839af47f9e291fbcc3e2400ee5e8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 38.755102040816325,
"alnum_prop": 0.6608741442864665,
"repo_name": "haggi/OpenMaya",
"id": "8452d5f52e6919936d17de77f648ef1642959b46",
"size": "1899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mayaToThea/mtth_devmodule/scripts/Thea/AETemplate/AECoatingBSDFTemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "5333"
},
{
"name": "Batchfile",
"bytes": "587"
},
{
"name": "C",
"bytes": "246300"
},
{
"name": "C++",
"bytes": "4178594"
},
{
"name": "Mathematica",
"bytes": "12660820"
},
{
"name": "Objective-C",
"bytes": "316"
},
{
"name": "Python",
"bytes": "1583249"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import responses
import six
from six.moves.urllib.parse import parse_qs, urlencode, urlparse
from sentry.integrations.slack import SlackIntegration
from sentry.models import (
Identity, IdentityProvider, IdentityStatus, Integration,
OrganizationIntegration, UserIdentity
)
from sentry.testutils import IntegrationTestCase
class SlackIntegrationTest(IntegrationTestCase):
provider = SlackIntegration
@responses.activate
def test_basic_flow(self):
resp = self.client.get(self.path)
assert resp.status_code == 302
redirect = urlparse(resp['Location'])
assert redirect.scheme == 'https'
assert redirect.netloc == 'slack.com'
assert redirect.path == '/oauth/authorize'
params = parse_qs(redirect.query)
assert params['scope'] == [' '.join(self.provider.oauth_scopes)]
assert params['state']
assert params['redirect_uri'] == ['http://testserver/extensions/slack/setup/']
assert params['response_type'] == ['code']
assert params['client_id'] == ['slack-client-id']
        # once we've asserted on it, switch to singular values to make life
        # easier
authorize_params = {k: v[0] for k, v in six.iteritems(params)}
responses.add(
responses.POST, 'https://slack.com/api/oauth.access',
json={
'ok': True,
'user_id': 'UXXXXXXX1',
'access_token': 'xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
'team_id': 'TXXXXXXX1',
'team_name': 'Example',
'bot': {
'bot_access_token': 'xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
'bot_user_id': 'UXXXXXXX2',
},
'scope': ','.join(authorize_params['scope'].split(' ')),
})
resp = self.client.get('{}?{}'.format(
self.path,
urlencode({
'code': 'oauth-code',
'state': authorize_params['state'],
})
))
mock_request = responses.calls[-1].request
req_params = parse_qs(mock_request.body)
assert req_params['grant_type'] == ['authorization_code']
assert req_params['code'] == ['oauth-code']
assert req_params['redirect_uri'] == ['http://testserver/extensions/slack/setup/']
assert req_params['client_id'] == ['slack-client-id']
assert req_params['client_secret'] == ['slack-client-secret']
assert resp.status_code == 200
self.assertDialogSuccess(resp)
integration = Integration.objects.get(provider=self.provider.id)
assert integration.external_id == 'TXXXXXXX1'
assert integration.name == 'Example'
assert integration.metadata == {
'access_token': 'xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
'bot_access_token': 'xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
'bot_user_id': 'UXXXXXXX2',
'scopes': list(self.provider.oauth_scopes),
}
oi = OrganizationIntegration.objects.get(
integration=integration,
organization=self.organization,
)
assert oi.config == {}
idp = IdentityProvider.objects.get(
type='slack',
instance='slack.com',
)
identity = Identity.objects.get(
idp=idp,
external_id='UXXXXXXX1',
)
assert identity.status == IdentityStatus.VALID
assert identity.scopes == list(self.provider.oauth_scopes)
assert identity.data == {
'access_token': 'xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
}
assert UserIdentity.objects.filter(
user=self.user,
identity=identity,
).exists()
| {
"content_hash": "4a4949d6d96d0423e62c4595f3685bae",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 90,
"avg_line_length": 37.1747572815534,
"alnum_prop": 0.5863149647427527,
"repo_name": "gencer/sentry",
"id": "a2a11ff3ddeb3b326cba2a8ed622dc435534c603",
"size": "3829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/integrations/slack/test_integration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
} |
"""Output formatters values only
"""
import six
from . import base
from cliff import columns
class ValueFormatter(base.ListFormatter, base.SingleFormatter):
def add_argument_group(self, parser):
pass
def emit_list(self, column_names, data, stdout, parsed_args):
for row in data:
stdout.write(
' '.join(
six.text_type(c.machine_readable()
if isinstance(c, columns.FormattableColumn)
else c)
for c in row) + u'\n')
return
def emit_one(self, column_names, data, stdout, parsed_args):
for value in data:
stdout.write('%s\n' % six.text_type(
value.machine_readable()
if isinstance(value, columns.FormattableColumn)
else value)
)
return
| {
"content_hash": "6aeea1e5fa686cb678067d6b779ff38a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 28.0625,
"alnum_prop": 0.5311804008908686,
"repo_name": "dtroyer/cliff",
"id": "24125c0e33dba7cf64c7c56beb729180467a0555",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cliff/formatters/value.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "234798"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/chassis/shared_texture_kit_s06.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "b463ccb9f121046da464dd260d7f6eb6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.6964856230031949,
"repo_name": "anhstudios/swganh",
"id": "f4c363bcfac15ba8fd1069f3b490658902ceacaa",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/space/chassis/shared_texture_kit_s06.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from cuttlebug.ui.controls import ListControl
import view
import wx
import cuttlebug.settings as settings
import cuttlebug.app as app
import cuttlebug.gdb as gdb
def tabify(s):
TAB_STOP = 5
retval = []
for c in s:
if c != '\t':
retval.append(c)
else:
retval.extend([' ']*(TAB_STOP-(len(retval)%TAB_STOP)))
return ''.join(retval)
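# Quick illustration (assuming TAB_STOP stays at 5): tabify('ab\tc') pads to the
# next multiple of five columns and returns 'ab   c'; a leading '\t' expands to
# five spaces.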
class DisassemblyView(view.View):
def __init__(self, *args, **kwargs):
super(DisassemblyView, self).__init__(*args, **kwargs)
self.list = ListControl(self)
self.list.set_columns(['address', 'instruction'])
self.list.SetFont(wx.Font(8, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
# self.list.auto_size()
try:
self.load_positions()
except:
self.list.auto_size()
self.list.Bind(wx.EVT_LIST_COL_END_DRAG, self.on_col_resize)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.list, 1, wx.EXPAND)
self.SetSizer(sizer)
self.controller.Bind(app.EVT_APP_TARGET_HALTED, self.on_target_halted)
self.controller.Bind(app.EVT_APP_TARGET_DISCONNECTED, self.on_gdb_finished)
def on_col_resize(self, evt):
self.save_positions()
evt.Skip()
def save_positions(self):
cols = self.list.GetColumnCount()
widths = [self.list.GetColumnWidth(i) for i in range(cols)]
settings.session_set('asm_view_col_widths', widths)
def load_positions(self):
widths = settings.session_get('asm_view_col_widths')
cols = self.list.GetColumnCount()
if cols != len(widths):
raise Exception("Wrong number of saved column widths.")
for i, width, in enumerate(widths):
self.list.SetColumnWidth(i, width)
def set_model(self, model):
self.model = model
self.model.Bind(gdb.EVT_GDB_FINISHED, self.on_gdb_finished)
def on_gdb_finished(self, evt):
self.clear()
evt.Skip()
def on_target_halted(self, evt):
self.save_positions()
if self.model:
self.model.data_disassemble(start_addr="$pc-8", end_addr="$pc+8", callback=self.on_disassembled_data)
evt.Skip()
def update_assembly(self, instructions):
self.list.Freeze()
self.list.clear()
for i, instruction in enumerate(instructions):
addr = instruction.address
inst = tabify(instruction.inst.replace("\\t", "\t"))
self.list.add_item((addr, inst), bgcolor=wx.Colour(255, 255, 0) if i == len(instructions)/2 else wx.WHITE)
self.list.Thaw()
def on_disassembled_data(self, dat):
if dat.cls == 'done':
instructions = dat['asm_insns']
wx.CallAfter(self.update_assembly, instructions)
def clear(self):
self.list.clear()
if __name__ == "__main__":
    print(tabify('abc\tde\tf\tghijkl'))
    print(tabify('a\t\tbcde\tfg\thi\tjklmnop\tq'))
"content_hash": "987cad437354cf39d06b959b6854f1f3",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 118,
"avg_line_length": 35.67045454545455,
"alnum_prop": 0.5798024848677923,
"repo_name": "ryansturmer/cuttlebug",
"id": "aba5105d24b763fb0b7be063642e506a827b0bfe",
"size": "3139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuttlebug/ui/views/asm_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "513241"
},
{
"name": "Shell",
"bytes": "434"
}
],
"symlink_target": ""
} |
from model.parameters import Group
import random
def test_delete_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create_new_group(Group(name="test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.group.delete_group_by_id(group.id)
new_groups = db.get_group_list()
old_groups.remove(group)
assert old_groups==new_groups
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| {
"content_hash": "577e8a631b1679dca4ac1078a27387bd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 113,
"avg_line_length": 31.88235294117647,
"alnum_prop": 0.6697416974169742,
"repo_name": "OlgaLa/task_1",
"id": "f53ae15106207be68dde8c5f481f650a1076ab1e",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_del_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45385"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
class TestOnlyNew(object):
config = """
tasks:
test:
mock:
- {title: 'title 1', url: 'http://localhost/title1'}
only_new: yes
disable: [seen] # Disable the seen plugin to make sure only_new does the filtering.
accept_all: yes
"""
def test_only_new(self, execute_task, manager):
task = execute_task('test')
        # only_new rejects the entry on task_exit; make sure accept_all accepted it during the filter event, though
assert task.find_entry('rejected', title='title 1', accepted_by='accept_all'), 'Test entry missing'
# run again, should filter
task = execute_task('test')
assert task.find_entry('rejected', title='title 1', rejected_by='remember_rejected'), 'Seen test entry remains'
# add another entry to the task
manager.config['tasks']['test']['mock'].append({'title': 'title 2', 'url': 'http://localhost/title2'})
# execute again
task = execute_task('test')
# both entries should be present as config has changed
assert task.find_entry('rejected', title='title 1', accepted_by='accept_all'), 'title 1 was not found'
assert task.find_entry('rejected', title='title 2', accepted_by='accept_all'), 'title 2 was not found'
# TODO: Test that new entries are accepted. Tough to do since we can't change the task name or config..
| {
"content_hash": "6afe704ff0da6bb183a7172233f7eca8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 119,
"avg_line_length": 47.75757575757576,
"alnum_prop": 0.633248730964467,
"repo_name": "qvazzler/Flexget",
"id": "3eda24881e3e799bec3af1ffdefdd87dab154529",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_only_new.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5275"
},
{
"name": "HTML",
"bytes": "33930"
},
{
"name": "JavaScript",
"bytes": "58811"
},
{
"name": "Python",
"bytes": "2428468"
}
],
"symlink_target": ""
} |
from skygrid.device import Device
from skygrid.project import Project
from skygrid.schema import Schema
from skygrid.subscription_manager import SubscriptionManager
from skygrid.user import User | {
"content_hash": "e3911e6a837b7232ebc61157f5002575",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 60,
"avg_line_length": 38.8,
"alnum_prop": 0.8711340206185567,
"repo_name": "skygridio/skygrid-sdk-python",
"id": "174134663be1bb9aa3d2dda85d447ac9443f4472",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skygrid/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31191"
}
],
"symlink_target": ""
} |
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_TF_BAZELRC = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'.tf_configure.bazelrc')
_TF_WORKSPACE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'WORKSPACE')
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,5.2'
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def remove_line_with(filename, token):
"""Remove lines that contain token from file.
Args:
filename: string for filename.
token: string token to check if to remove a line from file or not.
"""
with open(filename, 'r') as f:
filedata = f.read()
with open(filename, 'w') as f:
for line in filedata.strip().split('\n'):
if token not in line:
f.write(line + '\n')
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
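# For example, write_action_env_to_bazelrc('PYTHON_BIN_PATH', '/usr/bin/python')
# appends the line: build --action_env PYTHON_BIN_PATH="/usr/bin/python"
# (the path shown here is only illustrative).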
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
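# Illustrative only: on Windows, cygpath('C:\\foo\\bar') yields 'C:/foo/bar',
# keeping bazel-facing paths in forward-slash form.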
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell(
[python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))']).split('\n')
except subprocess.CalledProcessError:
library_paths = [run_shell(
[python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'])]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(
environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(
python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --force_python=py%s' % python_major_version)
write_to_bazelrc('build --host_force_python=py%s' % python_major_version)
  write_to_bazelrc('build --python_path="%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# Write tools/python_bin_path.sh
with open('tools/python_bin_path.sh', 'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
home = os.path.expanduser('~')
if not os.path.exists('.bazelrc'):
if os.path.exists(os.path.join(home, '.bazelrc')):
with open('.bazelrc', 'a') as f:
f.write('import %s/.bazelrc\n' % home.replace('\\', '/'))
else:
open('.bazelrc', 'w').close()
remove_line_with('.bazelrc', 'tf_configure')
with open('.bazelrc', 'a') as f:
f.write('import %workspace%/.tf_configure.bazelrc\n')
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = 'tensorflow/contrib/makefile/downloads'
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
    yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' % (
var_name, ', '.join(true_strings), ', '.join(false_strings),
var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
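# Usage sketch (values are illustrative): with environ_cp['TF_NEED_HDFS'] preset
# to 'yes', get_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System', False)
# returns True without prompting; if the variable is unset, the user is asked
# 'Do you wish to build TensorFlow with Hadoop File System support? [y/N]: '.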
def set_build_var(environ_cp, var_name, query_item, option_name,
enabled_by_default, bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build --define %s=true' % option_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true'
% (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optionanl string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
write_action_env_to_bazelrc(var_name, var)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
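# A couple of worked examples: convert_version_to_int('1.2.3') pads each segment
# to three digits and returns 1002003; convert_version_to_int('0.26.1-homebrew')
# drops the '-homebrew' suffix and returns 26001; a non-numeric segment such as
# 'dev' makes it return None.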
def check_bazel_version(min_version):
"""Check installed bezel version is at least min_version.
Args:
min_version: string for minimum bazel version.
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(['bazel', '--batch', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(0)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
else:
default_cc_opt_flags = '-march=native'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
# TODO(mikecase): Remove these default defines once we are able to get
# TF Lite targets building without them.
write_to_bazelrc('build --copt=-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK')
write_to_bazelrc('build --host_copt=-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you want to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(
environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS
):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp,
var_name,
full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
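# Minimal usage sketch (TF_MYVAR and the prompt text are purely illustrative):
#   my_path = prompt_loop_or_load_from_env(
#       environ_cp,
#       var_name='TF_MYVAR',
#       var_default='/usr/local/mytool',
#       ask_for_var='Please specify where mytool is installed.',
#       check_success=os.path.exists,
#       error_msg='Invalid path. %s cannot be found.')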
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.')
)
write_android_ndk_workspace_rule(android_ndk_home_path)
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
    default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(os.path.join(android_sdk_home_path,
'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(os.path.join(android_sdk_home_path,
'build-tools',
version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_android_sdk_workspace_rule(android_sdk_home_path,
android_build_tools_version,
android_api_level)
def write_android_sdk_workspace_rule(android_sdk_home_path,
android_build_tools_version,
android_api_level):
print('Writing android_sdk_workspace rule.\n')
with open(_TF_WORKSPACE, 'a') as f:
f.write("""
android_sdk_repository(
name="androidsdk",
api_level=%s,
path="%s",
build_tools_version="%s")\n
""" % (android_api_level, android_sdk_home_path, android_build_tools_version))
def write_android_ndk_workspace_rule(android_ndk_home_path):
print('Writing android_ndk_workspace rule.')
ndk_api_level = check_ndk_level(android_ndk_home_path)
if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The API level of the NDK in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' % (android_ndk_home_path, ndk_api_level,
_SUPPORTED_ANDROID_NDK_VERSIONS))
with open(_TF_WORKSPACE, 'a') as f:
f.write("""
android_ndk_repository(
name="androidndk",
path="%s",
api_level=%s)\n
""" % (android_ndk_home_path, ndk_api_level))
def check_ndk_level(android_ndk_home_path):
"""Check the revision number of an Android NDK path."""
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
return revision.group(1)
return None
def workspace_has_any_android_rule():
"""Check the WORKSPACE for existing android_*_repository rules."""
with open(_TF_WORKSPACE, 'r') as f:
workspace = f.read()
has_any_rule = re.search(r'^android_[ns]dk_repository',
workspace,
re.MULTILINE)
return has_any_rule
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var=
'Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def set_tf_cuda_version(environ_cp):
"""Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use, '
'e.g. 7.0. [Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
# Configure the Cuda SDK version to use.
tf_cuda_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
# Find out where the CUDA toolkit is installed
default_cuda_path = _DEFAULT_CUDA_PATH
if is_windows() or is_cygwin():
default_cuda_path = cygpath(
environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
elif is_linux():
# If the default doesn't exist, try an alternative default.
if (not os.path.exists(default_cuda_path)
) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
' installed. Refer to README.md for more details. '
'[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
cuda_toolkit_path = get_from_env_or_user_or_default(
environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
if is_windows():
cuda_rt_lib_path = 'lib/x64/cudart.lib'
elif is_linux():
cuda_rt_lib_path = 'lib64/libcudart.so.%s' % tf_cuda_version
elif is_macos():
cuda_rt_lib_path = 'lib/libcudart.%s.dylib' % tf_cuda_version
cuda_toolkit_path_full = os.path.join(cuda_toolkit_path, cuda_rt_lib_path)
if os.path.exists(cuda_toolkit_path_full):
break
# Reset and retry
print('Invalid path to CUDA %s toolkit. %s cannot be found' %
(tf_cuda_version, cuda_toolkit_path_full))
environ_cp['TF_CUDA_VERSION'] = ''
environ_cp['CUDA_TOOLKIT_PATH'] = ''
else:
raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
"""Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_cudnn_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
'installed. Refer to README.md for more details. [Default'
' is %s]:') % (tf_cudnn_version, default_cudnn_path)
cudnn_install_path = get_from_env_or_user_or_default(
environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)
    # Result returned from "read" will be used unexpanded. That makes "~"
# unusable. Going through one more level of expansion to handle that.
cudnn_install_path = os.path.realpath(
os.path.expanduser(cudnn_install_path))
if is_windows() or is_cygwin():
cudnn_install_path = cygpath(cudnn_install_path)
if is_windows():
cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
elif is_linux():
cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
elif is_macos():
cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
cuda_dnn_lib_alt_path)
if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
cuda_dnn_lib_alt_path_full):
break
# Try another alternative for Linux
if is_linux():
ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
cudnn_path_from_ldconfig)
if cudnn_path_from_ldconfig:
cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
if os.path.exists('%s.%s' % (cudnn_path_from_ldconfig,
tf_cudnn_version)):
cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
break
# Reset and Retry
print(
'Invalid path to cuDNN %s toolkit. None of the following files can be '
'found:' % tf_cudnn_version)
print(cuda_dnn_lib_path_full)
print(cuda_dnn_lib_alt_path_full)
if is_linux():
print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))
environ_cp['TF_CUDNN_VERSION'] = ''
else:
raise UserInputError('Invalid TF_CUDNN setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
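# Illustrative output: on a machine whose deviceQuery reports a single GPU with
# 'CUDA Capability Major/Minor version number: 6.1', this returns '6.1'; with two
# such GPUs it would return '6.1,6.1'. An empty string means detection failed.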
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'Cuda compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size. [Default is: %s]' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
for compute_capability in tf_cuda_compute_capabilities.split(','):
      m = re.match(r'[0-9]+\.[0-9]+', compute_capability)
      if not m:
        print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(m.group(0).split('.')[0])
if ver < 3:
print('Only compute capabilities 3.0 or higher are supported.')
all_valid = False
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
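# Example of the accepted format (hypothetical values), sketched here rather
# than taken from the original script: a comma-separated '<major>.<minor>'
# list such as '3.5,5.2,6.1'. Entries below 3.0, or entries that do not match
# that pattern, make the prompt loop above ask again.
#
#   environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = '3.5,5.2,6.1'
#   set_tf_cuda_compute_capabilities(environ_cp)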
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
if is_windows():
# The following three variables are needed for MSVC toolchain configuration
# in Bazel
environ_cp['CUDA_PATH'] = environ_cp.get('CUDA_TOOLKIT_PATH')
environ_cp['CUDA_COMPUTE_CAPABILITIES'] = environ_cp.get(
'TF_CUDA_COMPUTE_CAPABILITIES')
environ_cp['NO_WHOLE_ARCHIVE_OPTION'] = 1
write_action_env_to_bazelrc('CUDA_PATH', environ_cp.get('CUDA_PATH'))
    write_action_env_to_bazelrc('CUDA_COMPUTE_CAPABILITIES',
                                environ_cp.get('CUDA_COMPUTE_CAPABILITIES'))
write_action_env_to_bazelrc('NO_WHOLE_ARCHIVE_OPTION',
environ_cp.get('NO_WHOLE_ARCHIVE_OPTION'))
write_to_bazelrc('build --config=win-cuda')
write_to_bazelrc('test --config=win-cuda')
else:
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
write_to_bazelrc('test --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
write_to_bazelrc('test --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
      ask_for_var=('Please specify which C compiler should be used as the host '
                   'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path,
sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: '
) % (_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found'
% (trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR',
trisycl_include_dir)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (os.path.exists(os.path.join(mpi_home, 'include')) and
os.path.exists(os.path.join(mpi_home, 'lib')))
if not exists:
      print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
            (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine if we use OpenMPI or MVAPICH; these require different header
  # files to be included here to make the Bazel dependency checker happy.
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_grpc_build_flags():
write_to_bazelrc('build --define grpc_no_ares=true')
def set_windows_build_flags():
if is_windows():
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def main():
  # Make a copy of os.environ to be clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
check_bazel_version('0.5.4')
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_S3'] = '0'
environ_cp['TF_NEED_GCP'] = '0'
environ_cp['TF_NEED_HDFS'] = '0'
environ_cp['TF_NEED_JEMALLOC'] = '0'
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
if is_macos():
environ_cp['TF_NEED_JEMALLOC'] = '0'
set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
'with_jemalloc', True)
set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
'with_gcp_support', True, 'gcp')
set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
'with_hdfs_support', True, 'hdfs')
set_build_var(environ_cp, 'TF_NEED_S3', 'Amazon S3 File System',
'with_s3_support', True, 's3')
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
False, 'xla')
set_build_var(environ_cp, 'TF_NEED_GDR', 'GDR', 'with_gdr_support',
False, 'gdr')
set_build_var(environ_cp, 'TF_NEED_VERBS', 'VERBS', 'with_verbs_support',
False, 'verbs')
set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
set_tf_cuda_compute_capabilities(environ_cp)
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
if not is_windows():
# Ask if we want to download clang release while building.
set_tf_download_clang(environ_cp)
else:
# We use bazel's generated crosstool on Windows and there is no
# way to provide downloaded toolchain for that yet.
# TODO(ibiryukov): Investigate using clang as a cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_grpc_build_flags()
set_cc_opt_flags(environ_cp)
set_windows_build_flags()
if workspace_has_any_android_rule():
print('The WORKSPACE file has at least one of ["android_sdk_repository", '
'"android_ndk_repository"] already set. Will not ask to help '
'configure the WORKSPACE. Please delete the existing rules to '
'activate the helper.\n')
else:
if get_var(
environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace',
False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'),
'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See tools/bazel.rc for '
'more details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
if __name__ == '__main__':
main()
| {
"content_hash": "ee415908ed580c1fe395a99bebd3e919",
"timestamp": "",
"source": "github",
"line_count": 1323,
"max_line_length": 80,
"avg_line_length": 35.99470899470899,
"alnum_prop": 0.6409567207744482,
"repo_name": "jwlawson/tensorflow",
"id": "cf16ef483763733cc12c838ea92b144c6493f0b1",
"size": "48310",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "configure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8913"
},
{
"name": "C",
"bytes": "322059"
},
{
"name": "C++",
"bytes": "35810753"
},
{
"name": "CMake",
"bytes": "189463"
},
{
"name": "Go",
"bytes": "1058216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "543556"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "49545"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94754"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "31483641"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "408005"
}
],
"symlink_target": ""
} |
try:
from unittest2 import TestCase # Python2.6
except ImportError:
from unittest import TestCase
import transaction
from webob.multidict import MultiDict
from pyramid import testing
from pyramid.httpexceptions import HTTPFound
# from pyramid.authorization import ACLAuthorizationPolicy
from mock import patch
from pyvac.models import DBSession
class ModelTestCase(TestCase):
def setUp(self):
transaction.begin()
self.session = DBSession()
def tearDown(self):
transaction.commit()
class DummyRoute(object):
name = 'index'
class DummyRequest(testing.DummyRequest):
method = 'GET'
application_url = 'http://pyvac.example.net'
host = 'pyvac.example.net:80'
client_addr = '127.0.0.8'
matched_route = DummyRoute
def auto_translate(string, country):
return string
translate = auto_translate
class UnauthenticatedViewTestCase(TestCase):
mocks = []
def __init__(self, methodName='runTest'):
super(UnauthenticatedViewTestCase, self).__init__(methodName)
# pylint: disable=W0142
self.mocks = [patch(*mock_args) for mock_args in self.mocks]
self.maxDiff = None
def setUp(self):
from pyvac.config import includeme
from .conf import settings
super(UnauthenticatedViewTestCase, self).setUp()
self.maxDiff = None
# authz_policy = ACLAuthorizationPolicy()
self.config = testing.setUp(settings=settings)
self.config.include(includeme)
self.session = DBSession()
transaction.begin()
for dummy in self.mocks:
dummy.start()
def tearDown(self):
super(UnauthenticatedViewTestCase, self).tearDown()
self.session.flush()
transaction.commit()
testing.tearDown()
for dummy in reversed(self.mocks):
dummy.stop()
def create_request(self, params=None, environ=None, matchdict=None,
headers=None, path='/', cookies=None, post=None, **kw):
if params and not isinstance(params, MultiDict):
mparams = MultiDict()
for k, v in params.items():
mparams.add(k, v)
params = mparams
rv = DummyRequest(params, environ, headers, path, cookies,
post, matchdict=(matchdict or {}), **kw)
return rv
def assertIsRedirect(self, view):
self.assertIsInstance(view, HTTPFound)
class ViewTestCase(UnauthenticatedViewTestCase):
def setUp(self):
super(ViewTestCase, self).setUp()
def set_userid(self, userid='admin', permissive=False):
self.config.testing_securitypolicy(userid=userid,
permissive=permissive)
class ViewAdminTestCase(ViewTestCase):
def setUp(self):
super(ViewAdminTestCase, self).setUp()
self.config.testing_securitypolicy(userid='admin',
permissive=True)
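# Hypothetical usage sketch, not part of the original module: a concrete test
# would subclass one of the cases above and build requests through the helper,
# roughly like this:
#
#   class AccountViewTestCase(ViewTestCase):
#       def test_index(self):
#           self.set_userid(userid='admin')
#           request = self.create_request(params={'page': '1'})
#           # ... call the view under test with `request` ...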
| {
"content_hash": "68232de2f63251317ea654f8c887978d",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 27.504587155963304,
"alnum_prop": 0.6350900600400267,
"repo_name": "sayoun/pyvac",
"id": "0c75e3850c4fdecb8fba31800a8c1a1ec26a7b7b",
"size": "3000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvac/tests/case.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13012"
},
{
"name": "HTML",
"bytes": "110448"
},
{
"name": "JavaScript",
"bytes": "4635"
},
{
"name": "Python",
"bytes": "420836"
}
],
"symlink_target": ""
} |
import json
# SBaaS
from .stage03_quantification_dG_f_query import stage03_quantification_dG_f_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class stage03_quantification_dG_f_io(stage03_quantification_dG_f_query,sbaas_template_io):
def import_dataStage03QuantificationDGf_add(self, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_dataStage03QuantificationDGf(data.data);
data.clear_data();
def import_dataStage03QuantificationDGf_update(self, filename):
        '''table updates'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.update_dataStage03QuantificationDGf(data.data);
data.clear_data();
def import_dataStage03QuantificationDG0f_add(self, filename):
'''table adds'''
data = base_importData();
#data.read_csv(filename);
data.read_json(filename);
#data.format_data();
self.add_dataStage03QuantificationDG0f(data.data);
#data.clear_data();
def import_dataStage03QuantificationDG0f_update(self, filename):
        '''table updates'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.update_dataStage03QuantificationDG0f(data.data);
data.clear_data();
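# Hypothetical usage sketch (file names assumed, not part of this module):
# given an instance `io` of the SBaaS class that mixes this io class in,
# imports are driven directly from flat files:
#
#   io.import_dataStage03QuantificationDGf_add('dG_f_rows.csv')
#   io.import_dataStage03QuantificationDG0f_add('dG0_f_rows.json')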
| {
"content_hash": "218a83b4e281ccdcdd97defa4ce86cc7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 90,
"avg_line_length": 35.58139534883721,
"alnum_prop": 0.6679738562091503,
"repo_name": "dmccloskey/SBaaS_thermodynamics",
"id": "28d54aa76abcc81fe8b371623950287329ab1ce6",
"size": "1539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SBaaS_thermodynamics/stage03_quantification_dG_f_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "394"
},
{
"name": "Python",
"bytes": "369163"
}
],
"symlink_target": ""
} |
"""A module to produce a model validation plot
It uses one external program:
map_align for contact map alignment
*** This program needs to be installed separately from https://github.com/sokrypton/map_align***
"""
from __future__ import division
from __future__ import print_function
import os
from Bio.PDB.DSSP import DSSP
import numpy as np
import pandas as pd
import tempfile
from conkit.applications import MapAlignCommandline
from conkit.core.distance import Distance
import conkit.io
from conkit.misc import load_validation_model, SELECTED_VALIDATION_FEATURES, ALL_VALIDATION_FEATURES
from conkit.plot.figure import Figure
import conkit.plot.tools as tools
LINEKWARGS = dict(linestyle="--", linewidth=1.0, alpha=0.5, color=tools.ColorDefinitions.MISMATCH, zorder=1)
MARKERKWARGS = dict(marker='|', linestyle='None')
_MARKERKWARGS = dict(marker='s', linestyle='None')
class ModelValidationFigure(Figure):
"""A Figure object specifc for a model validation. This figure represents the proabbility that each given residue
in the model is involved in a model error. This is donw by feeding a trained classfier the differences observed
between the predicted distogram and the observed inter-residue contacts and distances at the PDB model.
Attributes
----------
model: :obj:`~conkit.core.distogram.Distogram`
The PDB model that will be validated
prediction: :obj:`~conkit.core.distogram.Distogram`
The distogram with the residue distance predictions
sequence: :obj:`~conkit.core.sequence.Sequence`
The sequence of the structure
dssp: :obj:`Bio.PDB.DSSP.DSSP`
The DSSP output for the PDB model that will be validated
map_align_exe: str
The path to map_align executable [default: None]
dist_bins: list, tuple
A list of tuples with the boundaries of the distance bins to use in the calculation [default: CASP2 bins]
l_factor: float
The L/N factor used to filter the contacts before finding the False Negatives [default: 0.5]
absent_residues: set
The residues not observed in the model that will be validated (only if in PDB format)
Examples
--------
>>> from Bio.PDB import PDBParser
>>> from Bio.PDB.DSSP import DSSP
>>> p = PDBParser()
>>> structure = p.get_structure('TOXD', 'toxd/toxd.pdb')[0]
>>> dssp = DSSP(structure, 'toxd/toxd.pdb', dssp='mkdssp', acc_array='Wilke')
>>> import conkit
>>> sequence = conkit.io.read('toxd/toxd.fasta', 'fasta').top
>>> model = conkit.io.read('toxd/toxd.pdb', 'pdb').top_map
>>> prediction = conkit.io.read('toxd/toxd.npz', 'rosettanpz').top_map
>>> conkit.plot.ModelValidationFigure(model, prediction, sequence, dssp)
"""
def __init__(self, model, prediction, sequence, dssp, map_align_exe=None, dist_bins=None, l_factor=0.5, **kwargs):
"""A new model validation plot
Parameters
----------
model: :obj:`~conkit.core.distogram.Distogram`
The PDB model that will be validated
prediction: :obj:`~conkit.core.distogram.Distogram`
The distogram with the residue distance predictions
sequence: :obj:`~conkit.core.sequence.Sequence`
The sequence of the structure
dssp: :obj:`Bio.PDB.DSSP.DSSP`
The DSSP output for the PDB model that will be validated
map_align_exe: str
The path to map_align executable [default: None]
dist_bins: list, tuple
A list of tuples with the boundaries of the distance bins to use in the calculation [default: CASP2 bins]
l_factor: float
The L/N factor used to filter the contacts before finding the False Negatives [default: 0.5]
**kwargs
General :obj:`~conkit.plot.figure.Figure` keyword arguments
"""
super(ModelValidationFigure, self).__init__(**kwargs)
self._model = None
self._prediction = None
self._sequence = None
self._distance_bins = None
self.data = None
self.alignment = {}
self.sorted_scores = None
self.smooth_scores = None
if len(sequence) < 5:
raise ValueError('Cannot validate a model with less than 5 residues')
self.map_align_exe = map_align_exe
self.l_factor = l_factor
self.dist_bins = dist_bins
self.model = model
self.prediction = prediction
self.sequence = sequence
self.classifier, self.scaler = load_validation_model()
self.absent_residues = self._get_absent_residues()
self.dssp = self._parse_dssp(dssp)
self.draw()
def __repr__(self):
return self.__class__.__name__
@property
def dist_bins(self):
return self._dist_bins
@dist_bins.setter
def dist_bins(self, dist_bins):
if dist_bins is None:
self._dist_bins = ((0, 4), (4, 6), (6, 8), (8, 10), (10, 12), (12, 14),
(14, 16), (16, 18), (18, 20), (20, np.inf))
else:
Distance._assert_valid_bins(dist_bins)
self._dist_bins = dist_bins
@property
def sequence(self):
return self._sequence
@sequence.setter
def sequence(self, sequence):
if sequence and tools._isinstance(sequence, "Sequence"):
self._sequence = sequence
else:
raise TypeError("Invalid hierarchy type for sequence: %s" % sequence.__class__.__name__)
@property
def prediction(self):
return self._prediction
@prediction.setter
def prediction(self, prediction):
if prediction and tools._isinstance(prediction, "Distogram"):
self._prediction = prediction
else:
raise TypeError("Invalid hierarchy type for prediction: %s" % prediction.__class__.__name__)
@property
def model(self):
return self._model
@model.setter
def model(self, model):
if model and tools._isinstance(model, "Distogram"):
self._model = model
else:
raise TypeError("Invalid hierarchy type for model: %s" % model.__class__.__name__)
def _get_absent_residues(self):
"""Get a set of residues absent from the :attr:`~conkit.plot.ModelValidationFigure.model` and
:attr:`~conkit.plot.ModelValidationFigure.prediction`. Only distograms originating from PDB files
are considered."""
absent_residues = []
if self.model.original_file_format == "pdb":
absent_residues += self.model.get_absent_residues(len(self.sequence))
if self.prediction.original_file_format == "pdb":
absent_residues += self.prediction.get_absent_residues(len(self.sequence))
return set(absent_residues)
def _prepare_distogram(self, distogram):
"""General operations to prepare a :obj:`~conkit.core.distogram.Distogram` instance before plotting."""
distogram.get_unique_distances(inplace=True)
distogram.sequence = self.sequence
distogram.set_sequence_register()
if distogram.original_file_format != "pdb":
distogram.reshape_bins(self.dist_bins)
return distogram
def _prepare_contactmap(self, distogram):
"""General operations to prepare a :obj:`~conkit.core.contactmap.ContactMap` instance before plotting."""
contactmap = distogram.as_contactmap()
contactmap.sequence = self.sequence
contactmap.set_sequence_register()
contactmap.remove_neighbors(inplace=True)
if distogram.original_file_format != "pdb":
contactmap.sort("raw_score", reverse=True, inplace=True)
contactmap.slice_map(seq_len=len(self.sequence), l_factor=self.l_factor, inplace=True)
return contactmap
def _parse_dssp(self, dssp):
"""Parse :obj:`Bio.PDB.DSSP.DSSP` into a :obj:`pandas.DataFrame` with secondary structure information
about the model"""
if not tools._isinstance(dssp, DSSP):
raise TypeError("Invalid hierarchy type for dssp: %s" % dssp.__class__.__name__)
_dssp_list = []
for residue in sorted(dssp.keys(), key=lambda x: x[1][1]):
resnum = residue[1][1]
if resnum in self.absent_residues:
_dssp_list.append((resnum, np.nan, np.nan, np.nan, np.nan))
continue
acc = dssp[residue][3]
if dssp[residue][2] in ('-', 'T', 'S'):
ss2 = (1, 0, 0)
elif dssp[residue][2] in ('H', 'G', 'I'):
ss2 = (0, 1, 0)
else:
ss2 = (0, 0, 1)
_dssp_list.append((resnum, *ss2, acc))
dssp = pd.DataFrame(_dssp_list)
dssp.columns = ['RESNUM', 'COIL', 'HELIX', 'SHEET', 'ACC']
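        # Sketch of the resulting frame (hypothetical values, added for clarity):
        # one row per residue with a one-hot secondary-structure encoding and the
        # relative accessibility, e.g. RESNUM=10, COIL=0, HELIX=1, SHEET=0,
        # ACC=0.42; residues in self.absent_residues are kept as NaN rows.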
return dssp
def _get_cmap_alignment(self):
"""Obtain a contact map alignment between :attr:`~conkit.plot.ModelValidationFigure.model` and
:attr:`~conkit.plot.ModelValidationFigure.prediction` and get the misaligned residues"""
with tempfile.TemporaryDirectory() as tmpdirname:
contact_map_a = os.path.join(tmpdirname, 'contact_map_a.mapalign')
contact_map_b = os.path.join(tmpdirname, 'contact_map_b.mapalign')
conkit.io.write(contact_map_a, 'mapalign', self.prediction)
conkit.io.write(contact_map_b, 'mapalign', self.model)
map_align_cline = MapAlignCommandline(
cmd=self.map_align_exe,
contact_map_a=contact_map_a,
contact_map_b=contact_map_b)
stdout, stderr = map_align_cline()
self.alignment = tools.parse_map_align_stdout(stdout)
def _parse_data(self, predicted_dict, *metrics):
"""Create a :obj:`pandas.DataFrame` with the features of the residues in the model"""
_features = []
for residue_features in zip(sorted(predicted_dict.keys()), *metrics):
_features.append((*residue_features,))
self.data = pd.DataFrame(_features)
self.data.columns = ALL_VALIDATION_FEATURES
self.data = self.data.merge(self.dssp, how='inner', on=['RESNUM'])
if self.map_align_exe is not None:
self._get_cmap_alignment()
self.data['MISALIGNED'] = self.data.RESNUM.isin(self.alignment.keys())
else:
self.data['MISALIGNED'] = False
def _add_legend(self):
"""Adds legend to the :obj:`~conkit.plot.ModelValidationFigure`"""
_error = self.ax.plot([], [], c=tools.ColorDefinitions.ERROR, label='Predicted Error', **_MARKERKWARGS)
_correct = self.ax.plot([], [], c=tools.ColorDefinitions.CORRECT, label='Predicted Correct', **_MARKERKWARGS)
_threshold_line = [self.ax.axvline(0, ymin=0, ymax=0, label="Score Threshold", **LINEKWARGS)]
_score_plot = self.ax.plot([], [], color=tools.ColorDefinitions.SCORE, label='Smoothed Score')
plots = _score_plot + _threshold_line + _correct + _error
if self.map_align_exe is not None:
_misaligned = self.ax.plot([], [], c=tools.ColorDefinitions.MISALIGNED, label='Misaligned', **_MARKERKWARGS)
_aligned = self.ax.plot([], [], c=tools.ColorDefinitions.ALIGNED, label='Aligned', **_MARKERKWARGS)
plots += _misaligned + _aligned
labels = [l.get_label() for l in plots]
self.ax.legend(plots, labels, bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3,
ncol=3, mode="expand", borderaxespad=0.0, scatterpoints=1)
def _predict_score(self, resnum):
"""Predict whether a given residue is part of a model error or not"""
residue_features = self.data.loc[self.data.RESNUM == resnum][SELECTED_VALIDATION_FEATURES]
if (self.absent_residues and resnum in self.absent_residues) or residue_features.isnull().values.any():
return np.nan
scaled_features = self.scaler.transform(residue_features.values)
return self.classifier.predict_proba(scaled_features)[0, 1]
def draw(self):
model_distogram = self._prepare_distogram(self.model.copy())
prediction_distogram = self._prepare_distogram(self.prediction.copy())
model_cmap = self._prepare_contactmap(self.model.copy())
model_dict = model_cmap.as_dict()
prediction_cmap = self._prepare_contactmap(self.prediction.copy())
predicted_dict = prediction_cmap.as_dict()
cmap_metrics, cmap_metrics_smooth = tools.get_cmap_validation_metrics(model_dict, predicted_dict,
self.sequence, self.absent_residues)
rmsd, rmsd_smooth = tools.get_rmsd(prediction_distogram, model_distogram)
zscore_metrics = tools.get_zscores(model_distogram, predicted_dict, self.absent_residues, rmsd, *cmap_metrics)
self._parse_data(predicted_dict, rmsd_smooth, *cmap_metrics, *cmap_metrics_smooth, *zscore_metrics)
scores = {}
misaligned_residues = set(self.alignment.keys())
for resnum in sorted(predicted_dict.keys()):
_score = self._predict_score(resnum)
scores[resnum] = _score
color = tools.ColorDefinitions.ERROR if _score > 0.5 else tools.ColorDefinitions.CORRECT
self.ax.plot(resnum - 1, -0.01, mfc=color, c=color, **MARKERKWARGS)
if self.map_align_exe is not None:
if resnum in misaligned_residues:
color = tools.ColorDefinitions.MISALIGNED
else:
color = tools.ColorDefinitions.ALIGNED
self.ax.plot(resnum - 1, -0.05, mfc=color, c=color, **MARKERKWARGS)
self.data['SCORE'] = self.data['RESNUM'].apply(lambda x: scores.get(x))
self.sorted_scores = np.nan_to_num([scores[resnum] for resnum in sorted(scores.keys())])
self.smooth_scores = tools.convolution_smooth_values(self.sorted_scores)
self.ax.axhline(0.5, **LINEKWARGS)
self.ax.plot(self.smooth_scores, color=tools.ColorDefinitions.SCORE)
self.ax.set_xlabel('Residue Number')
self.ax.set_ylabel('Smoothed score')
if self.legend:
self._add_legend()
# TODO: deprecate this in 0.14
if self._file_name:
self.savefig(self._file_name, dpi=self._dpi)
| {
"content_hash": "fb3deecbc3a5ca3afe33473324d417a1",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 120,
"avg_line_length": 43.46666666666667,
"alnum_prop": 0.6291829336307864,
"repo_name": "rigdenlab/conkit",
"id": "a70bec98c0555d81a1ea7743bdc61aeda6ad4000",
"size": "15922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conkit/plot/modelvalidation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "900218"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import sentry.db.models.fields.bounded
import sentry.db.models.fields.foreignkey
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = True
dependencies = [
('sentry', '0048_auto_20200302_1825'),
]
operations = [
migrations.CreateModel(
name='QuerySubscriptionEnvironment',
fields=[
('id', sentry.db.models.fields.bounded.BoundedBigAutoField(primary_key=True, serialize=False)),
('date_added', models.DateTimeField(default=django.utils.timezone.now)),
('environment', sentry.db.models.fields.foreignkey.FlexibleForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sentry.Environment')),
('query_subscription', sentry.db.models.fields.foreignkey.FlexibleForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sentry.QuerySubscription')),
],
options={
'db_table': 'sentry_querysubscriptionenvironment',
},
),
migrations.AddField(
model_name='querysubscription',
name='environments',
field=models.ManyToManyField(through='sentry.QuerySubscriptionEnvironment', to='sentry.Environment'),
),
migrations.AlterUniqueTogether(
name='querysubscriptionenvironment',
unique_together=set([('query_subscription', 'environment')]),
),
]
| {
"content_hash": "e39cf7362b73ff08159137eee6f458f4",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 170,
"avg_line_length": 46.58181818181818,
"alnum_prop": 0.6814988290398126,
"repo_name": "beeftornado/sentry",
"id": "3bdbd42e648d373a6404616dd6af8a9e6e1f9bba",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/migrations/0049_auto_20200304_0254.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
from google.cloud import apigee_registry_v1
def sample_delete_instance():
# Create a client
client = apigee_registry_v1.ProvisioningClient()
# Initialize request argument(s)
request = apigee_registry_v1.DeleteInstanceRequest(
name="name_value",
)
# Make the request
operation = client.delete_instance(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END apigeeregistry_v1_generated_Provisioning_DeleteInstance_sync]
| {
"content_hash": "64b7b4f8855c01e915b76654a2078239",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 24.391304347826086,
"alnum_prop": 0.7094474153297683,
"repo_name": "googleapis/python-apigee-registry",
"id": "dca1a25a5f0d813d06eb2a17ead69fa40c966e80",
"size": "1962",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/apigeeregistry_v1_generated_provisioning_delete_instance_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2223877"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from wtforms import Form
from wtforms import TextField
from wtforms import HiddenField
from ez2pay.i18n import LocalizerFactory
get_localizer = LocalizerFactory()
class FormFactory(object):
def __init__(self, localizer):
self.localizer = localizer
_ = self.localizer
self.required_msg = _(u'This field is required.')
def make_payment_form(self):
_ = self.localizer
class PaymentForm(Form):
first_name = TextField(_('First name'), [
])
last_name = TextField(_('Last name'), [
])
card_number = TextField(_('Card number'), [
])
expire_year = TextField(_('Expire Year (e.g. 2013)'), [
])
expire_month = TextField(_('Expire month (e.g. 10)'), [
])
security_code = TextField(_('Security code'), [
])
payment_uri = HiddenField(_(''), [
])
return PaymentForm
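# Hypothetical usage sketch (request handling assumed, not shown in this module):
#
#   factory = FormFactory(localizer)
#   PaymentForm = factory.make_payment_form()
#   form = PaymentForm(request.POST)
#   if form.validate():
#       pass  # proceed with the submitted card details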
| {
"content_hash": "23dba61c4bdfdeb88113c242ea86bdab",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 67,
"avg_line_length": 27.89189189189189,
"alnum_prop": 0.5416666666666666,
"repo_name": "victorlin/ez2pay",
"id": "6e5d0c45cbc5d1fa947ae5332c574c9f05e35649",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ez2pay/modules/front/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "286197"
},
{
"name": "JavaScript",
"bytes": "58274"
},
{
"name": "Python",
"bytes": "126600"
}
],
"symlink_target": ""
} |
"""Platform-specific support for compiling/executing C sources."""
import py, os, sys
from rpython.tool.runsubprocess import run_subprocess as _run_subprocess
from rpython.tool.udir import udir
from rpython.tool.version import rpythonroot
log = py.log.Producer("platform")
class CompilationError(Exception):
def __init__(self, out, err):
self.out = out.replace('\r\n', '\n')
self.err = err.replace('\r\n', '\n')
def __repr__(self):
if self.err:
attr = 'err'
else:
attr = 'out'
text = getattr(self, attr).replace('\n', '\n\t')
return 'CompilationError(%s="""\n\t%s""")' % (attr, text)
__str__ = __repr__
class ExecutionResult(object):
def __init__(self, returncode, out, err):
self.returncode = returncode
self.out = out.replace('\r\n', '\n')
self.err = err.replace('\r\n', '\n')
def __repr__(self):
return "<ExecutionResult retcode=%d>" % (self.returncode,)
class Platform(object):
name = "abstract platform"
c_environ = None
relevant_environ = ()
log_errors = True
so_prefixes = ('',)
extra_libs = ()
def __init__(self, cc):
if self.__class__ is Platform:
raise TypeError("You should not instantiate Platform class directly")
self.cc = cc
def compile(self, cfiles, eci, outputfilename=None, standalone=True):
ofiles = self._compile_o_files(cfiles, eci, standalone)
return self._finish_linking(ofiles, eci, outputfilename, standalone)
def _all_cfiles(self, cfiles, eci):
seen = set()
result = []
for cfile in list(cfiles) + list(eci.separate_module_files):
cfile = py.path.local(cfile)
if cfile not in seen:
seen.add(cfile)
result.append(cfile)
return result
def _compile_o_files(self, cfiles, eci, standalone=True):
cfiles = self._all_cfiles(cfiles, eci)
compile_args = self._compile_args_from_eci(eci, standalone)
ofiles = []
for cfile in cfiles:
# Windows hack: use masm for files ending in .asm
if str(cfile).lower().endswith('.asm'):
ofiles.append(self._compile_c_file(self.masm, cfile, []))
else:
ofiles.append(self._compile_c_file(self.cc, cfile, compile_args))
return ofiles
def execute(self, executable, args=None, env=None, compilation_info=None):
if env is None:
env = os.environ.copy()
else:
env = env.copy()
# On Windows, %SystemRoot% must be present for most programs to start
if (os.name == 'nt' and
"SystemRoot" not in env and
"SystemRoot" in os.environ):
env["SystemRoot"] = os.environ["SystemRoot"]
# Set LD_LIBRARY_PATH on posix platforms
if os.name == 'posix' and compilation_info is not None:
library_path = ':'.join([str(i) for i in compilation_info.library_dirs])
if sys.platform == 'darwin':
env['DYLD_LIBRARY_PATH'] = library_path
else:
env['LD_LIBRARY_PATH'] = library_path
returncode, stdout, stderr = _run_subprocess(str(executable), args,
env)
return ExecutionResult(returncode, stdout, stderr)
def gen_makefile(self, cfiles, eci, exe_name=None, path=None,
shared=False, headers_to_precompile=[],
no_precompile_cfiles = [], icon=None):
raise NotImplementedError("Pure abstract baseclass")
def __repr__(self):
return '<%s cc=%s>' % (self.__class__.__name__, self.cc)
def __hash__(self):
return hash(self.__class__.__name__)
def __ne__(self, other):
return not self == other
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.__dict__ == other.__dict__)
def key(self):
bits = [self.__class__.__name__, 'cc=%r' % self.cc]
for varname in self.relevant_environ:
bits.append('%s=%r' % (varname, os.environ.get(varname)))
# adding sys.maxint to disambiguate windows
bits.append('%s=%r' % ('sys.maxint', sys.maxint))
return ' '.join(bits)
# some helpers which seem to be cross-platform enough
def _execute_c_compiler(self, cc, args, outname, cwd=None):
log.execute(cc + ' ' + ' '.join(args))
# 'cc' can also contain some options for the C compiler;
# e.g. it can be "gcc -m32". We handle it by splitting on ' '.
cclist = cc.split()
cc = cclist[0]
args = cclist[1:] + args
returncode, stdout, stderr = _run_subprocess(cc, args, self.c_environ,
cwd)
self._handle_error(returncode, stdout, stderr, outname)
def _handle_error(self, returncode, stdout, stderr, outname):
if returncode != 0:
errorfile = outname.new(ext='errors')
errorfile.write(stderr, 'wb')
if self.log_errors:
stderrlines = stderr.splitlines()
for line in stderrlines:
log.Error(line)
# ^^^ don't use ERROR, because it might actually be fine.
# Also, ERROR confuses lib-python/conftest.py.
raise CompilationError(stdout, stderr)
else:
for line in stderr.splitlines():
log.WARNING(line)
def _make_o_file(self, cfile, ext):
"""Create an object file name under the udir for a .c file"""
ofile = cfile.new(ext=ext)
if ofile.relto(udir):
return ofile
assert ofile.relto(rpythonroot), (
"%r should be relative to either %r or %r" % (
ofile, rpythonroot, udir))
ofile = udir.join(ofile.relto(rpythonroot))
ofile.dirpath().ensure(dir=True)
return ofile
def preprocess_include_dirs(self, include_dirs):
if 'PYPY_LOCALBASE' in os.environ:
dirs = list(self._preprocess_include_dirs(include_dirs))
return [os.environ['PYPY_LOCALBASE'] + '/include'] + dirs
return self._preprocess_include_dirs(include_dirs)
def _preprocess_include_dirs(self, include_dirs):
return include_dirs
def _compile_args_from_eci(self, eci, standalone):
include_dirs = self.preprocess_include_dirs(eci.include_dirs)
args = self._includedirs(include_dirs)
if standalone:
extra = self.standalone_only
else:
extra = self.get_shared_only_compile_flags()
cflags = list(self.cflags) + list(extra)
return (cflags + list(eci.compile_extra) + args)
def get_shared_only_compile_flags(self):
return tuple(self.shared_only)
def preprocess_library_dirs(self, library_dirs):
if 'PYPY_LOCALBASE' in os.environ:
dirs = list(self._preprocess_library_dirs(library_dirs))
return [os.environ['PYPY_LOCALBASE'] + '/lib'] + dirs
return self._preprocess_library_dirs(library_dirs)
def _preprocess_library_dirs(self, library_dirs):
return library_dirs
def _link_args_from_eci(self, eci, standalone):
library_dirs = self.preprocess_library_dirs(eci.library_dirs)
library_dirs = self._libdirs(library_dirs)
libraries = self._libs(eci.libraries)
link_files = self._linkfiles(eci.link_files)
export_flags = self._exportsymbols_link_flags()
return (library_dirs + list(self.link_flags) + export_flags +
link_files + list(eci.link_extra) + libraries +
list(self.extra_libs))
def _exportsymbols_link_flags(self):
return []
def _finish_linking(self, ofiles, eci, outputfilename, standalone):
if outputfilename is None:
outputfilename = ofiles[0].purebasename
if ofiles:
dirname = ofiles[0].dirpath()
else:
dirname = udir.join('module_cache')
exe_name = dirname.join(outputfilename, abs=True)
if standalone:
if self.exe_ext:
exe_name += '.' + self.exe_ext
else:
exe_name += '.' + self.so_ext
if eci.use_cpp_linker:
cc_link = 'g++' # XXX hard-coded so far
else:
cc_link = self.cc
largs = self._link_args_from_eci(eci, standalone)
return self._link(cc_link, ofiles, largs, standalone, exe_name)
# below are some detailed information for platforms
def include_dirs_for_libffi(self):
dirs = self._include_dirs_for_libffi()
if 'PYPY_LOCALBASE' in os.environ:
return [os.environ['PYPY_LOCALBASE'] + '/include'] + dirs
return dirs
def library_dirs_for_libffi(self):
dirs = self._library_dirs_for_libffi()
if 'PYPY_LOCALBASE' in os.environ:
return [os.environ['PYPY_LOCALBASE'] + '/lib'] + dirs
return dirs
def _include_dirs_for_libffi(self):
raise NotImplementedError("Needs to be overwritten")
def _library_dirs_for_libffi(self):
raise NotImplementedError("Needs to be overwritten")
def check___thread(self):
return True
if sys.platform.startswith('linux'):
from rpython.translator.platform.linux import Linux, LinuxPIC
import platform
# Only required on armhf and mips{,el}, not armel. But there's no way to
# detect armhf without shelling out
if (platform.architecture()[0] == '64bit'
or platform.machine().startswith(('arm', 'mips', 'ppc'))):
host_factory = LinuxPIC
else:
host_factory = Linux
elif sys.platform == 'darwin':
from rpython.translator.platform.darwin import Darwin_i386, Darwin_x86_64, Darwin_PowerPC
import platform
assert platform.machine() in ('Power Macintosh', 'i386', 'x86_64')
if platform.machine() == 'Power Macintosh':
host_factory = Darwin_PowerPC
elif sys.maxint <= 2147483647:
host_factory = Darwin_i386
else:
host_factory = Darwin_x86_64
elif "gnukfreebsd" in sys.platform:
from rpython.translator.platform.freebsd import GNUkFreebsd, GNUkFreebsd_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = GNUkFreebsd
else:
host_factory = GNUkFreebsd_64
elif "freebsd" in sys.platform:
from rpython.translator.platform.freebsd import Freebsd, Freebsd_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Freebsd
else:
host_factory = Freebsd_64
elif sys.platform.startswith('netbsd'):
from rpython.translator.platform.netbsd import Netbsd, Netbsd_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Netbsd
else:
host_factory = Netbsd_64
elif "openbsd" in sys.platform:
from rpython.translator.platform.openbsd import OpenBSD, OpenBSD_64
import platform
if platform.architecture()[0] == '32bit':
host_factory = OpenBSD
else:
host_factory = OpenBSD_64
elif os.name == 'nt':
from rpython.translator.platform.windows import Windows, Windows_x64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Windows
else:
host_factory = Windows_x64
elif sys.platform == 'cygwin':
from rpython.translator.platform.cygwin import Cygwin, Cygwin64
import platform
if platform.architecture()[0] == '32bit':
host_factory = Cygwin
else:
host_factory = Cygwin64
else:
# pray
from rpython.translator.platform.distutils_platform import DistutilsPlatform
host_factory = DistutilsPlatform
platform = host = host_factory()
def pick_platform(new_platform, cc):
if new_platform == 'host':
return host_factory(cc)
elif new_platform == 'maemo':
from rpython.translator.platform.maemo import Maemo
return Maemo(cc)
elif new_platform == 'arm':
from rpython.translator.platform.arm import ARM
return ARM(cc)
elif new_platform == 'distutils':
from rpython.translator.platform.distutils_platform import DistutilsPlatform
return DistutilsPlatform()
else:
raise ValueError("platform = %s" % (new_platform,))
def set_platform(new_platform, cc):
global platform
platform = pick_platform(new_platform, cc)
if not platform:
        raise ValueError("pick_platform(%r, %s) failed" % (new_platform, cc))
log.msg("Set platform with %r cc=%s, using cc=%r, version=%r" % (new_platform, cc,
getattr(platform, 'cc','Unknown'),
getattr(platform, 'version','Unknown'),
))
if new_platform == 'host':
global host
host = platform
def is_host_build():
return host == platform
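# Minimal usage sketch (assumed invocation, not taken from RPython's own code):
#
#   from rpython.translator import platform as platform_mod
#   platform_mod.set_platform('host', 'gcc')   # rebuilds `platform` with cc='gcc'
#   assert platform_mod.is_host_build()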
| {
"content_hash": "81ad0dbe841a09b8def38c80230038fa",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 93,
"avg_line_length": 36.221288515406165,
"alnum_prop": 0.6001082669553786,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "9b85d3d38c7f455f1dea5a57bce39ea464038d71",
"size": "12931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/translator/platform/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dictionary', '0003_comment_author_ip'),
]
operations = [
migrations.AlterField(
model_name='wordentry',
name='word_content',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='word_entries', to='dictionary.WordContent'),
),
migrations.AlterField(
model_name='wordentry',
name='words_related',
field=models.ManyToManyField(blank=True, related_name='word_related_to', to='dictionary.WordEntry'),
),
]
| {
"content_hash": "7b66cefe668992cdbdc56023f39d6233",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 162,
"avg_line_length": 31.625,
"alnum_prop": 0.6376811594202898,
"repo_name": "nirvaris/nirvaris-dictionary",
"id": "b0f69ac53bddf95aff761a2540f1caddd890b17a",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dictionary/migrations/0004_auto_20160228_1449.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4056"
},
{
"name": "HTML",
"bytes": "19724"
},
{
"name": "JavaScript",
"bytes": "882"
},
{
"name": "Python",
"bytes": "63545"
}
],
"symlink_target": ""
} |
__revision__ = "src/engine/SCons/CacheDirTests.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import shutil
import sys
import unittest
from TestCmd import TestCmd
import TestUnit
import SCons.CacheDir
built_it = None
class Action(object):
def __call__(self, targets, sources, env, **kw):
global built_it
if kw.get('execute', 1):
built_it = 1
return 0
def genstring(self, target, source, env):
return str(self)
def get_contents(self, target, source, env):
return ''
class Builder(object):
def __init__(self, environment, action):
self.env = environment
self.action = action
self.overrides = {}
self.source_scanner = None
self.target_scanner = None
class Environment(object):
def __init__(self, cachedir):
self.cachedir = cachedir
def Override(self, overrides):
return self
def get_CacheDir(self):
return self.cachedir
class BaseTestCase(unittest.TestCase):
"""
Base fixtures common to our other unittest classes.
"""
def setUp(self):
self.test = TestCmd(workdir='')
import SCons.Node.FS
self.fs = SCons.Node.FS.FS()
self._CacheDir = SCons.CacheDir.CacheDir('cache')
def File(self, name, bsig=None, action=Action()):
node = self.fs.File(name)
node.builder_set(Builder(Environment(self._CacheDir), action))
if bsig:
node.cachesig = bsig
#node.binfo = node.BuildInfo(node)
#node.binfo.ninfo.bsig = bsig
return node
def tearDown(self):
os.remove(os.path.join(self._CacheDir.path, 'config'))
os.rmdir(self._CacheDir.path)
# Should that be shutil.rmtree?
class CacheDirTestCase(BaseTestCase):
"""
Test calling CacheDir code directly.
"""
def test_cachepath(self):
"""Test the cachepath() method"""
# Verify how the cachepath() method determines the name
# of the file in cache.
def my_collect(list):
return list[0]
save_collect = SCons.Util.MD5collect
SCons.Util.MD5collect = my_collect
try:
name = 'a_fake_bsig'
f5 = self.File("cd.f5", name)
result = self._CacheDir.cachepath(f5)
len = self._CacheDir.config['prefix_len']
dirname = os.path.join('cache', name.upper()[:len])
filename = os.path.join(dirname, name)
assert result == (dirname, filename), result
finally:
SCons.Util.MD5collect = save_collect
class FileTestCase(BaseTestCase):
"""
Test calling CacheDir code through Node.FS.File interfaces.
"""
# These tests were originally in Nodes/FSTests.py and got moved
# when the CacheDir support was refactored into its own module.
# Look in the history for Node/FSTests.py if any of this needs
# to be re-examined.
def retrieve_succeed(self, target, source, env, execute=1):
self.retrieved.append(target)
return 0
def retrieve_fail(self, target, source, env, execute=1):
self.retrieved.append(target)
return 1
def push(self, target, source, env):
self.pushed.append(target)
return 0
def test_CacheRetrieve(self):
"""Test the CacheRetrieve() function"""
save_CacheRetrieve = SCons.CacheDir.CacheRetrieve
self.retrieved = []
f1 = self.File("cd.f1")
try:
SCons.CacheDir.CacheRetrieve = self.retrieve_succeed
self.retrieved = []
built_it = None
r = f1.retrieve_from_cache()
assert r == 1, r
assert self.retrieved == [f1], self.retrieved
assert built_it is None, built_it
SCons.CacheDir.CacheRetrieve = self.retrieve_fail
self.retrieved = []
built_it = None
r = f1.retrieve_from_cache()
assert not r, r
assert self.retrieved == [f1], self.retrieved
assert built_it is None, built_it
finally:
SCons.CacheDir.CacheRetrieve = save_CacheRetrieve
def test_CacheRetrieveSilent(self):
"""Test the CacheRetrieveSilent() function"""
save_CacheRetrieveSilent = SCons.CacheDir.CacheRetrieveSilent
SCons.CacheDir.cache_show = 1
f2 = self.File("cd.f2", 'f2_bsig')
try:
SCons.CacheDir.CacheRetrieveSilent = self.retrieve_succeed
self.retrieved = []
built_it = None
r = f2.retrieve_from_cache()
assert r == 1, r
assert self.retrieved == [f2], self.retrieved
assert built_it is None, built_it
SCons.CacheDir.CacheRetrieveSilent = self.retrieve_fail
self.retrieved = []
built_it = None
r = f2.retrieve_from_cache()
assert r is False, r
assert self.retrieved == [f2], self.retrieved
assert built_it is None, built_it
finally:
SCons.CacheDir.CacheRetrieveSilent = save_CacheRetrieveSilent
def test_CachePush(self):
"""Test the CachePush() function"""
save_CachePush = SCons.CacheDir.CachePush
SCons.CacheDir.CachePush = self.push
try:
self.pushed = []
cd_f3 = self.test.workpath("cd.f3")
f3 = self.File(cd_f3)
f3.push_to_cache()
assert self.pushed == [], self.pushed
self.test.write(cd_f3, "cd.f3\n")
f3.push_to_cache()
assert self.pushed == [f3], self.pushed
self.pushed = []
cd_f4 = self.test.workpath("cd.f4")
f4 = self.File(cd_f4)
f4.visited()
assert self.pushed == [], self.pushed
self.test.write(cd_f4, "cd.f4\n")
f4.clear()
f4.visited()
assert self.pushed == [], self.pushed
SCons.CacheDir.cache_force = 1
f4.clear()
f4.visited()
assert self.pushed == [f4], self.pushed
finally:
SCons.CacheDir.CachePush = save_CachePush
def test_warning(self):
"""Test raising a warning if we can't copy a file to cache."""
test = TestCmd(workdir='')
save_copy2 = shutil.copy2
def copy2(src, dst):
raise OSError
shutil.copy2 = copy2
save_mkdir = os.mkdir
def mkdir(dir, mode=0):
pass
os.mkdir = mkdir
old_warn_exceptions = SCons.Warnings.warningAsException(1)
SCons.Warnings.enableWarningClass(SCons.Warnings.CacheWriteErrorWarning)
try:
cd_f7 = self.test.workpath("cd.f7")
self.test.write(cd_f7, "cd.f7\n")
f7 = self.File(cd_f7, 'f7_bsig')
warn_caught = 0
try:
f7.push_to_cache()
except SCons.Errors.BuildError, e:
assert e.exc_info[0] == SCons.Warnings.CacheWriteErrorWarning
warn_caught = 1
assert warn_caught
finally:
shutil.copy2 = save_copy2
os.mkdir = save_mkdir
SCons.Warnings.warningAsException(old_warn_exceptions)
SCons.Warnings.suppressWarningClass(SCons.Warnings.CacheWriteErrorWarning)
def test_no_strfunction(self):
"""Test handling no strfunction() for an action."""
save_CacheRetrieveSilent = SCons.CacheDir.CacheRetrieveSilent
f8 = self.File("cd.f8", 'f8_bsig')
try:
SCons.CacheDir.CacheRetrieveSilent = self.retrieve_succeed
self.retrieved = []
built_it = None
r = f8.retrieve_from_cache()
assert r == 1, r
assert self.retrieved == [f8], self.retrieved
assert built_it is None, built_it
SCons.CacheDir.CacheRetrieveSilent = self.retrieve_fail
self.retrieved = []
built_it = None
r = f8.retrieve_from_cache()
assert r is False, r
assert self.retrieved == [f8], self.retrieved
assert built_it is None, built_it
finally:
SCons.CacheDir.CacheRetrieveSilent = save_CacheRetrieveSilent
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [
CacheDirTestCase,
FileTestCase,
]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "8cf39332ee65de2db60e225e4684c529",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 107,
"avg_line_length": 30.99644128113879,
"alnum_prop": 0.5788748564867968,
"repo_name": "EmanueleCannizzaro/scons",
"id": "2daa1778d64a6474b84ec553d434a1cfd4ae1020",
"size": "9823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/engine/SCons/CacheDirTests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import copy
import hashlib
import json
import os
import re
import uuid
import xml.sax
from StringIO import StringIO
from collections import namedtuple
from datetime import datetime
from urlparse import urljoin
from xml.sax import ContentHandler
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.manager import Manager
from django.template.loader import render_to_string
import jingo
from caching.base import CachingManager, CachingMixin
from jinja2 import Markup
from jinja2.utils import LRUCache
from snippets.base import ENGLISH_COUNTRIES
from snippets.base.fields import CountryField, LocaleField, RegexField
from snippets.base.managers import ClientMatchRuleManager, SnippetManager
from snippets.base.storage import OverwriteStorage
CHANNELS = ('release', 'beta', 'aurora', 'nightly')
FIREFOX_STARTPAGE_VERSIONS = ('1', '2', '3', '4')
FENNEC_STARTPAGE_VERSIONS = ('1',)
SNIPPET_WEIGHTS = ((33, 'Appear 1/3rd as often as an average snippet'),
(50, 'Appear half as often as an average snippet'),
(66, 'Appear 2/3rds as often as an average snippet'),
(100, 'Appear as often as an average snippet'),
(150, 'Appear 1.5 times as often as an average snippet'),
(200, 'Appear twice as often as an average snippet'),
(300, 'Appear three times as often as an average snippet'))
def validate_xml_template(data):
parser = xml.sax.make_parser()
parser.setContentHandler(ContentHandler())
parser.setFeature(xml.sax.handler.feature_external_ges, 0)
data = data.encode('utf-8')
xml_str = '<div>{0}</div>'.format(data)
try:
parser.parse(StringIO(xml_str))
except xml.sax.SAXParseException as e:
# getLineNumber() - 1 to get the correct line number because
# we're wrapping contents into a div.
error_msg = (
'XML Error: {message} in line {line} column {column}').format(
message=e.getMessage(), line=e.getLineNumber()-1, column=e.getColumnNumber())
raise ValidationError(error_msg)
return data
def validate_xml_variables(data):
data_dict = json.loads(data)
# set up a safer XML parser that does not resolve external
# entities
parser = xml.sax.make_parser()
parser.setContentHandler(ContentHandler())
parser.setFeature(xml.sax.handler.feature_external_ges, 0)
for name, value in data_dict.items():
# Skip over values that aren't strings.
if not isinstance(value, basestring):
continue
value = value.encode('utf-8')
xml_str = '<div>{0}</div>'.format(value)
try:
parser.parse(StringIO(xml_str))
except xml.sax.SAXParseException as e:
error_msg = (
'XML Error in value "{name}": {message} in column {column}'
.format(name=name, message=e.getMessage(),
column=e.getColumnNumber()))
raise ValidationError(error_msg)
return data
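# Illustrative examples (hypothetical input values) of how the two
# validators above behave when used as Django field validators:
#
#   validate_xml_template('<em>ok</em>')             # returns the data unchanged
#   validate_xml_template('<em>unclosed')            # raises ValidationError ("XML Error: ...")
#   validate_xml_variables('{"text": "<b>hi</b>"}')  # returns the data unchanged
#   validate_xml_variables('{"text": "<b>hi"}')      # raises ValidationError for value "text"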
# NamedTuple that represents a user's client program.
Client = namedtuple('Client', (
'startpage_version',
'name',
'version',
'appbuildid',
'build_target',
'locale',
'channel',
'os_version',
'distribution',
'distribution_version'
))
# Cache for compiled snippet templates. Using jinja's built in cache
# requires either an extra trip to the database/cache or jumping through
# hoops.
template_cache = LRUCache(100)
class SnippetBundle(object):
"""
Group of snippets to be sent to a particular client configuration.
"""
def __init__(self, client):
self.client = client
self.storage = OverwriteStorage()
self._snippets = None
@property
def key(self):
"""A unique key for this bundle as a sha1 hexdigest."""
# Key should consist of snippets that are in the bundle plus any
# properties of the client that may change the snippet code
# being sent.
key_properties = ['{id}-{date}'.format(id=snippet.id, date=snippet.modified.isoformat())
for snippet in self.snippets]
key_properties.extend([
self.client.startpage_version,
self.client.locale,
])
key_string = u'_'.join(unicode(prop) for prop in key_properties)
return hashlib.sha1(key_string).hexdigest()
@property
def cache_key(self):
return u'bundle_' + self.key
@property
def expired(self):
"""
If True, the code for this bundle should be re-generated before
use.
"""
return not cache.get(self.cache_key)
@property
def filename(self):
return u'bundles/bundle_{0}.html'.format(self.key)
@property
def url(self):
return self.storage.url(self.filename)
@property
def snippets(self):
# Lazy-load snippets on first access.
if self._snippets is None:
self._snippets = (Snippet.cached_objects
.filter(disabled=False)
.match_client(self.client)
.order_by('priority')
.select_related('template')
.prefetch_related('countries', 'exclude_from_search_providers')
.filter_by_available())
return self._snippets
def generate(self):
"""Generate and save the code for this snippet bundle."""
bundle_content = render_to_string('base/fetch_snippets.html', {
'snippets': self.snippets,
'client': self.client,
'locale': self.client.locale,
})
if isinstance(bundle_content, unicode):
bundle_content = bundle_content.encode('utf-8')
self.storage.save(self.filename, ContentFile(bundle_content))
cache.set(self.cache_key, True, settings.SNIPPET_BUNDLE_TIMEOUT)
class SnippetTemplate(CachingMixin, models.Model):
"""
A template for the body of a snippet. Can have multiple variables that the
snippet will fill in.
"""
name = models.CharField(max_length=255, unique=True)
code = models.TextField(validators=[validate_xml_template])
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = models.Manager()
cached_objects = CachingManager()
def render(self, ctx):
ctx.setdefault('snippet_id', 0)
# Check if template is in cache, and cache it if it's not.
cache_key = hashlib.sha1(self.code).hexdigest()
template = template_cache.get(cache_key)
if not template:
template = jingo.env.from_string(self.code)
template_cache[cache_key] = template
return template.render(ctx)
def __unicode__(self):
return self.name
class SnippetTemplateVariable(CachingMixin, models.Model):
"""
A variable for a template that an individual snippet can fill in with its
own content.
"""
TEXT = 0
IMAGE = 1
SMALLTEXT = 2
CHECKBOX = 3
BODY = 4
TYPE_CHOICES = ((BODY, 'Main Text'), (TEXT, 'Text'), (SMALLTEXT, 'Small Text'),
(IMAGE, 'Image'), (CHECKBOX, 'Checkbox'))
template = models.ForeignKey(SnippetTemplate, related_name='variable_set')
name = models.CharField(max_length=255)
type = models.IntegerField(choices=TYPE_CHOICES, default=TEXT)
description = models.TextField(blank=True, default='')
objects = models.Manager()
cached_objects = CachingManager()
def __unicode__(self):
return u'{0}: {1}'.format(self.template.name, self.name)
class Meta:
ordering = ('name',)
class ClientMatchRule(CachingMixin, models.Model):
"""Defines a rule that matches a snippet to certain clients."""
description = models.CharField(max_length=255, unique=True)
is_exclusion = models.BooleanField(default=False)
startpage_version = RegexField()
name = RegexField()
version = RegexField()
appbuildid = RegexField()
build_target = RegexField()
locale = RegexField()
channel = RegexField()
os_version = RegexField()
distribution = RegexField()
distribution_version = RegexField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = models.Manager()
cached_objects = ClientMatchRuleManager()
class Meta:
ordering = ('-modified',)
def matches(self, client):
"""Evaluate whether this rule matches the given client."""
match = True
for field in client._fields:
field_value = getattr(self, field, None)
if not field_value:
continue
client_field_value = getattr(client, field)
if field_value.startswith('/'): # Match field as a regex.
if re.match(field_value[1:-1], client_field_value) is None:
match = False
break
elif field_value != client_field_value: # Match field as a string.
match = False
break
# Exclusion rules match clients that do not match their rule.
return not match if self.is_exclusion else match
def __unicode__(self):
return self.description
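# Illustrative sketch of how ClientMatchRule.matches() interprets its fields
# (hypothetical, unsaved instances; values wrapped in slashes are treated as
# regular expressions, anything else as a plain string, and is_exclusion
# inverts the result):
#
#   client = Client('4', 'Firefox', '42.0', '20151029151421', 'Darwin_x86-64',
#                   'en-US', 'beta', 'Darwin 10.8.0', 'default', 'default')
#
#   rule = ClientMatchRule(description='Beta only', channel='beta')
#   rule.matches(client)        # True: 'beta' == 'beta'
#
#   regex_rule = ClientMatchRule(description='Aurora or Nightly',
#                                channel='/(aurora|nightly)/')
#   regex_rule.matches(client)  # False: 'beta' does not match the regex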
class SnippetBaseModel(models.Model):
def duplicate(self):
snippet_copy = copy.copy(self)
snippet_copy.id = None
snippet_copy.disabled = True
snippet_copy.name = '{0} - {1}'.format(
self.name,
datetime.strftime(datetime.now(), '%Y.%m.%d %H:%M:%S'))
snippet_copy.save()
for field in self._meta.get_all_field_names():
if isinstance(getattr(self, field), Manager):
manager = getattr(self, field)
if manager.__class__.__name__ == 'RelatedManager':
for itm in manager.all():
itm_copy = copy.copy(itm)
itm_copy.id = None
getattr(snippet_copy, field).add(itm_copy)
elif manager.__class__.__name__ == 'ManyRelatedManager':
for snippet in manager.all():
getattr(snippet_copy, field).add(snippet)
return snippet_copy
class Meta:
abstract = True
class Snippet(CachingMixin, SnippetBaseModel):
name = models.CharField(max_length=255, unique=True)
template = models.ForeignKey(SnippetTemplate)
data = models.TextField(default='{}', validators=[validate_xml_variables])
priority = models.IntegerField(default=0, blank=True)
disabled = models.BooleanField(default=True)
countries = models.ManyToManyField(
'TargetedCountry', blank=True, verbose_name='Targeted Countries')
publish_start = models.DateTimeField(blank=True, null=True)
publish_end = models.DateTimeField(blank=True, null=True)
on_release = models.BooleanField(default=True, verbose_name='Release')
on_beta = models.BooleanField(default=False, verbose_name='Beta')
on_aurora = models.BooleanField(default=False, verbose_name='Aurora')
on_nightly = models.BooleanField(default=False, verbose_name='Nightly')
on_startpage_1 = models.BooleanField(default=False, verbose_name='Version 1')
on_startpage_2 = models.BooleanField(default=True, verbose_name='Version 2')
on_startpage_3 = models.BooleanField(default=True, verbose_name='Version 3')
on_startpage_4 = models.BooleanField(default=True, verbose_name='Version 4')
weight = models.IntegerField(
'Prevalence', choices=SNIPPET_WEIGHTS, default=100,
help_text='How often should this snippet be shown to users?')
client_match_rules = models.ManyToManyField(
ClientMatchRule, blank=True, verbose_name='Client Match Rules')
exclude_from_search_providers = models.ManyToManyField(
'SearchProvider', blank=True, verbose_name='Excluded Search Providers')
campaign = models.CharField(
max_length=255, blank=True, default='',
help_text='Optional campaign name. Will be added in the stats ping.')
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = models.Manager()
cached_objects = SnippetManager()
class Meta:
ordering = ('-modified',)
def render(self):
data = json.loads(self.data)
snippet_id = self.id or 0
data.setdefault('snippet_id', snippet_id)
# Add snippet ID to template variables.
for key, value in data.items():
if isinstance(value, basestring):
data[key] = value.replace(u'[[snippet_id]]', unicode(snippet_id))
# Use a list for attrs to make the output order predictable.
attrs = [('data-snippet-id', self.id),
('data-weight', self.weight),
('data-campaign', self.campaign),
('class', 'snippet-metadata')]
if self.id:
countries = ','.join([country.code for country in self.countries.all()])
if countries:
attrs.append(('data-countries', countries))
# Avoid using values_list() because django-cache-machine
# does not support it.
search_engine_identifiers = [
provider.identifier for provider in self.exclude_from_search_providers.all()
]
if search_engine_identifiers:
attrs.append(('data-exclude-from-search-engines',
','.join(search_engine_identifiers)))
attr_string = ' '.join('{0}="{1}"'.format(key, value) for key, value in
attrs)
rendered_snippet = u'<div {attrs}>{content}</div>'.format(
attrs=attr_string,
content=self.template.render(data)
)
return Markup(rendered_snippet)
@property
def channels(self):
channels = []
for channel in CHANNELS:
if getattr(self, 'on_{0}'.format(channel), False):
channels.append(channel)
return channels
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('base.show', kwargs={'snippet_id': self.id})
class SnippetLocale(CachingMixin, models.Model):
snippet = models.ForeignKey(Snippet, related_name='locale_set')
locale = LocaleField()
objects = models.Manager()
cached_objects = CachingManager()
class JSONSnippet(CachingMixin, SnippetBaseModel):
name = models.CharField(max_length=255, unique=True)
priority = models.IntegerField(default=0, blank=True)
disabled = models.BooleanField(default=True)
icon = models.TextField(help_text='Icon should be a 96x96px PNG.')
text = models.CharField(max_length=140,
help_text='Maximum length 140 characters.')
url = models.CharField(max_length=500)
countries = models.ManyToManyField(
'TargetedCountry', blank=True, verbose_name='Targeted Countries')
publish_start = models.DateTimeField(blank=True, null=True)
publish_end = models.DateTimeField(blank=True, null=True)
on_release = models.BooleanField(default=True, verbose_name='Release')
on_beta = models.BooleanField(default=False, verbose_name='Beta')
on_aurora = models.BooleanField(default=False, verbose_name='Aurora')
on_nightly = models.BooleanField(default=False, verbose_name='Nightly')
on_startpage_1 = models.BooleanField(default=True, verbose_name='Version 1')
weight = models.IntegerField(
'Prevalence', choices=SNIPPET_WEIGHTS, default=100,
help_text='How often should this snippet be shown to users?')
client_match_rules = models.ManyToManyField(
ClientMatchRule, blank=True, verbose_name='Client Match Rules')
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = models.Manager()
cached_objects = SnippetManager()
class Meta:
ordering = ('-modified',)
verbose_name = 'JSON Snippet'
def __unicode__(self):
return self.name
class JSONSnippetLocale(CachingMixin, models.Model):
snippet = models.ForeignKey(JSONSnippet, related_name='locale_set')
locale = LocaleField()
objects = models.Manager()
cached_objects = CachingManager()
class UploadedFile(models.Model):
FILES_ROOT = 'files' # Directory name inside MEDIA_ROOT
def _generate_filename(instance, filename):
"""Generate a new unique filename while preserving the original
        filename extension. If an existing UploadedFile gets updated,
do not generate a new filename.
"""
# Instance is new UploadedFile, generate a filename
if not instance.id:
ext = os.path.splitext(filename)[1]
filename = str(uuid.uuid4()) + ext
return os.path.join(UploadedFile.FILES_ROOT, filename)
# Use existing filename.
obj = UploadedFile.objects.get(id=instance.id)
return obj.file.name
file = models.FileField(storage=OverwriteStorage(), upload_to=_generate_filename)
name = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
@property
def url(self):
site_url = getattr(settings, 'CDN_URL', settings.SITE_URL)
full_url = urljoin(site_url, self.file.url)
return full_url
@property
def snippets(self):
return Snippet.objects.filter(
models.Q(data__contains=self.file.url) |
models.Q(template__code__contains=self.file.url)
)
class SearchProvider(CachingMixin, models.Model):
name = models.CharField(max_length=255, unique=True)
identifier = models.CharField(max_length=255)
objects = CachingManager()
def __unicode__(self):
return self.name
class Meta:
ordering = ('id',)
class TargetedCountry(CachingMixin, models.Model):
code = CountryField('Geolocation Country', unique=True)
objects = CachingManager()
def __unicode__(self):
return u'{0} ({1})'.format(ENGLISH_COUNTRIES.get(self.code), self.code)
class Meta:
ordering = ('id',)
| {
"content_hash": "92d02ca0e9a55ea8824eba522327fecc",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 96,
"avg_line_length": 34.27205882352941,
"alnum_prop": 0.6350032181935207,
"repo_name": "Osmose/snippets-service",
"id": "bcf602d320969e24b71f03a880defb4034a33334",
"size": "18644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snippets/base/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "199"
},
{
"name": "CSS",
"bytes": "2066"
},
{
"name": "HTML",
"bytes": "45925"
},
{
"name": "JavaScript",
"bytes": "5928866"
},
{
"name": "Python",
"bytes": "991548"
},
{
"name": "Shell",
"bytes": "2165"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt, cint, cstr, now, get_url_to_form
from frappe.modules import load_doctype_module
from frappe.model.base_document import BaseDocument
from frappe.model.naming import set_new_name
from werkzeug.exceptions import NotFound, Forbidden
# once_only validation
# methods
def get_doc(arg1, arg2=None):
if isinstance(arg1, BaseDocument):
return arg1
elif isinstance(arg1, basestring):
doctype = arg1
else:
doctype = arg1.get("doctype")
controller = get_controller(doctype)
if controller:
return controller(arg1, arg2)
raise ImportError, arg1
_classes = {}
def get_controller(doctype):
if not doctype in _classes:
module = load_doctype_module(doctype)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, Document):
_class = getattr(module, classname)
else:
raise ImportError, doctype
else:
raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
class Document(BaseDocument):
def __init__(self, arg1, arg2=None):
self.doctype = self.name = None
if arg1 and isinstance(arg1, basestring):
if not arg2:
# single
self.doctype = self.name = arg1
else:
self.doctype = arg1
if isinstance(arg2, dict):
# filter
self.name = frappe.db.get_value(arg1, arg2, "name")
if self.name is None:
raise frappe.DoesNotExistError, (arg1, arg2)
else:
self.name = arg2
self.load_from_db()
elif isinstance(arg1, dict):
super(Document, self).__init__(arg1)
self.init_valid_columns()
else:
# incorrect arguments. let's not proceed.
raise frappe.DataError("Document({0}, {1})".format(arg1, arg2))
self.dont_update_if_missing = []
def load_from_db(self):
if not getattr(self, "_metaclass", False) and self.meta.issingle:
self.update(frappe.db.get_singles_dict(self.doctype))
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
self.update(d)
if self.name=="DocType" and self.doctype=="DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
for df in table_fields:
children = frappe.db.get_values(df.options,
{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
"*", as_dict=True, order_by="idx asc")
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
def has_permission(self, permtype):
if getattr(self, "ignore_permissions", False):
return True
return frappe.has_permission(self.doctype, permtype, self)
def raise_no_permission_to(self, perm_type):
raise frappe.PermissionError("No permission to {} {} {}".format(perm_type, self.doctype, self.name or ""))
def insert(self, ignore_permissions=None):
if ignore_permissions!=None:
self.ignore_permissions = ignore_permissions
self.set("__islocal", True)
if not self.has_permission("create"):
self.raise_no_permission_to("create")
self._set_defaults()
self._set_docstatus_user_and_timestamp()
self.check_if_latest()
self.set_new_name()
self.run_method("before_insert")
self.set_parent_in_children()
self.set("__in_insert", True)
self.run_before_save_methods()
self._validate()
self.delete_key("__in_insert")
# run validate, on update etc.
# parent
if getattr(self.meta, "issingle", 0):
self.update_single(self.get_valid_dict())
else:
self.db_insert()
# children
for d in self.get_all_children():
d.db_insert()
self.run_method("after_insert")
self.set("__in_insert", True)
self.run_post_save_methods()
self.delete_key("__in_insert")
return self
def save(self, ignore_permissions=None):
if ignore_permissions!=None:
self.ignore_permissions = ignore_permissions
if self.get("__islocal") or not self.get("name"):
self.insert()
return
if not self.has_permission("write"):
self.raise_no_permission_to("save")
self._set_docstatus_user_and_timestamp()
self.check_if_latest()
self.set_parent_in_children()
self.run_before_save_methods()
if self._action != "cancel":
self._validate()
if self._action == "update_after_submit":
self.validate_update_after_submit()
# parent
if self.meta.issingle:
self.update_single(self.get_valid_dict())
else:
self.db_update()
# children
child_map = {}
ignore_children_type = self.get("ignore_children_type") or []
for d in self.get_all_children():
d.db_update()
child_map.setdefault(d.doctype, []).append(d.name)
for df in self.meta.get_table_fields():
if df.options not in ignore_children_type:
cnames = child_map.get(df.options) or []
if cnames:
frappe.db.sql("""delete from `tab%s` where parent=%s and parenttype=%s and
name not in (%s)""" % (df.options, '%s', '%s', ','.join(['%s'] * len(cnames))),
tuple([self.name, self.doctype] + cnames))
else:
frappe.db.sql("""delete from `tab%s` where parent=%s and parenttype=%s""" \
% (df.options, '%s', '%s'), (self.name, self.doctype))
self.run_post_save_methods()
return self
def set_new_name(self):
set_new_name(self)
# set name for children
for d in self.get_all_children():
set_new_name(d)
def update_single(self, d):
frappe.db.sql("""delete from tabSingles where doctype=%s""", self.doctype)
for field, value in d.iteritems():
if field != "doctype":
frappe.db.sql("""insert into tabSingles(doctype, field, value)
values (%s, %s, %s)""", (self.doctype, field, value))
def _set_docstatus_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
def _validate(self):
self._validate_mandatory()
self._validate_links()
self._validate_selects()
self._validate_constants()
for d in self.get_all_children():
d._validate_selects()
d._validate_constants()
self._extract_images_from_text_editor()
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
def check_if_latest(self):
conflict = False
self._action = "save"
if not self.get('__islocal'):
if self.meta.issingle:
modified = frappe.db.get_value(self.doctype, self.name, "modified")
if cstr(modified) and cstr(modified) != cstr(self._original_modified):
conflict = True
else:
tmp = frappe.db.get_value(self.doctype, self.name,
["modified", "docstatus"], as_dict=True)
if not tmp:
frappe.throw(_("Record does not exist"))
modified = cstr(tmp.modified)
if modified and modified != cstr(self._original_modified):
conflict = True
self.check_docstatus_transition(tmp.docstatus)
if conflict:
frappe.msgprint(_("Error: Document has been modified after you have opened it") \
+ (" (%s, %s). " % (modified, self.modified)) \
+ _("Please refresh to get the latest document."),
raise_exception=frappe.TimestampMismatchError)
else:
self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
if not self.docstatus:
self.docstatus = 0
if docstatus==0:
if self.docstatus==0:
self._action = "save"
elif self.docstatus==1:
self._action = "submit"
if not self.has_permission("submit"):
self.raise_no_permission_to("submit")
else:
raise frappe.DocstatusTransitionError("Cannot change docstatus from 0 to 2")
elif docstatus==1:
if self.docstatus==1:
self._action = "update_after_submit"
if not self.has_permission("submit"):
self.raise_no_permission_to("submit")
elif self.docstatus==2:
self._action = "cancel"
if not self.has_permission("cancel"):
self.raise_no_permission_to("cancel")
else:
raise frappe.DocstatusTransitionError("Cannot change docstatus from 1 to 0")
elif docstatus==2:
raise frappe.ValidationError
def set_parent_in_children(self):
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def validate_update_after_submit(self):
if getattr(self, "ignore_validate_update_after_submit", False):
return
self._validate_update_after_submit()
for d in self.get_all_children():
d._validate_update_after_submit()
# TODO check only allowed values are updated
def _validate_mandatory(self):
if self.get("ignore_mandatory"):
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
raise frappe.MandatoryError(", ".join((each[0] for each in missing)))
def _validate_links(self):
if self.get("ignore_links"):
return
invalid_links = self.get_invalid_links()
for d in self.get_all_children():
invalid_links.extend(d.get_invalid_links())
if not invalid_links:
return
msg = ", ".join((each[2] for each in invalid_links))
frappe.throw(_("Could not find {0}").format(msg),
frappe.LinkValidationError)
def get_all_children(self, parenttype=None):
ret = []
for df in self.meta.get("fields", {"fieldtype": "Table"}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_html
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_html(self, df.fieldname)
def run_method(self, method, *args, **kwargs):
"""run standard triggers, plus those in frappe"""
if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
else:
# hack! to run hooks even if method does not exist
fn = lambda self, *args, **kwargs: None
fn.__name__ = method.encode("utf-8")
return Document.hook(fn)(self, *args, **kwargs)
def submit(self):
self.docstatus = 1
self.save()
def cancel(self):
self.docstatus = 2
self.save()
def run_before_save_methods(self):
if getattr(self, "ignore_validate", False):
return
if self._action=="save":
self.run_method("validate")
self.run_method("before_save")
elif self._action=="submit":
self.run_method("validate")
self.run_method("before_submit")
elif self._action=="cancel":
self.run_method("before_cancel")
elif self._action=="update_after_submit":
self.run_method("before_update_after_submit")
def run_post_save_methods(self):
if self._action=="save":
self.run_method("on_update")
elif self._action=="submit":
self.run_method("on_update")
self.run_method("on_submit")
elif self._action=="cancel":
self.run_method("on_cancel")
self.check_no_back_links_exist()
elif self._action=="update_after_submit":
self.run_method("on_update_after_submit")
def check_no_back_links_exist(self):
from frappe.model.delete_doc import check_if_doc_is_linked
if not self.get("ignore_links"):
check_if_doc_is_linked(self, method="Cancel")
@staticmethod
def whitelist(f):
f.whitelisted = True
return f
@staticmethod
def hook(f):
def add_to_return_value(self, new_return_value):
if isinstance(new_return_value, dict):
if not self.get("_return_value"):
self._return_value = {}
self._return_value.update(new_return_value)
else:
self._return_value = new_return_value or self.get("_return_value")
def compose(fn, *hooks):
def runner(self, method, *args, **kwargs):
add_to_return_value(self, fn(self, *args, **kwargs))
for f in hooks:
add_to_return_value(self, f(self, method, *args, **kwargs))
return self._return_value
return runner
def composer(self, *args, **kwargs):
hooks = []
method = f.__name__
doc_events = frappe.get_hooks("doc_events", {})
for handler in doc_events.get(self.doctype, {}).get(method, []) \
+ doc_events.get("*", {}).get(method, []):
hooks.append(frappe.get_attr(handler))
composed = compose(f, *hooks)
return composed(self, method, *args, **kwargs)
return composer
def is_whitelisted(self, method):
fn = getattr(self, method, None)
if not fn:
raise NotFound("Method {0} not found".format(method))
elif not getattr(fn, "whitelisted", False):
raise Forbidden("Method {0} not whitelisted".format(method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
"""check that value of fieldname should be 'condition' val2
else throw exception"""
error_condition_map = {
"in": _("one of"),
"not in": _("none of"),
"^": _("beginning with"),
}
if not doc:
doc = self
df = doc.meta.get_field(fieldname)
val1 = doc.get(fieldname)
if df.fieldtype in ("Currency", "Float"):
val1 = flt(val1, self.precision(df.fieldname, doc.parentfield or None))
val2 = flt(val2, self.precision(df.fieldname, doc.parentfield or None))
elif df.fieldtype in ("Int", "Check"):
val1 = cint(val1)
val2 = cint(val2)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
val1 = cstr(val1)
val2 = cstr(val2)
if not frappe.compare(val1, condition, val2):
label = doc.meta.get_label(fieldname)
condition_str = error_condition_map.get(condition, condition)
if doc.parentfield:
msg = _("Incorrect value in row {0}: {1} must be {2} {3}".format(doc.idx, label, condition_str, val2))
else:
msg = _("Incorrect value: {0} must be {1} {2}".format(label, condition_str, val2))
# raise passed exception or True
msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
if not fieldnames:
fieldnames = (df.fieldname for df in
doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float"]]}))
for fieldname in fieldnames:
doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def precision(self, fieldname, parentfield=None):
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
if not hasattr(self, "_precision"):
self._precision = frappe._dict({
"default": cint(frappe.db.get_default("float_precision")) or 3,
"options": {}
})
if self._precision.setdefault(parentfield or "main", {}).get(fieldname) is None:
meta = frappe.get_meta(self.meta.get_field(parentfield).options if parentfield else self.doctype)
df = meta.get_field(fieldname)
if df.fieldtype == "Currency" and df.options and not self._precision.options.get(df.options):
self._precision.options[df.options] = get_field_precision(df, self)
if df.fieldtype == "Currency":
self._precision[parentfield or "main"][fieldname] = cint(self._precision.options.get(df.options)) or \
self._precision.default
elif df.fieldtype == "Float":
self._precision[parentfield or "main"][fieldname] = self._precision.default
return self._precision[parentfield or "main"][fieldname]
def get_url(self):
return "/desk#Form/{doctype}/{name}".format(doctype=self.doctype, name=self.name)
| {
"content_hash": "354f63809a975c51dec67530cc8aab8a",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 108,
"avg_line_length": 29.75855855855856,
"alnum_prop": 0.6775248244126907,
"repo_name": "gangadharkadam/office_frappe",
"id": "fe385cf022bb7916261891774e596a90729b5a1c",
"size": "16620",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/model/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "80527"
},
{
"name": "HTML",
"bytes": "60452"
},
{
"name": "JavaScript",
"bytes": "1182079"
},
{
"name": "Python",
"bytes": "906331"
}
],
"symlink_target": ""
} |
from SCons.Script import *
from SCons.Environment import Environment
import os
import os.path as path
"""
opts_dotnet
Options for dotnet core projects
"""
def setup_vars(env, vars):
Help("""
Dotnet Core Options:
""")
# Build directory to use
dotnet_builddir = path.join(env['common_builddir'], 'SourceBuild', 'dotnet')
vars.Add('dotnet_builddir', 'Build directory for .Net', dotnet_builddir)
    # Whether to build Debug or Release
vars.Add(EnumVariable('dotnet_config', 'Building mode', 'Debug', allowed_values=('Debug', 'Release')))
# Additional Build options
vars.Add('dotnet_extra', 'Additional options to pass to dotnet during the build', None)
vars.Update(env)
return vars
# * --output : Use env['dotnet_builddir']
# * --version-suffix: Use env['BuildVersion']
# * --configuration : variable: dotnet_configuration = 'Debug'
# * additional dotnet build options: variable: dotnet_additional = ''
#TODO
# * Set the build output directory to default in the csproj file
# * Is there a way of compiling for x32 / x64 explicitly
# * Incorporate above options into build command
# * Cleanup dotnet builder class
| {
"content_hash": "fe306f614e67c29a8572f5557b602644",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 106,
"avg_line_length": 29.365853658536587,
"alnum_prop": 0.6744186046511628,
"repo_name": "ASoftTech/Scons-Tools-Grbd",
"id": "57aada9c8e39e23cfb0fad80346c20528aa7f76e",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons_tools_grbd/Tools/MSBuild/DotNetCore/old/DotnetCoreOpts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33"
},
{
"name": "CSS",
"bytes": "3966"
},
{
"name": "HTML",
"bytes": "11346"
},
{
"name": "Python",
"bytes": "67078"
}
],
"symlink_target": ""
} |
from timeit import timeit, Timer
from random import randrange
for i in range(1, 1000):
list_str = "x = " + str(list(range(i)))
list_time = Timer(list_str)
del_list_time = Timer(list_str + ";del x[randrange(%d)]"%i,
"from __main__ import randrange")
dict_str = "y = " + str({j:None for j in range(i)})
dict_time = Timer(dict_str)
del_dict_time = Timer(dict_str + "; del y[randrange(%d)] "%i,
"from __main__ import randrange")
list_del_time = del_list_time.timeit(number = 1000) - list_time.timeit(number = 1000)
dict_del_time = del_dict_time.timeit(number = 1000) - dict_time.timeit(number = 1000)
print("%d, %10.3f, %10.3f" % (i, list_del_time, dict_del_time))
| {
"content_hash": "5281cd3f42412bbb94b4c4d99160a556",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 89,
"avg_line_length": 42.5,
"alnum_prop": 0.5751633986928104,
"repo_name": "jackys-95/coding-practice",
"id": "581c403d6ba864d5fd584b09484b9f194e20d6b9",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/listdict_del_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6946"
},
{
"name": "Python",
"bytes": "49617"
},
{
"name": "Ruby",
"bytes": "1000"
}
],
"symlink_target": ""
} |
diagnostics=False
#################################
## Import packages ##
#################################
import sys
import os
from trackeddy.tracking import *
from trackeddy.savedata import *
from numpy import *
from pylab import *
import random
import pytest
import time
#################################
## Import tools to create ##
## synthetic fields            ##
#################################
from trackeddy.utils.gaussian_field_functions import *
import trackeddy.utils.field_generator as fg
n=2
a = 0.1
b = 0.1
t0 = 0
t = 1
xx=linspace(10,12,200)
yy=linspace(10,12,200)
gf=fg.Generate_field(a,b,n,xx,yy,'Nint')
data = abs(gf.assemble_field(t))
x=linspace(10,13,300)
y=linspace(10,13,300)
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
levels = {'max':data.max(),'min':0.1,'step':0.1}
eddytd = analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
@pytest.mark.ttrackeddy_data
def test_data2npy():
save_data('./test.npy', eddytd)
assert os.path.isfile('./test.npy')
@pytest.mark.ttrackeddy_data
def test_tracknpy2nc():
track2nc = Trackeddy2dataset('./test.npy','./','nc')
track2nc.file2dict()
track2nc.trackeddy2nc()
assert os.path.isfile('./output_000.nc')
@pytest.mark.ttrackeddy_data
def test_trackdata2nc():
track2nc = Trackeddy2dataset(eddytd,'./','nc')
track2nc.trackeddy2nc()
assert os.path.isfile('./output_001.nc')
@pytest.mark.ttrackeddy_data
def test_rm_files():
os.remove('./test.npy')
os.remove('./output_000.nc')
    os.remove('./output_001.nc')
| {
"content_hash": "9e30a21b57d5e182bed57de58e0e27ba",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 107,
"avg_line_length": 23.64864864864865,
"alnum_prop": 0.6205714285714286,
"repo_name": "Josue-Martinez-Moreno/trackeddy",
"id": "e3716beb41470d1b9a9cfb4534c8000c93233839",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_savedata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146001"
}
],
"symlink_target": ""
} |
''' Copyright 2017 Vprime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.*/
@author Rami Alshafi <ralshafi@vprime.com> '''
# to install the envirophat dependencies, type the following command
# $ curl https://get.pimoroni.com/envirophat | bash
# make sure to type 'y' or 'n' when prompted
import sys
import envirophat
if __name__ == "__main__":
arguments = sys.argv
if len(arguments) == 2:
if arguments[1] == "0":
envirophat.leds.off()
print "leds:status:False"
elif arguments[1] == "1":
envirophat.leds.on()
print "leds:status:True"
elif arguments[1] == "2":
print "weather:temperature:"+str(envirophat.weather.temperature())
elif arguments[1] == "3":
print "motion:accelerometer:"+str(list(envirophat.motion.accelerometer()))
elif arguments[1] == "4":
print "light:rgb:"+str(list(envirophat.light.rgb()))
else:
print "error:wrong_command:please press any of the following options: 0, 1, 2, 3, 4"
| {
"content_hash": "896bfa6219de2e5dbfb80bd91100d2d5",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 86,
"avg_line_length": 37.76315789473684,
"alnum_prop": 0.7114982578397212,
"repo_name": "gabrielschulhof/iotivity-node",
"id": "af15d53aeb06ed813513f15f139411176f60d5f0",
"size": "1435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "js/envirophat/enviro_phat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "131082"
},
{
"name": "JavaScript",
"bytes": "301375"
},
{
"name": "Python",
"bytes": "7529"
}
],
"symlink_target": ""
} |
from ..orm import Factory
class Seeder:
factory = None
def __init__(self, resolver=None):
self._command = None
self._resolver = resolver
if self.factory is None:
self.factory = Factory(resolver=resolver)
else:
self.factory.set_connection_resolver(self._resolver)
def run(self):
"""
Run the database seeds.
"""
pass
def call(self, klass):
"""
Seed the given connection from the given class.
:param klass: The Seeder class
:type klass: class
"""
self._resolve(klass).run()
if self._command:
self._command.line(
'<info>Seeded:</info> <fg=cyan>%s</>' % klass.__name__)
def _resolve(self, klass):
"""
Resolve an instance of the given seeder klass.
:param klass: The Seeder class
:type klass: class
"""
resolver = None
if self._resolver:
resolver = self._resolver
elif self._command:
resolver = self._command.resolver
instance = klass()
instance.set_connection_resolver(resolver)
if self._command:
instance.set_command(self._command)
return instance
def set_command(self, command):
"""
Set the console command instance.
:param command: The command
:type command: cleo.Command
"""
self._command = command
return self
def set_connection_resolver(self, resolver):
self._resolver = resolver
self.factory.set_connection_resolver(resolver)
@property
def db(self):
return self._resolver
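# Illustrative sketch of a concrete seeder (the table name, columns and the
# DatabaseSeeder entry point are hypothetical examples):
#
#   class UserTableSeeder(Seeder):
#
#       def run(self):
#           self.db.table('users').insert({
#               'name': 'John Doe',
#               'email': 'john@example.com'
#           })
#
#   class DatabaseSeeder(Seeder):
#
#       def run(self):
#           self.call(UserTableSeeder)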
| {
"content_hash": "b465ab72b9a2ed5bd73964756cb190bf",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 22.85333333333333,
"alnum_prop": 0.5530921820303384,
"repo_name": "Hanaasagi/sorator",
"id": "93dd92a692a582fb597e2655410e83cbaa357cc7",
"size": "1739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orator/seeds/seeder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2023"
},
{
"name": "Python",
"bytes": "1070898"
}
],
"symlink_target": ""
} |
"""The Virtual File System (VFS) file entry interface.
The file entry can be various file system elements like a regular file,
a directory or file system metadata.
"""
import abc
from dfvfs.lib import definitions
from dfvfs.lib import py2to3
from dfvfs.resolver import resolver
from dfvfs.vfs import vfs_stat
class Attribute(object):
"""VFS attribute interface."""
@property
def type_indicator(self):
"""str: type indicator."""
type_indicator = getattr(self, u'TYPE_INDICATOR', None)
if type_indicator is None:
raise NotImplementedError(
u'Invalid attribute missing type indicator.')
return type_indicator
class DataStream(object):
"""VFS data stream interface."""
# The data stream object should not have a reference to its
# file entry since that will create a cyclic reference.
@property
def name(self):
"""str: name."""
return u''
class Directory(object):
"""VFS directory interface.
Attributes:
path_spec (PathSpec): path specification of the directory.
"""
def __init__(self, file_system, path_spec):
"""Initializes a directory.
Args:
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
"""
super(Directory, self).__init__()
self._entries = None
self._file_system = file_system
self.path_spec = path_spec
def _EntriesGenerator(self):
"""Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
a generator is more memory efficient.
Yields:
PathSpec: path specification.
"""
return iter(())
@property
def entries(self):
"""generator[PathSpec]: path specifications of the directory entries."""
return self._EntriesGenerator()
class FileEntry(object):
"""VFS file entry interface."""
def __init__(
self, resolver_context, file_system, path_spec, is_root=False,
is_virtual=False):
"""Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
"""
super(FileEntry, self).__init__()
self._attributes = None
self._data_streams = None
self._directory = None
self._file_system = file_system
self._is_root = is_root
self._is_virtual = is_virtual
self._link = None
self._resolver_context = resolver_context
self._stat_object = None
self._type = None
self.path_spec = path_spec
self._file_system.Open(path_spec)
def __del__(self):
"""Cleans up the file entry."""
if self._file_system:
self._file_system.Close()
self._file_system = None
def _GetAttributes(self):
"""Retrieves the attributes.
Returns:
list[Attribute]: attributes.
"""
if self._attributes is None:
self._attributes = []
return self._attributes
def _GetDataStreams(self):
"""Retrieves the data streams.
Returns:
list[DataStream]: data streams.
"""
if self._data_streams is None:
if self._directory is None:
self._directory = self._GetDirectory()
self._data_streams = []
# It is assumed that directory and link file entries typically
# do not have data streams.
if not self._directory and not self.link:
self._data_streams.append(DataStream())
return self._data_streams
@abc.abstractmethod
def _GetDirectory(self):
"""Retrieves the directory.
Returns:
Directory: a directory or None.
"""
def _GetLink(self):
"""Retrieves the link.
Returns:
str: full path of the linked file entry.
"""
if self._link is None:
self._link = u''
return self._link
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = vfs_stat.VFSStat()
# Date and time stat information.
access_time = self.access_time
if access_time:
stat_time, stat_time_nano = access_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.atime = stat_time
stat_object.atime_nano = stat_time_nano
change_time = self.change_time
if change_time:
stat_time, stat_time_nano = change_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.ctime = stat_time
stat_object.ctime_nano = stat_time_nano
creation_time = self.creation_time
if creation_time:
stat_time, stat_time_nano = creation_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.crtime = stat_time
stat_object.crtime_nano = stat_time_nano
modification_time = self.modification_time
if modification_time:
stat_time, stat_time_nano = modification_time.CopyToStatTimeTuple()
if stat_time is not None:
stat_object.mtime = stat_time
stat_object.mtime_nano = stat_time_nano
# File entry type stat information.
if self._type:
stat_object.type = self._type
return stat_object
@property
def access_time(self):
"""dfdatetime.DateTimeValues: access time or None if not available."""
return
@property
def attributes(self):
"""generator[Attribute]: attributes."""
return self._GetAttributes()
@property
def change_time(self):
"""dfdatetime.DateTimeValues: change time or None if not available."""
return
@property
def creation_time(self):
"""dfdatetime.DateTimeValues: creation time or None if not available."""
return
@property
def data_streams(self):
"""generator[DataStream]: data streams."""
return self._GetDataStreams()
@property
def link(self):
"""str: full path of the linked file entry."""
return self._GetLink()
@property
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
return
@abc.abstractproperty
def name(self):
"""str: name of the file entry, without the full path."""
@property
def number_of_attributes(self):
"""int: number of attributes."""
attributes = self._GetAttributes()
return len(attributes)
@property
def number_of_data_streams(self):
"""int: number of data streams."""
data_streams = self._GetDataStreams()
return len(data_streams)
@property
def number_of_sub_file_entries(self):
"""int: number of sub file entries."""
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory is None:
return 0
# We cannot use len(self._directory.entries) since entries is a generator.
return sum(1 for path_spec in self._directory.entries)
@abc.abstractproperty
def sub_file_entries(self):
"""generator[FileEntry]: sub file entries."""
@property
def type_indicator(self):
"""str: type indicator."""
type_indicator = getattr(self, u'TYPE_INDICATOR', None)
if type_indicator is None:
raise NotImplementedError(
u'Invalid file entry missing type indicator.')
return type_indicator
def GetDataStream(self, name, case_sensitive=True):
"""Retrieves a data stream by name.
Args:
name (str): name of the data stream.
      case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
DataStream: a data stream or None if not available.
Raises:
      ValueError: if the name is not a string.
"""
if not isinstance(name, py2to3.STRING_TYPES):
raise ValueError(u'Name is not a string.')
name_lower = name.lower()
matching_data_stream = None
for data_stream in self._GetDataStreams():
if data_stream.name == name:
return data_stream
if not case_sensitive and data_stream.name.lower() == name_lower:
if not matching_data_stream:
matching_data_stream = data_stream
return matching_data_stream
def GetFileObject(self, data_stream_name=u''):
"""Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FileIO: a file-like object or None if not available.
"""
if not data_stream_name:
return resolver.Resolver.OpenFileObject(
self.path_spec, resolver_context=self._resolver_context)
def GetFileSystem(self):
"""Retrieves the file system which contains the file entry.
Returns:
FileSystem: a file system.
"""
return self._file_system
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, for example for a symbolic link.
    Returns:
FileEntry: linked file entry or None if not available.
"""
return
def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
FileEntry: parent file entry or None if not available.
"""
return
def GetSubFileEntryByName(self, name, case_sensitive=True):
"""Retrieves a sub file entry by name.
Args:
name (str): name of the file entry.
      case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
FileEntry: a file entry or None if not available.
"""
name_lower = name.lower()
matching_sub_file_entry = None
for sub_file_entry in self.sub_file_entries:
if sub_file_entry.name == name:
return sub_file_entry
if not case_sensitive and sub_file_entry.name.lower() == name_lower:
if not matching_sub_file_entry:
matching_sub_file_entry = sub_file_entry
return matching_sub_file_entry
def GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object or None if not available.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object
def HasDataStream(self, name, case_sensitive=True):
"""Determines if the file entry has specific data stream.
Args:
name (str): name of the data stream.
      case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
bool: True if the file entry has the data stream.
Raises:
      ValueError: if the name is not a string.
"""
if not isinstance(name, py2to3.STRING_TYPES):
raise ValueError(u'Name is not a string.')
name_lower = name.lower()
for data_stream in self._GetDataStreams():
if data_stream.name == name:
return True
if not case_sensitive and data_stream.name.lower() == name_lower:
return True
return False
def HasExternalData(self):
"""Determines if the file entry has external stored data.
Returns:
bool: True if the file entry has external stored data.
"""
return False
def IsAllocated(self):
"""Determines if the file entry is allocated.
Returns:
bool: True if the file entry is allocated.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object and self._stat_object.is_allocated
def IsDevice(self):
"""Determines if the file entry is a device.
Returns:
bool: True if the file entry is a device.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self._type = self._stat_object.type
return self._type == definitions.FILE_ENTRY_TYPE_DEVICE
def IsDirectory(self):
"""Determines if the file entry is a directory.
Returns:
bool: True if the file entry is a directory.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self._type = self._stat_object.type
return self._type == definitions.FILE_ENTRY_TYPE_DIRECTORY
def IsFile(self):
"""Determines if the file entry is a file.
Returns:
bool: True if the file entry is a file.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self._type = self._stat_object.type
return self._type == definitions.FILE_ENTRY_TYPE_FILE
def IsLink(self):
"""Determines if the file entry is a link.
Returns:
bool: True if the file entry is a link.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self._type = self._stat_object.type
return self._type == definitions.FILE_ENTRY_TYPE_LINK
def IsPipe(self):
"""Determines if the file entry is a pipe.
Returns:
bool: True if the file entry is a pipe.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self._type = self._stat_object.type
return self._type == definitions.FILE_ENTRY_TYPE_PIPE
def IsRoot(self):
"""Determines if the file entry is the root file entry.
Returns:
bool: True if the file entry is the root file entry.
"""
return self._is_root
def IsSocket(self):
"""Determines if the file entry is a socket.
Returns:
bool: True if the file entry is a socket.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self._type = self._stat_object.type
return self._type == definitions.FILE_ENTRY_TYPE_SOCKET
def IsVirtual(self):
"""Determines if the file entry is virtual (emulated by dfVFS).
Returns:
bool: True if the file entry is virtual.
"""
return self._is_virtual
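# Illustrative sketch of how callers are expected to use a concrete FileEntry
# subclass (`file_entry` is assumed to have been obtained from a dfVFS
# resolver for some path specification):
#
#   if file_entry.IsFile():
#       for data_stream in file_entry.data_streams:
#           file_object = file_entry.GetFileObject(
#               data_stream_name=data_stream.name)
#   elif file_entry.IsDirectory():
#       for sub_file_entry in file_entry.sub_file_entries:
#           print(sub_file_entry.name)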
| {
"content_hash": "3595ad742e4faf8599635a2c1440cd13",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 80,
"avg_line_length": 26.990234375,
"alnum_prop": 0.6533757869599827,
"repo_name": "dc3-plaso/dfvfs",
"id": "b872f1eb1cad16647088bf563aa0c736304f4d48",
"size": "13843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfvfs/vfs/file_entry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "Python",
"bytes": "1397977"
},
{
"name": "Shell",
"bytes": "1522"
}
],
"symlink_target": ""
} |
"""Chapter 4 Practice Questions
Answers Chapter 4 Practice Questions via Python code.
"""
def main():
# 1. What does the following piece of code print to the screen?
print(len("Hello") + len("Hello"))
# 2. What does this code print?
i = 0
while i < 3:
print("Hello")
i = i + 1
# 3. How about this code?
i = 0
spam = "Hello"
while i < 5:
spam = spam + spam[i]
i = i + 1
print(spam)
# 4. And this?
i = 0
while i < 4:
while i < 6:
i = i + 2
print(i)
# If PracticeQuestions.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
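# For reference, running this module prints:
# 1. 10
# 2. "Hello" three times
# 3. HelloHello
# 4. 2, 4 and 6, each on its own line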
| {
"content_hash": "811ec282567e9075aeaa910c9f4e78a5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 19.583333333333332,
"alnum_prop": 0.5219858156028369,
"repo_name": "JoseALermaIII/python-tutorials",
"id": "32f65350ab6faef2bee43efa6d53de0d18a27e97",
"size": "705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythontutorials/books/CrackingCodes/Ch04/PracticeQuestions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "326"
},
{
"name": "Python",
"bytes": "2020260"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from views import descriptor, login_begin, login_init, login_process, logout
from metadata import get_deeplink_resources
def deeplink_url_patterns(
prefix='',
url_base_pattern=r'^init/%s/$',
login_init_func=login_init,
):
"""
Returns new deeplink URLs based on 'links' from settings.SAML2IDP_REMOTES.
Parameters:
- url_base_pattern - Specify this if you need non-standard deeplink URLs.
NOTE: This will probably closely match the 'login_init' URL.
"""
resources = get_deeplink_resources()
new_patterns = []
for resource in resources:
new_patterns += patterns(prefix,
url( url_base_pattern % resource,
login_init_func,
{
'resource': resource,
},
)
)
return new_patterns
urlpatterns = patterns('',
url( r'^login/$', login_begin, name="login_begin"),
url( r'^login/process/$', login_process, name='login_process'),
url( r'^logout/$', logout, name="logout"),
(r'^metadata/xml/$', descriptor),
# For "simple" deeplinks:
url( r'^init/(?P<resource>\w+)/(?P<target>\w+)/$', login_init, name="login_init"),
)
# Issue 13 - Add new automagically-created URLs for deeplinks:
urlpatterns += deeplink_url_patterns()
| {
"content_hash": "c7dd5158fc933fdb089745c2cae6f66f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 86,
"avg_line_length": 35.23684210526316,
"alnum_prop": 0.6153846153846154,
"repo_name": "anentropic/django-saml2-idp",
"id": "73a9778559d786348f4c2f71a688959269104e75",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saml2idp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77585"
},
{
"name": "Shell",
"bytes": "659"
}
],
"symlink_target": ""
} |
import os
import struct
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from select import select
from socketserver import TCPServer, ThreadingTCPServer
from threading import RLock, Thread
import cv2
class JpegStreamer(Thread):
def __init__(self, camera):
Thread.__init__(self)
self.cap = cv2.VideoCapture(camera)
self.lock = RLock()
self.pipes = {}
def register(self):
pr, pw = os.pipe()
self.lock.acquire()
self.pipes[pr] = pw
self.lock.release()
return pr
def unregister(self, pr):
self.lock.acquire()
pw = self.pipes.pop(pr)
self.lock.release()
os.close(pr)
os.close(pw)
def capture(self):
cap = self.cap
while cap.isOpened():
ret, frame = cap.read()
if ret:
# ret, data = cv2.imencode('.jpg', frame)
ret, data = cv2.imencode('.jpg', frame,
(cv2.IMWRITE_JPEG_QUALITY, 40))
yield data.tostring()
def send(self, frame):
n = struct.pack('l', len(frame))
self.lock.acquire()
if len(self.pipes):
_, pipes, _ = select([], iter(list(self.pipes.values())), [], 1)
for pipe in pipes:
os.write(pipe, n)
os.write(pipe, frame)
self.lock.release()
def run(self):
for frame in self.capture():
self.send(frame)
class JpegRetriever(object):
def __init__(self, streamer):
self.streamer = streamer
self.local = threading.local()
def retrieve(self):
while True:
ns = os.read(self.local.pipe, 8)
n = struct.unpack('l', ns)[0]
data = os.read(self.local.pipe, n)
yield data
def __enter__(self):
if hasattr(self.local, 'pipe'):
raise RuntimeError()
self.local.pipe = streamer.register()
return self.retrieve()
def __exit__(self, *args):
self.streamer.unregister(self.local.pipe)
del self.local.pipe
return True
class Handler(BaseHTTPRequestHandler):
retriever = None
@staticmethod
def setJpegRetriever(retriever):
Handler.retriever = retriever
def do_GET(self):
if self.retriever is None:
            raise RuntimeError('no retriever')
if self.path != '/':
return
self.send_response(200)
self.send_header('Content-type',
'multipart/x-mixed-replace;boundary=abcde')
self.end_headers()
with self.retriever as frames:
for frame in frames:
self.send_frame(frame)
    def send_frame(self, frame):
        s = '--abcde\r\n'
        s += 'Content-Type: image/jpeg\r\n'
        s += 'Content-Length: %s\r\n\r\n' % len(frame)
        self.wfile.write(s.encode('utf-8'))
        self.wfile.write(frame)
        self.wfile.write(b'\r\n')  # terminate the part before the next boundary
if __name__ == '__main__':
streamer = JpegStreamer(0)
streamer.start()
retriever = JpegRetriever(streamer)
Handler.setJpegRetriever(retriever)
print('Start server...')
httpd = ThreadingTCPServer(('', 9000), Handler)
httpd.serve_forever()
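# Consumption sketch (assumes the server above is running on localhost:9000):
#     import cv2
#     cap = cv2.VideoCapture('http://localhost:9000/')  # FFmpeg-enabled builds can
#     ok, frame = cap.read()                            # usually read MJPEG-over-HTTP
# Alternatively, open http://localhost:9000/ directly in a browser.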
| {
"content_hash": "5148c95223dce7beb1d70d2bec165e53",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 76,
"avg_line_length": 26.31451612903226,
"alnum_prop": 0.5562365920931658,
"repo_name": "quietcoolwu/python-playground",
"id": "4f052dae8680e196e11bdad5e67451ad7b71b3d7",
"size": "3310",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "imooc/python_advanced/8_4_local_thread_data_py35.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "356932"
},
{
"name": "Python",
"bytes": "807157"
}
],
"symlink_target": ""
} |
"""
Gradient Boosted Trees Classification Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonGradientBoostedTreesClassificationExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a GradientBoostedTrees model.
# Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
# (b) Use more iterations in practice.
model = GradientBoostedTrees.trainClassifier(trainingData,
categoricalFeaturesInfo={}, numIterations=3)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(
lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification GBT model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myGradientBoostingClassificationModel")
sameModel = GradientBoostedTreesModel.load(sc,
"target/tmp/myGradientBoostingClassificationModel")
# $example off$
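    # Run sketch (paths are assumptions; adjust to your checkout and Spark install):
    #     bin/spark-submit src/main/python/mllib/gradient_boosting_classification_example.py
    # The input data/mllib/sample_libsvm_data.txt is resolved relative to the working directory.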
| {
"content_hash": "1d036e19511c0541308eafcbff287648",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 98,
"avg_line_length": 43.525,
"alnum_prop": 0.6731763354394027,
"repo_name": "lhfei/spark-in-action",
"id": "e411a59bceed5e87533cde12b97233c23b1adbe3",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spark-2.x/src/main/python/mllib/gradient_boosting_classification_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1033051"
},
{
"name": "Python",
"bytes": "648927"
},
{
"name": "R",
"bytes": "101755"
},
{
"name": "Scala",
"bytes": "1244311"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
} |
import numpy as np
from multiprocessing import Process
from sklearn import mixture
import time
import os
import sys
import logging
# counts = [0]*256
# path1 = "d:/count.txt"
# path2 = "d:/gaussian.txt"
# def save_count(path,count):
# file = open(path,'w')
# for i in range(len(count)):
# file.write(str(count[i])+'\n')
# file.close()
# single gauss component
class Gauss:
def __init__(self, weight, mean, covariance):
self.weight_ = weight
self.mean_ = mean
self.covariance_ = covariance
# single block
class Block:
def __init__(self):
self.gauss_num_ = 0
self.gausses_ = []
def add_gauss(self, gauss):
self.gausses_.append(gauss)
def save_block(path, block):
file = open(path,'w')
for i in range(len(block.gausses_)):
file.write(str(block.gausses_[i].weight_)+' '+str(block.gausses_[i].mean_)+' '+str(block.gausses_[i].covariance_)+'\n')
file.close()
# read all data into a array
def read_all_data(file_path, data):
f = open(file_path, 'rb')
filedata = f.read()
filesize = f.tell()
print(filesize)
filedata2 = bytearray(filedata)
for index in range(0, filesize):
data[index] = filedata2[index]
# data = bytearray(f.read())
print("file size:"+str(filesize)+" byte(s)")
f.close()
# train xth GMM
def train_single_block(index,
width,
depth,
width_num,
depth_num,
side,
ubg,
data):
height_index = int(index / (width_num * depth_num))
depth_index = int((index - height_index * (width_num * depth_num)) / width_num)
width_index = int(index - height_index * (width_num * depth_num) - depth_index * width_num)
start_width = width_index * side
start_depth = depth_index * side
start_height = height_index * side
# print("--------IN BLOCK:"+str(index))
# print("block num:"+str(width_num)+" "+str(depth_num))
# print("block coordinates:"+str(width_index)+" "+str(depth_index)+" "+str(height_index))
obs = [[]] * side * side * side
zero = True
    zero_count = 0
for x in range(0, side):
for y in range(0, side):
for z in range(0, side):
final_index = x * side * side + y * side + z
data_index = (start_height + x) * width * depth + (start_depth + y) * width + (start_width + z)
temp = data[data_index]
# if index == 456:
# counts.append(temp)
# if temp != 0:
# zero_count+=1
# zero = False
obs[final_index] = [temp]
# if zero == True:
# print("block:"+str(index)+" is zero")
# return Block()
# print(str(index)+" is non-zero:"+str(zero_count))
# if index == 456:
# save_count(path1,counts)
final_component_num = 4
g = mixture.GaussianMixture(n_components=final_component_num)
g.fit(obs)
final_g = g
# max_bic = g.bic(np.array(obs))
# max_num = min(ubg, len(obs))
# for component_num in range(2, max_num+1):
# g = mixture.GaussianMixture(n_components=component_num)
# g.fit(obs)
# bic_temp = g.bic(np.array(obs))
# if index == 456:
# print component_num,bic_temp
# if bic_temp < max_bic:
# final_g = g
# final_component_num = component_num
# max_bic = bic_temp
block = Block()
block.gauss_num_ = final_component_num
for component_index in range(0, final_component_num):
gauss = Gauss(final_g.weights_[component_index], final_g.means_[component_index][0], final_g.covariances_[component_index][0][0])
block.add_gauss(gauss)
# if index == 456:
# save_block(path2,block)
return block
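# Index decomposition sketch (hypothetical numbers): with width_num=4 and depth_num=3,
# block index 14 gives height_index = 14 // (4*3) = 1, depth_index = (14 - 12) // 4 = 0,
# width_index = 14 - 12 - 0*4 = 2, i.e. the block at (width=2, depth=0, height=1).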
# train a part of original data
# NOTE: parameter order (width_num, depth_num) matches the positional call in __main__
def train_blocks(result_disk_address, data_source, block_num, index, stride, data, width, depth, width_num, depth_num, side, ubg):
block_gmm = [Block()] * stride
end_block = (index+1)*stride
end_index = stride
for i in range(0, stride):
if index * stride + i >= block_num:
            end_block = index*stride+i
            end_index = i
            break
block_gmm[i] = train_single_block(index * stride + i, width, depth, width_num, depth_num, side, ubg, data)
gmm_output = result_disk_address + data_source + '_GMM_Result_'+str(index)+'.txt'
# restore block_sgmm into txt file
with open(gmm_output, "w") as f_out:
for i in range(0, end_index):
f_out.write(str(block_gmm[i].gauss_num_)+'\n')
for j in range(0, block_gmm[i].gauss_num_):
f_out.write(str(block_gmm[i].gausses_[j].weight_)+'\n')
f_out.write(str(block_gmm[i].gausses_[j].mean_)+'\n')
f_out.write(str(block_gmm[i].gausses_[j].covariance_)+'\n')
print("-----------IN FILE:"+str(index)+" training and saving blocks from "+str(index*stride)+" to "+str(end_block)+" done")
if __name__ == '__main__':
result_disk_address = ""
data_source = ""
width = 0
depth = 0
height = 0
process_num = 0
side = 0
if len(sys.argv) == 1:
result_disk_address = raw_input("input disk address:")
data_source = raw_input('input the data name:')
        width = int(raw_input('width:'))
depth = int(raw_input('depth:'))
height = int(raw_input('height:'))
side = int(raw_input('side:'))
process_num = int(raw_input('input the process num (must be the divisor of the block number):'))
else:
result_disk_address = sys.argv[1]
data_source = sys.argv[2]
width = int(sys.argv[3])
depth = int(sys.argv[4])
height = int(sys.argv[5])
side = int(sys.argv[6])
process_num = int(sys.argv[7])
if not os.path.exists(result_disk_address + data_source + ".raw"):
        print('file doesn\'t exist')
exit(0)
print("disk address:" + result_disk_address)
print("data name:" + data_source)
print("width:" + str(width) + " depth:" + str(depth) + " height:" + str(height) + " side:" + str(side))
print("process num (file num):" + str(process_num))
ubg = 4
np.random.seed(1)
width_num = width / side
depth_num = depth / side
height_num = height / side
total_num = width_num * depth_num * height_num
data = [0] * width * depth * height
stride = (total_num+process_num-1) / process_num
print("stride:"+str(stride))
read_all_data(result_disk_address + data_source + '.raw', data)
begin_time = time.localtime(time.time())
cpu_time_begin = time.clock()
print("total_num = " + str(total_num))
proc_record = []
for i in range(0, process_num):
p = Process(target=train_blocks, args=(result_disk_address,
data_source,
total_num,
i,
stride,
data,
width,
depth,
width_num,
depth_num,
side,
ubg))
p.start()
proc_record.append(p)
for p in proc_record:
p.join()
print("training GMM done.")
cpu_time_end = time.clock()
    print(time.strftime('Training began at %Y-%m-%d %H:%M:%S', begin_time))
    print(time.strftime('Training finished at %Y-%m-%d %H:%M:%S', time.localtime(time.time())))
print("cpu time cost in python:" + str(cpu_time_end - cpu_time_begin)+"s.")
# train_single_block(73800)
| {
"content_hash": "d997dd918379e191ce4554270b12bb98",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 137,
"avg_line_length": 34.27896995708154,
"alnum_prop": 0.5277325654188055,
"repo_name": "yslib/SGMMCluster",
"id": "f6087ad4d5acd6dda1439334fa021cccc6746f95",
"size": "7987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SGMM/TrainBlockGMM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1589828"
},
{
"name": "C++",
"bytes": "315586"
},
{
"name": "Cuda",
"bytes": "119"
},
{
"name": "Python",
"bytes": "45075"
}
],
"symlink_target": ""
} |
"""Functions to plot ICA specific data (besides topographies)."""
from __future__ import print_function
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
from functools import partial
from numbers import Integral
import numpy as np
from .utils import (tight_layout, _prepare_trellis, _select_bads,
_layout_figure, _plot_raw_onscroll, _mouse_click,
_helper_raw_resize, _plot_raw_onkey, plt_show)
from .topomap import (_prepare_topo_plot, plot_topomap, _hide_frame,
_plot_ica_topomap)
from .raw import _prepare_mne_browse_raw, _plot_raw_traces
from .epochs import _prepare_mne_browse_epochs, plot_epochs_image
from .evoked import _butterfly_on_button_press, _butterfly_onpick
from ..utils import warn
from ..defaults import _handle_default
from ..io.meas_info import create_info
from ..io.pick import pick_types
from ..externals.six import string_types
from ..time_frequency.psd import psd_multitaper
def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False):
"""Plot estimated latent sources given the unmixing matrix.
    Typical use cases:
1. plot evolution of latent sources over time based on (Raw input)
2. plot latent source around event related time windows (Epochs input)
3. plot time-locking in ICA space (Evoked input)
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
The object to plot the sources from.
picks : int | array_like of int | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
start : int
X-axis start index. If None, from the beginning.
stop : int
        X-axis stop index. If None, the next 20 seconds (Raw) or epochs (Epochs)
        are shown; for evoked data the plot extends to the end.
title : str | None
The figure title. If None a default is provided.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for interactive selection of components in raw and epoch
plotter. For evoked, this parameter has no effect. Defaults to False.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
Returns
-------
fig : instance of pyplot.Figure
The figure.
Notes
-----
For raw and epoch instances, it is possible to select components for
exclusion by clicking on the line. The selected components are added to
``ica.exclude`` on close.
.. versionadded:: 0.10.0
"""
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
if exclude is None:
exclude = ica.exclude
elif len(ica.exclude) > 0:
exclude = np.union1d(ica.exclude, exclude)
if isinstance(inst, BaseRaw):
fig = _plot_sources_raw(ica, inst, picks, exclude, start=start,
stop=stop, show=show, title=title,
block=block, show_first_samp=show_first_samp)
elif isinstance(inst, BaseEpochs):
fig = _plot_sources_epochs(ica, inst, picks, exclude, start=start,
stop=stop, show=show, title=title,
block=block)
elif isinstance(inst, Evoked):
if start is not None or stop is not None:
inst = inst.copy().crop(start, stop)
sources = ica.get_sources(inst)
fig = _plot_ica_sources_evoked(
evoked=sources, picks=picks, exclude=exclude, title=title,
labels=getattr(ica, 'labels_', None), show=show)
else:
        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
return fig
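# Usage sketch (hypothetical data; assumes a Raw instance `raw` and a fitted ICA):
#     from mne.preprocessing import ICA
#     ica = ICA(n_components=20).fit(raw)
#     fig = plot_ica_sources(ica, raw, picks=range(10), start=0, stop=10)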
def _create_properties_layout(figsize=None):
"""Create main figure and axes layout used by plot_ica_properties."""
import matplotlib.pyplot as plt
if figsize is None:
figsize = [7., 6.]
fig = plt.figure(figsize=figsize, facecolor=[0.95] * 3)
ax = list()
ax.append(fig.add_axes([0.08, 0.5, 0.3, 0.45], label='topomap'))
ax.append(fig.add_axes([0.5, 0.6, 0.45, 0.35], label='image'))
ax.append(fig.add_axes([0.5, 0.5, 0.45, 0.1], label='erp'))
ax.append(fig.add_axes([0.08, 0.1, 0.32, 0.3], label='spectrum'))
ax.append(fig.add_axes([0.5, 0.1, 0.45, 0.25], label='variance'))
return fig, ax
def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True):
"""Display component properties.
Properties include the topography, epochs image, ERP/ERF, power
spectrum, and epoch variance.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst: instance of Epochs or Raw
The data to use in plotting properties.
picks : int | array-like of int | None
The components to be displayed. If None, plot will show the first
five sources. If more than one components were chosen in the picks,
each one will be plotted in a separate figure. Defaults to None.
axes: list of matplotlib axes | None
List of five matplotlib axes to use in plotting: [topomap_axis,
image_axis, erp_axis, spectrum_axis, variance_axis]. If None a new
figure with relevant axes is created. Defaults to None.
dB: bool
Whether to plot spectrum in dB. Defaults to True.
plot_std: bool | float
Whether to plot standard deviation in ERP/ERF and spectrum plots.
Defaults to True, which plots one standard deviation above/below.
If set to float allows to control how many standard deviations are
plotted. For example 2.5 will plot 2.5 standard deviation above/below.
topomap_args : dict | None
Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
additional arguments. Defaults to None.
image_args : dict | None
Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
any additional arguments. Defaults to None.
psd_args : dict | None
Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
any additional arguments. Defaults to None.
figsize : array-like of size (2,) | None
Allows to control size of the figure. If None, the figure size
        defaults to [7., 6.].
show : bool
Show figure if True.
Returns
-------
fig : list
List of matplotlib figures.
Notes
-----
.. versionadded:: 0.13
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..preprocessing import ICA
if not isinstance(inst, (BaseRaw, BaseEpochs)):
raise ValueError('inst should be an instance of Raw or Epochs,'
' got %s instead.' % type(inst))
if not isinstance(ica, ICA):
raise ValueError('ica has to be an instance of ICA, '
'got %s instead' % type(ica))
if isinstance(plot_std, bool):
num_std = 1. if plot_std else 0.
elif isinstance(plot_std, (float, int)):
num_std = plot_std
plot_std = True
else:
raise ValueError('plot_std has to be a bool, int or float, '
'got %s instead' % type(plot_std))
# if no picks given - plot the first 5 components
picks = list(range(min(5, ica.n_components_))) if picks is None else picks
picks = [picks] if isinstance(picks, Integral) else picks
if axes is None:
fig, axes = _create_properties_layout(figsize=figsize)
else:
if len(picks) > 1:
raise ValueError('Only a single pick can be drawn '
'to a set of axes.')
from .utils import _validate_if_list_of_axes
_validate_if_list_of_axes(axes, obligatory_len=5)
fig = axes[0].get_figure()
psd_args = dict() if psd_args is None else psd_args
topomap_args = dict() if topomap_args is None else topomap_args
image_args = dict() if image_args is None else image_args
for d in (psd_args, topomap_args, image_args):
if not isinstance(d, dict):
raise ValueError('topomap_args, image_args and psd_args have to be'
' dictionaries, got %s instead.' % type(d))
if dB is not None and isinstance(dB, bool) is False:
raise ValueError('dB should be bool, got %s instead' %
type(dB))
# calculations
# ------------
plot_line_at_zero = False
if isinstance(inst, BaseRaw):
# break up continuous signal into segments
from ..epochs import _segment_raw
inst = _segment_raw(inst, segment_length=2., verbose=False,
preload=True)
if inst.times[0] < 0. and inst.times[-1] > 0.:
plot_line_at_zero = True
epochs_src = ica.get_sources(inst)
ica_data = np.swapaxes(epochs_src.get_data()[:, picks, :], 0, 1)
# spectrum
Nyquist = inst.info['sfreq'] / 2.
if 'fmax' not in psd_args:
psd_args['fmax'] = min(inst.info['lowpass'] * 1.25, Nyquist)
plot_lowpass_edge = inst.info['lowpass'] < Nyquist and (
psd_args['fmax'] > inst.info['lowpass'])
psds, freqs = psd_multitaper(epochs_src, picks=picks, **psd_args)
def set_title_and_labels(ax, title, xlab, ylab):
if title:
ax.set_title(title)
if xlab:
ax.set_xlabel(xlab)
if ylab:
ax.set_ylabel(ylab)
ax.axis('auto')
ax.tick_params('both', labelsize=8)
ax.axis('tight')
all_fig = list()
# the rest is component-specific
for idx, pick in enumerate(picks):
if idx > 0:
fig, axes = _create_properties_layout(figsize=figsize)
# spectrum
this_psd = psds[:, idx, :]
if dB:
this_psd = 10 * np.log10(this_psd)
psds_mean = this_psd.mean(axis=0)
diffs = this_psd - psds_mean
# the distribution of power for each frequency bin is highly
# skewed so we calculate std for values below and above average
# separately - this is used for fill_between shade
spectrum_std = [
[np.sqrt((d[d < 0] ** 2).mean(axis=0)) for d in diffs.T],
[np.sqrt((d[d > 0] ** 2).mean(axis=0)) for d in diffs.T]]
spectrum_std = np.array(spectrum_std) * num_std
# erp std
if plot_std:
erp = ica_data[idx].mean(axis=0)
diffs = ica_data[idx] - erp
erp_std = [
[np.sqrt((d[d < 0] ** 2).mean(axis=0)) for d in diffs.T],
[np.sqrt((d[d > 0] ** 2).mean(axis=0)) for d in diffs.T]]
erp_std = np.array(erp_std) * num_std
# epoch variance
epoch_var = np.var(ica_data[idx], axis=1)
# plotting
# --------
# component topomap
_plot_ica_topomap(ica, pick, show=False, axes=axes[0], **topomap_args)
# image and erp
plot_epochs_image(epochs_src, picks=pick, axes=axes[1:3],
colorbar=False, show=False, **image_args)
# spectrum
axes[3].plot(freqs, psds_mean, color='k')
if plot_std:
axes[3].fill_between(freqs, psds_mean - spectrum_std[0],
psds_mean + spectrum_std[1],
color='k', alpha=.15)
if plot_lowpass_edge:
axes[3].axvline(inst.info['lowpass'], lw=2, linestyle='--',
color='k', alpha=0.15)
# epoch variance
axes[4].scatter(range(len(epoch_var)), epoch_var, alpha=0.5,
facecolor=[0, 0, 0], lw=0)
# aesthetics
# ----------
axes[0].set_title('IC #{0:0>3}'.format(pick))
set_title_and_labels(axes[1], 'epochs image and ERP/ERF', [], 'Epochs')
# erp
set_title_and_labels(axes[2], [], 'time', 'AU')
# line color and std
axes[2].lines[0].set_color('k')
if plot_std:
erp_xdata = axes[2].lines[0].get_data()[0]
axes[2].fill_between(erp_xdata, erp - erp_std[0],
erp + erp_std[1], color='k', alpha=.15)
axes[2].autoscale(enable=True, axis='y')
axes[2].axis('auto')
axes[2].set_xlim(erp_xdata[[0, -1]])
# remove half of yticks if more than 5
yt = axes[2].get_yticks()
if len(yt) > 5:
yt = yt[::2]
axes[2].yaxis.set_ticks(yt)
if not plot_line_at_zero:
xlims = [1e3 * inst.times[0], 1e3 * inst.times[-1]]
for k, ax in enumerate(axes[1:3]):
ax.lines[k].remove()
ax.set_xlim(xlims)
# remove xticks - erp plot shows xticks for both image and erp plot
axes[1].xaxis.set_ticks([])
yt = axes[1].get_yticks()
axes[1].yaxis.set_ticks(yt[1:])
axes[1].set_ylim([-0.5, ica_data.shape[1] + 0.5])
# spectrum
ylabel = 'dB' if dB else 'power'
set_title_and_labels(axes[3], 'spectrum', 'frequency', ylabel)
axes[3].yaxis.labelpad = 0
axes[3].set_xlim(freqs[[0, -1]])
ylim = axes[3].get_ylim()
air = np.diff(ylim)[0] * 0.1
axes[3].set_ylim(ylim[0] - air, ylim[1] + air)
# epoch variance
set_title_and_labels(axes[4], 'epochs variance', 'epoch', 'AU')
all_fig.append(fig)
plt_show(show)
return all_fig
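# Usage sketch (hypothetical data; assumes an Epochs instance `epochs` and a fitted ICA):
#     figs = plot_ica_properties(ica, epochs, picks=[0, 1], dB=True,
#                                psd_args={'fmax': 40.})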
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, labels=None):
"""Plot average over epochs in ICA space.
Parameters
----------
evoked : instance of mne.Evoked
The Evoked to be used.
picks : int | array_like of int | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
title : str
The figure title.
show : bool
Show figure if True.
labels : None | dict
The ICA labels attribute.
"""
import matplotlib.pyplot as plt
if title is None:
title = 'Reconstructed latent sources, time-locked'
fig, axes = plt.subplots(1)
ax = axes
axes = [axes]
times = evoked.times * 1e3
# plot unclassified sources and label excluded ones
lines = list()
texts = list()
if picks is None:
picks = np.arange(evoked.data.shape[0])
picks = np.sort(picks)
idxs = [picks]
if labels is not None:
labels_used = [k for k in labels if '/' not in k]
exclude_labels = list()
for ii in picks:
if ii in exclude:
line_label = 'IC #%03d' % ii
if labels is not None:
annot = list()
for this_label in labels_used:
indices = labels[this_label]
if ii in indices:
annot.append(this_label)
line_label += (' - ' + ', '.join(annot))
exclude_labels.append(line_label)
else:
exclude_labels.append(None)
if labels is not None:
# compute colors only based on label categories
unique_labels = set([k.split(' - ')[1] for k in exclude_labels if k])
label_colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_labels)))
label_colors = dict(zip(unique_labels, label_colors))
else:
label_colors = dict((k, 'red') for k in exclude_labels)
for exc_label, ii in zip(exclude_labels, picks):
if exc_label is not None:
# create look up for color ...
if ' - ' in exc_label:
key = exc_label.split(' - ')[1]
else:
key = exc_label
color = label_colors[key]
# ... but display component number too
lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
zorder=2, color=color, label=exc_label))
else:
lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
color='k', zorder=1))
ax.set_title(title)
ax.set_xlim(times[[0, -1]])
ax.set_xlabel('Time (ms)')
ax.set_ylabel('(NA)')
if len(exclude) > 0:
plt.legend(loc='best')
tight_layout(fig=fig)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
# this is done to give the structure of a list of lists of a group of lines
# in each subplot
lines = [lines]
ch_names = evoked.ch_names
from matplotlib import patheffects
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
ch_names=ch_names, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
plt_show(show)
return fig
def plot_ica_scores(ica, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=(12, 6), show=True):
"""Plot scores related to detected components.
    Use this function to assess how well your score describes outlier
sources and how well you were detecting them.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
scores : array_like of float, shape (n ica components) | list of arrays
Scores based on arbitrary metric to characterize ICA components.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
labels : str | list | 'ecg' | 'eog' | None
The labels to consider for the axes tests. Defaults to None.
If list, should match the outer shape of `scores`.
If 'ecg' or 'eog', the ``labels_`` attributes will be looked up.
Note that '/' is used internally for sublabels specifying ECG and
EOG channels.
axhline : float
Draw horizontal line to e.g. visualize rejection threshold.
title : str
The figure title.
figsize : tuple of int
The figure size. Defaults to (12, 6).
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object
"""
import matplotlib.pyplot as plt
my_range = np.arange(ica.n_components_)
if exclude is None:
exclude = ica.exclude
exclude = np.unique(exclude)
if not isinstance(scores[0], (list, np.ndarray)):
scores = [scores]
n_rows = len(scores)
figsize = (12, 6) if figsize is None else figsize
fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
plt.suptitle(title)
if labels == 'ecg':
labels = [l for l in ica.labels_ if l.startswith('ecg/')]
elif labels == 'eog':
labels = [l for l in ica.labels_ if l.startswith('eog/')]
labels.sort(key=lambda l: l.split('/')[1]) # sort by index
elif isinstance(labels, string_types):
if len(axes) > 1:
raise ValueError('Need as many labels as axes (%i)' % len(axes))
labels = [labels]
elif isinstance(labels, (tuple, list)):
if len(labels) != len(axes):
raise ValueError('Need as many labels as axes (%i)' % len(axes))
elif labels is None:
labels = (None, None)
for label, this_scores, ax in zip(labels, scores, axes):
if len(my_range) != len(this_scores):
raise ValueError('The length of `scores` must equal the '
'number of ICA components.')
ax.bar(my_range, this_scores, color='w')
for excl in exclude:
ax.bar(my_range[excl], this_scores[excl], color='r')
if axhline is not None:
if np.isscalar(axhline):
axhline = [axhline]
for axl in axhline:
ax.axhline(axl, color='r', linestyle='--')
ax.set_ylabel('score')
if label is not None:
if 'eog/' in label:
split = label.split('/')
label = ', '.join([split[0], split[2]])
elif '/' in label:
label = ', '.join(label.split('/'))
ax.set_title('(%s)' % label)
ax.set_xlabel('ICA components')
ax.set_xlim(0, len(this_scores))
tight_layout(fig=fig)
if len(axes) > 1:
plt.subplots_adjust(top=0.9)
plt_show(show)
return fig
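# Usage sketch (assumes a Raw recording with an EOG channel and a fitted ICA):
#     eog_inds, scores = ica.find_bads_eog(raw)
#     fig = plot_ica_scores(ica, scores, exclude=eog_inds, labels='eog')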
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True):
"""Overlay of raw and cleaned signals given the unmixing matrix.
    This method helps visualize signal quality and the effect of artifact rejection.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
inst : instance of mne.io.Raw or mne.Evoked
The signals to be compared given the ICA solution. If Raw input,
The raw data are displayed before and after cleaning. In a second
panel the cross channel average will be displayed. Since dipolar
sources will be canceled out this display is sensitive to
artifacts. If evoked input, butterfly plots for clean and raw
signals will be superimposed.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels
are used that were included on fitting).
start : int
X-axis start index. If None from the beginning.
stop : int
X-axis stop index. If None to the end.
title : str
The figure title.
show : bool
Show figure if True.
Returns
-------
fig : instance of pyplot.Figure
The figure.
"""
# avoid circular imports
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..preprocessing.ica import _check_start_stop
if not isinstance(inst, (BaseRaw, Evoked)):
raise ValueError('Data input must be of Raw or Evoked type')
if title is None:
title = 'Signals before (red) and after (black) cleaning'
if picks is None:
picks = [inst.ch_names.index(k) for k in ica.ch_names]
if exclude is None:
exclude = ica.exclude
if isinstance(inst, BaseRaw):
if start is None:
start = 0.0
if stop is None:
stop = 3.0
ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
start_compare, stop_compare = _check_start_stop(inst, start, stop)
data, times = inst[picks, start_compare:stop_compare]
raw_cln = ica.apply(inst.copy(), exclude=exclude,
start=start, stop=stop)
data_cln, _ = raw_cln[picks, start_compare:stop_compare]
fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
times=times * 1e3, title=title,
ch_types_used=ch_types_used, show=show)
elif isinstance(inst, Evoked):
if start is not None and stop is not None:
inst = inst.copy().crop(start, stop)
if picks is not None:
inst.pick_channels([inst.ch_names[p] for p in picks])
evoked_cln = ica.apply(inst.copy(), exclude=exclude)
fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
title=title, show=show)
return fig
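# Usage sketch (assumes `eog_inds` from a prior artifact-detection step):
#     fig = plot_ica_overlay(ica, raw, exclude=eog_inds, start=0., stop=3.)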
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
Show figure if True.
Returns
-------
fig : instance of pyplot.Figure
"""
import matplotlib.pyplot as plt
# Restore sensor space data and keep all PCA components
    # let's now compare the data before and after cleaning.
# first the raw data
assert data.shape == data_cln.shape
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
plt.suptitle(title)
ax1.plot(times, data.T, color='r')
ax1.plot(times, data_cln.T, color='k')
    ax1.set_xlabel('time (ms)')
    ax1.set_xlim(times[0], times[-1])
ax1.set_title('Raw data')
_ch_types = {'mag': 'Magnetometers',
'grad': 'Gradiometers',
'eeg': 'EEG'}
ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
ax2.set_title('Average across channels ({0})'.format(ch_types))
ax2.plot(times, data.mean(0), color='r')
ax2.plot(times, data_cln.mean(0), color='k')
    ax2.set_xlabel('time (ms)')
ax2.set_xlim(times[0], times[-1])
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
    evoked : instance of mne.Evoked
        The evoked data before cleaning.
    evoked_cln : instance of mne.Evoked
        The evoked data after cleaning.
    title : str
        The figure title (not currently used; a fixed suptitle is set).
    show : bool
        If True, all open plots will be shown.
Returns
-------
fig : instance of pyplot.Figure
"""
import matplotlib.pyplot as plt
ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
n_rows = len(ch_types_used)
ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
c in evoked_cln]
if len(ch_types_used) != len(ch_types_used_cln):
raise ValueError('Raw and clean evokeds must match. '
'Found different channels.')
fig, axes = plt.subplots(n_rows, 1)
fig.suptitle('Average signal before (red) and after (black) ICA')
axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
evoked.plot(axes=axes, show=show)
for ax in fig.axes:
for l in ax.get_lines():
l.set_color('r')
fig.canvas.draw()
evoked_cln.plot(axes=axes, show=show)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_sources_raw(ica, raw, picks, exclude, start, stop, show, title,
block, show_first_samp):
"""Plot the ICA components as raw array."""
color = _handle_default('color', (0., 0., 0.))
orig_data = ica._transform_raw(raw, 0, len(raw.times)) * 0.2
if picks is None:
picks = range(len(orig_data))
types = ['misc' for _ in picks]
picks = list(sorted(picks))
eog_chs = pick_types(raw.info, meg=False, eog=True, ref_meg=False)
ecg_chs = pick_types(raw.info, meg=False, ecg=True, ref_meg=False)
data = [orig_data[pick] for pick in picks]
c_names = ['IC #%03d' % x for x in range(len(orig_data))]
for eog_idx in eog_chs:
c_names.append(raw.ch_names[eog_idx])
types.append('eog')
for ecg_idx in ecg_chs:
c_names.append(raw.ch_names[ecg_idx])
types.append('ecg')
extra_picks = np.append(eog_chs, ecg_chs).astype(int)
if len(extra_picks) > 0:
eog_ecg_data, _ = raw[extra_picks, :]
for idx in range(len(eog_ecg_data)):
if idx < len(eog_chs):
eog_ecg_data[idx] /= 150e-6 # scaling for eog
else:
eog_ecg_data[idx] /= 5e-4 # scaling for ecg
data = np.append(data, eog_ecg_data, axis=0)
for idx in range(len(extra_picks)):
picks = np.append(picks, ica.n_components_ + idx)
if title is None:
title = 'ICA components'
info = create_info([c_names[x] for x in picks], raw.info['sfreq'])
info['bads'] = [c_names[x] for x in exclude]
if start is None:
start = 0
if stop is None:
stop = start + 20
stop = min(stop, raw.times[-1])
duration = stop - start
if duration <= 0:
raise RuntimeError('Stop must be larger than start.')
t_end = int(duration * raw.info['sfreq'])
times = raw.times[0:t_end]
bad_color = (1., 0., 0.)
inds = list(range(len(picks)))
data = np.array(data)
n_channels = min([20, len(picks)])
first_time = raw._first_time if show_first_samp else 0
start += first_time
params = dict(raw=raw, orig_data=data, data=data[:, 0:t_end], inds=inds,
ch_start=0, t_start=start, info=info, duration=duration,
ica=ica, n_channels=n_channels, times=times, types=types,
n_times=raw.n_times, bad_color=bad_color, picks=picks,
first_time=first_time, data_picks=[], decim=1)
_prepare_mne_browse_raw(params, title, 'w', color, bad_color, inds,
n_channels)
params['scale_factor'] = 1.0
params['plot_fun'] = partial(_plot_raw_traces, params=params, color=color,
bad_color=bad_color)
params['update_fun'] = partial(_update_data, params)
params['pick_bads_fun'] = partial(_pick_bads, params=params)
params['label_click_fun'] = partial(_label_clicked, params=params)
_layout_figure(params)
# callbacks
callback_key = partial(_plot_raw_onkey, params=params)
params['fig'].canvas.mpl_connect('key_press_event', callback_key)
callback_scroll = partial(_plot_raw_onscroll, params=params)
params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
callback_pick = partial(_mouse_click, params=params)
params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
callback_resize = partial(_helper_raw_resize, params=params)
params['fig'].canvas.mpl_connect('resize_event', callback_resize)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
params['fig_proj'] = None
params['event_times'] = None
params['butterfly'] = False
params['update_fun']()
params['plot_fun']()
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
def _update_data(params):
"""Prepare the data on horizontal shift of the viewport."""
sfreq = params['info']['sfreq']
start = int((params['t_start'] - params['first_time']) * sfreq)
end = int((params['t_start'] + params['duration']) * sfreq)
params['data'] = params['orig_data'][:, start:end]
params['times'] = params['raw'].times[start:end]
def _pick_bads(event, params):
"""Select components on click."""
bads = params['info']['bads']
params['info']['bads'] = _select_bads(event, params, bads)
params['update_fun']()
params['plot_fun']()
def _close_event(events, params):
"""Exclude the selected components on close."""
info = params['info']
c_names = ['IC #%03d' % x for x in range(params['ica'].n_components_)]
exclude = [c_names.index(x) for x in info['bads'] if x.startswith('IC')]
params['ica'].exclude = exclude
def _plot_sources_epochs(ica, epochs, picks, exclude, start, stop, show,
title, block):
"""Plot the components as epochs."""
data = ica._transform_epochs(epochs, concatenate=True)
eog_chs = pick_types(epochs.info, meg=False, eog=True, ref_meg=False)
ecg_chs = pick_types(epochs.info, meg=False, ecg=True, ref_meg=False)
c_names = ['IC #%03d' % x for x in range(ica.n_components_)]
ch_types = np.repeat('misc', ica.n_components_)
for eog_idx in eog_chs:
c_names.append(epochs.ch_names[eog_idx])
ch_types = np.append(ch_types, 'eog')
for ecg_idx in ecg_chs:
c_names.append(epochs.ch_names[ecg_idx])
ch_types = np.append(ch_types, 'ecg')
extra_picks = np.append(eog_chs, ecg_chs).astype(int)
if len(extra_picks) > 0:
eog_ecg_data = np.concatenate(epochs.get_data()[:, extra_picks],
axis=1)
data = np.append(data, eog_ecg_data, axis=0)
scalings = _handle_default('scalings_plot_raw')
scalings['misc'] = 5.0
info = create_info(ch_names=c_names, sfreq=epochs.info['sfreq'],
ch_types=ch_types)
info['projs'] = list()
info['bads'] = [c_names[x] for x in exclude]
if title is None:
title = 'ICA components'
if picks is None:
picks = list(range(ica.n_components_))
if start is None:
start = 0
if stop is None:
stop = start + 20
stop = min(stop, len(epochs.events))
for idx in range(len(extra_picks)):
picks = np.append(picks, ica.n_components_ + idx)
n_epochs = stop - start
if n_epochs <= 0:
raise RuntimeError('Stop must be larger than start.')
params = {'ica': ica,
'epochs': epochs,
'info': info,
'orig_data': data,
'bads': list(),
'bad_color': (1., 0., 0.),
't_start': start * len(epochs.times),
'data_picks': [],
'decim': 1}
params['label_click_fun'] = partial(_label_clicked, params=params)
_prepare_mne_browse_epochs(params, projs=list(), n_channels=20,
n_epochs=n_epochs, scalings=scalings,
title=title, picks=picks,
order=['misc', 'eog', 'ecg'])
params['plot_update_proj_callback'] = _update_epoch_data
_update_epoch_data(params)
params['hsel_patch'].set_x(params['t_start'])
callback_close = partial(_close_epochs_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
def _update_epoch_data(params):
"""Prepare the data on horizontal shift."""
start = params['t_start']
n_epochs = params['n_epochs']
end = start + n_epochs * len(params['epochs'].times)
data = params['orig_data'][:, start:end]
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _close_epochs_event(events, params):
"""Exclude the selected components on close."""
info = params['info']
exclude = [info['ch_names'].index(x) for x in info['bads']
if x.startswith('IC')]
params['ica'].exclude = exclude
def _label_clicked(pos, params):
"""Plot independent components on click to label."""
import matplotlib.pyplot as plt
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1]) + params['ch_start']
if line_idx >= len(params['picks']):
return
ic_idx = [params['picks'][line_idx]]
if params['types'][ic_idx[0]] != 'misc':
warn('Can only plot ICA components.')
return
types = list()
info = params['ica'].info
if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
types.append('eeg')
if len(pick_types(info, meg='mag', ref_meg=False)) > 0:
types.append('mag')
if len(pick_types(info, meg='grad', ref_meg=False)) > 0:
types.append('grad')
ica = params['ica']
data = np.dot(ica.mixing_matrix_[:, ic_idx].T,
ica.pca_components_[:ica.n_components_])
data = np.atleast_2d(data)
fig, axes = _prepare_trellis(len(types), max_col=3)
for ch_idx, ch_type in enumerate(types):
try:
data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica,
ch_type,
None)
except Exception as exc:
warn(exc)
plt.close(fig)
return
this_data = data[:, data_picks]
ax = axes[ch_idx]
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_ in zip(ic_idx, this_data):
ax.set_title('IC #%03d ' % ii + ch_type, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
plot_topomap(data_.flatten(), pos, axes=ax, show=False)
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
plt_show(True)
| {
"content_hash": "572e139b9772c414eb438d9442c828dd",
"timestamp": "",
"source": "github",
"line_count": 989,
"max_line_length": 79,
"avg_line_length": 38.21840242669363,
"alnum_prop": 0.5829673527699878,
"repo_name": "jaeilepp/mne-python",
"id": "5e49be837c76c8415d158d026d12f91c5ec26af2",
"size": "37798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/ica.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6113850"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Labels', '0001_initial'),
('Projects', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProjectEmployee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstName', models.CharField(max_length=100)),
('lastName', models.CharField(blank=True, max_length=100)),
('title', models.CharField(max_length=100)),
('emailAddress', models.EmailField(blank=True, max_length=200)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Projects.Project')),
],
),
migrations.CreateModel(
name='ProjectSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bundlesLoomsMulticore', models.CharField(choices=[('Bundle', 'Bundle'), ('Loom', 'Loom'), ('Multicore', 'Multicore')], max_length=50)),
('UIStyle', models.CharField(choices=[('Standard', 'Standard'), ('Dark', 'Dark')], default='Standard', max_length=25)),
('lengthUnits', models.CharField(choices=[('feet', 'feet'), ('meters', 'meters')], max_length=20)),
('tagBundleEnds', models.BooleanField(default=False)),
('largeLabelTemplate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='large_label_template', to='Labels.LabelTemplate')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projectSettingsProject', to='Projects.Project')),
('smallLabelTemplate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='small_label_template', to='Labels.LabelTemplate')),
],
),
migrations.AlterField(
model_name='venue',
name='state',
field=models.CharField(blank=True, default='', max_length=100),
preserve_default=False,
),
migrations.AlterField(
model_name='venue',
name='streetAddress2',
field=models.CharField(blank=True, default='', max_length=200),
preserve_default=False,
),
]
| {
"content_hash": "682f5d864d917721a7f2bef7e53a9b38",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 167,
"avg_line_length": 49.88235294117647,
"alnum_prop": 0.597877358490566,
"repo_name": "matt-dale/designdb",
"id": "92eb449c3ee1b454faea48777fd38bc4e371303e",
"size": "2616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DESIGNDB_REBUILD/Projects/migrations/0002_auto_20160704_2042.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88198"
}
],
"symlink_target": ""
} |
"""Copyright 2015 Ericsson AB
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from deap import tools, algorithms
import numpy
from dbConnection import DB
import toolBox
from fitness import Fitness
# Variables
MUTATION_PROB = 0.5
CROSS_OVER_PROB = 0.5
NO_OF_GENERATION = 2
POPULATION_SIZE = 1
def main():
    fitnessClass = Fitness()
# Generate the population
pop = toolBox.toolbox.population(n=POPULATION_SIZE)
fitnessClass.evalIndividualCapacity(pop[0])
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
pop, log = algorithms.eaSimple(pop, toolBox.toolbox, cxpb=CROSS_OVER_PROB,
mutpb=MUTATION_PROB, ngen=NO_OF_GENERATION, stats=stats,
halloffame=hof, verbose=True)
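    # eaSimple runs the canonical generational loop: select, clone, mate with
    # probability cxpb, mutate with probability mutpb, re-evaluate invalid
    # individuals, update the hall of fame and record the registered stats.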
## Evaluate the entire population
#fitnesses = list(map(toolBox.toolbox.evaluate, pop))
#for ind, fit in zip(pop, fitnesses):
# ind.fitness.values = fit
# Iterate trough a number of generations
# for g in range(NGEN):
# print("-- Generation %i --" % g)
# # Select individuals based on their fitness
# offspring = toolBox.toolbox.select(pop, len(pop))
# # Cloning those individuals into a new population
# offspring = list(map(toolBox.toolbox.clone, offspring))
# # Calling the crossover function
# crossover(offspring)
# mutation(offspring)
# invalidfitness(offspring)
# The Best Individual found
best_ind = tools.selBest(pop, 1)[0]
print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
generateTimeTable(best_ind)
# def crossover(offspring):
# # Apply Crossover
# for child1, child2 in zip(offspring[::2], offspring[1::2]):
# if random.random() < CXPB:
# toolBox.toolbox.mate(child1, child2)
# del child1.fitness.values
# del child2.fitness.values
#
#
# def mutation(offspring):
# # Testing mutation
# for mutant in offspring:
# if random.random() < MUTPB:
# toolBox.toolbox.mutate(mutant)
# del mutant.fitness.values
#
#
# def invalidfitness(offspring):
# # Evaluate the individuals with an invalid fitness
# invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
# fitnesses = map(toolBox.toolbox.evaluate, invalid_ind)
# for ind, fit in zip(invalid_ind, fitnesses):
# ind.fitness.values = fit
# print(" Evaluated %i individuals" % len(invalid_ind))
# # The population is entirely replaced by the offspring
# pop[:] = offspring
# # Gather all the fitnesses in one list and print the stats
# fits = [ind.fitness.values[0] for ind in pop]
# length = len(pop)
# mean = sum(fits) / length
# sum2 = sum(x*x for x in fits)
# std = abs(sum2 / length - mean**2)**0.5
# print(" Min %s" % min(fits))
# print(" Max %s" % max(fits))
# print(" Avg %s" % mean)
# print(" Std %s" % std)
def generateTimeTable(individual):
databaseClass = DB()
timetable = databaseClass.generateTripTimeTable(individual)
databaseClass.insertTimeTable(timetable)
if __name__ == '__main__':
main()
| {
"content_hash": "99a540b8d80b6ba457775a7b4cbb36f9",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 91,
"avg_line_length": 32,
"alnum_prop": 0.6617647058823529,
"repo_name": "s87485759/Monad2015-trial",
"id": "e2f52a3744c40956bf3607f916d9a2bcb9347241",
"size": "3832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LookAhead/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35977"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "8218320ce68d74d026f50a1b4e2ad668",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 170,
"avg_line_length": 38.57142857142857,
"alnum_prop": 0.7111111111111111,
"repo_name": "antoinecarme/pyaf",
"id": "3b4cbe69c09daa2a448188d270fada1d1351ab22",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_MovingAverage/cycle_5/ar_/test_artificial_1024_Difference_MovingAverage_5__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
from mock import patch
from oauthlib import signals
from oauthlib.oauth2 import BackendApplicationClient
from ....unittest import TestCase
@patch('time.time', new=lambda: 1000)
class BackendApplicationClientTest(TestCase):
client_id = "someclientid"
scope = ["/profile"]
kwargs = {
"some": "providers",
"require": "extra arguments"
}
body = "not=empty"
body_up = "not=empty&grant_type=client_credentials"
body_kwargs = body_up + "&some=providers&require=extra+arguments"
token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_in":3600,'
' "scope":"/profile",'
' "example_parameter":"example_value"}')
token = {
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"expires_at": 4600,
"scope": ["/profile"],
"example_parameter": "example_value"
}
def test_request_body(self):
client = BackendApplicationClient(self.client_id)
# Basic, no extra arguments
body = client.prepare_request_body(body=self.body)
self.assertFormBodyEqual(body, self.body_up)
rclient = BackendApplicationClient(self.client_id)
body = rclient.prepare_request_body(body=self.body)
self.assertFormBodyEqual(body, self.body_up)
# With extra parameters
body = client.prepare_request_body(body=self.body, **self.kwargs)
self.assertFormBodyEqual(body, self.body_kwargs)
def test_parse_token_response(self):
client = BackendApplicationClient(self.client_id)
        # Parse token response
response = client.parse_request_body_response(self.token_json, scope=self.scope)
self.assertEqual(response, self.token)
self.assertEqual(client.access_token, response.get("access_token"))
self.assertEqual(client.refresh_token, response.get("refresh_token"))
self.assertEqual(client.token_type, response.get("token_type"))
        # Mismatching scope
self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '3'
token = client.parse_request_body_response(self.token_json, scope="invalid")
self.assertTrue(token.scope_changed)
scope_changes_recorded = []
def record_scope_change(sender, message, old, new):
scope_changes_recorded.append((message, old, new))
signals.scope_changed.connect(record_scope_change)
try:
client.parse_request_body_response(self.token_json, scope="invalid")
self.assertEqual(len(scope_changes_recorded), 1)
message, old, new = scope_changes_recorded[0]
self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
self.assertEqual(old, ['invalid'])
self.assertEqual(new, ['/profile'])
finally:
signals.scope_changed.disconnect(record_scope_change)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
| {
"content_hash": "8437694119fcff82e336b9d9d18e6537",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 104,
"avg_line_length": 37.160919540229884,
"alnum_prop": 0.6390349520569131,
"repo_name": "skion/oauthlib-oidc",
"id": "6b342f0e8f2d5a6eb5688a17d916718c145c6016",
"size": "3257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/oauth2/rfc6749/clients/test_backend_application.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1617"
},
{
"name": "Python",
"bytes": "560438"
}
],
"symlink_target": ""
} |
"""Utility to bulk load resources into the system, e.g. for initial preload"""
__author__ = 'Michael Meisinger'
import yaml
import re
import os
from pyon.core import MSG_HEADER_ACTOR, MSG_HEADER_ROLES, MSG_HEADER_VALID
from pyon.core.bootstrap import get_service_registry
from pyon.core.governance import get_system_actor
from pyon.ion.identifier import create_unique_resource_id, create_unique_association_id
from pyon.ion.resource import get_restype_lcsm
from pyon.public import CFG, log, BadRequest, Inconsistent, NotFound, IonObject, RT, OT, AS, LCS, named_any, get_safe, get_ion_ts, PRED
from ion.util.parse_utils import get_typed_value
# Well known action config keys
KEY_SCENARIO = "scenario"
KEY_ID = "id"
KEY_OWNER = "owner"
KEY_LCSTATE = "lcstate"
KEY_ORGS = "orgs"
# Well known aliases
ID_ORG_ION = "ORG_ION"
ID_SYSTEM_ACTOR = "USER_SYSTEM"
UUID_RE = '^[0-9a-fA-F]{32}$'
class Preloader(object):
def initialize_preloader(self, process, preload_cfg):
log.info("Initialize preloader")
self.process = process
self.preload_cfg = preload_cfg or {}
self._init_preload()
self.rr = self.process.container.resource_registry
self.bulk = self.preload_cfg.get("bulk", False) is True
# Loads internal bootstrapped resource ids that will be referenced during preload
self._load_system_ids()
# Load existing resources by preload ID
self._prepare_incremental()
def _init_preload(self):
self.obj_classes = {} # Cache of class for object types
self.object_definitions = None # Dict of preload rows before processing
self.resource_ids = {} # Holds a mapping of preload IDs to internal resource ids
self.resource_objs = {} # Holds a mapping of preload IDs to the actual resource objects
self.resource_assocs = {} # Holds a mapping of existing associations list by predicate
self.bulk_resources = {} # Keeps resource objects to be bulk inserted/updated
self.bulk_associations = {} # Keeps association objects to be bulk inserted/updated
self.bulk_existing = set() # This keeps the ids of the bulk objects to update instead of delete
def preload_master(self, filename, skip_steps=None):
"""Executes a preload master file"""
log.info("Preloading from master file: %s", filename)
with open(filename, "r") as f:
master_yml = f.read()
        master_cfg = yaml.safe_load(master_yml)
if not "preload_type" in master_cfg or master_cfg["preload_type"] != "steps":
raise BadRequest("Invalid preload steps file")
for step in master_cfg["steps"]:
if skip_steps and step in skip_steps:
log.info("Skipping step %s" % step)
continue
step_filename = "%s/%s.yml" % (os.path.dirname(filename), step)
self._execute_step(step_filename)
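    # Master-file sketch (hypothetical step names), matching the checks above:
    #     preload_type: steps
    #     steps:
    #       - load_orgs
    #       - load_users
    # Each step name resolves to "<dirname of master file>/<step>.yml".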
def _execute_step(self, filename):
"""Executes a preload step file"""
with open(filename, "r") as f:
step_yml = f.read()
step_cfg = yaml.safe_load(step_yml)
if not "preload_type" in step_cfg or step_cfg["preload_type"] != "actions":
raise BadRequest("Invalid preload actions file")
for action in step_cfg["actions"]:
try:
self._execute_action(action)
except Exception as ex:
log.warn("Action failed: " + str(ex), exc_info=True)
self.commit_bulk()
def _execute_action(self, action):
"""Executes a preload action"""
action_type = action["action"]
#log.debug("Preload action %s id=%s", action_type, action.get("id", ""))
scope, func_type = action_type.split(":", 1)
default_funcname = "_load_%s_%s" % (scope, func_type)
action_func = getattr(self.process, default_funcname, None)
if not action_func:
action_funcname = self.preload_cfg["action_plugins"].get(action_type, {})
if not action_funcname:
log.warn("Unknown action: %s", action_type)
return
action_func = getattr(self.process, action_funcname, None)
if not action_func:
log.warn("Action function %s not found for action %s", action_funcname, action_type)
return
action_func(action)
# -------------------------------------------------------------------------
def _load_system_ids(self):
"""Read some system objects for later reference"""
org_objs, _ = self.rr.find_resources(name="ION", restype=RT.Org, id_only=False)
if not org_objs:
raise BadRequest("ION org not found. Was system force_cleaned since bootstrap?")
ion_org_id = org_objs[0]._id
self._register_id(ID_ORG_ION, ion_org_id, org_objs[0])
system_actor = get_system_actor()
system_actor_id = system_actor._id if system_actor else 'anonymous'
self._register_id(ID_SYSTEM_ACTOR, system_actor_id, system_actor if system_actor else None)
def _prepare_incremental(self):
"""
Look in the resource registry for any resources that have a preload ID on them so that
they can be referenced under this preload ID during this load run.
"""
log.debug("Loading prior preloaded resources for reference")
res_objs, res_keys = self.rr.find_resources_ext(alt_id_ns="PRE", id_only=False)
res_preload_ids = [key['alt_id'] for key in res_keys]
res_ids = [obj._id for obj in res_objs]
log.debug("Found %s previously preloaded resources", len(res_objs))
res_assocs = self.rr.find_associations(predicate="*", id_only=False)
        for assoc in res_assocs:
            self.resource_assocs.setdefault(assoc["p"], []).append(assoc)
log.debug("Found %s existing associations", len(res_assocs))
existing_resources = dict(zip(res_preload_ids, res_objs))
if len(existing_resources) != len(res_objs):
raise BadRequest("Stored preload IDs are NOT UNIQUE!!! Cannot link to old resources")
res_id_mapping = dict(zip(res_preload_ids, res_ids))
self.resource_ids.update(res_id_mapping)
res_obj_mapping = dict(zip(res_preload_ids, res_objs))
self.resource_objs.update(res_obj_mapping)
def create_object_from_cfg(self, cfg, objtype, key="resource", prefix="", existing_obj=None):
"""
Construct an IonObject of a determined type from given config dict with attributes.
Convert all attributes according to their schema target type. Supports nested objects.
Supports edit of objects of same type.
"""
log.trace("Create object type=%s, prefix=%s", objtype, prefix)
if objtype == "dict":
schema = None
else:
schema = self._get_object_class(objtype)._schema
obj_fields = {} # Attributes for IonObject creation as dict
nested_done = set() # Names of attributes with nested objects already created
obj_cfg = get_safe(cfg, key)
for subkey, value in obj_cfg.iteritems():
if subkey.startswith(prefix):
attr = subkey[len(prefix):]
if '.' in attr: # We are a parent entry
# TODO: Make sure to not create nested object multiple times
slidx = attr.find('.')
nested_obj_field = attr[:slidx]
parent_field = attr[:slidx+1]
nested_prefix = prefix + parent_field # prefix plus nested object name
if '[' in nested_obj_field and nested_obj_field[-1] == ']':
sqidx = nested_obj_field.find('[')
nested_obj_type = nested_obj_field[sqidx+1:-1]
nested_obj_field = nested_obj_field[:sqidx]
elif objtype == "dict":
nested_obj_type = "dict"
else:
nested_obj_type = schema[nested_obj_field]['type']
# Make sure to not create the same nested object twice
if parent_field in nested_done:
continue
# Support direct indexing in a list
list_idx = -1
if nested_obj_type.startswith("list/"):
_, list_idx, nested_obj_type = nested_obj_type.split("/")
list_idx = int(list_idx)
log.trace("Get nested object field=%s type=%s, prefix=%s", nested_obj_field, nested_obj_type, prefix)
nested_obj = self.create_object_from_cfg(cfg, nested_obj_type, key, nested_prefix)
if list_idx >= 0:
my_list = obj_fields.setdefault(nested_obj_field, [])
if list_idx >= len(my_list):
my_list[len(my_list):list_idx] = [None]*(list_idx-len(my_list)+1)
my_list[list_idx] = nested_obj
else:
obj_fields[nested_obj_field] = nested_obj
nested_done.add(parent_field)
elif objtype == "dict":
# TODO: What about type?
obj_fields[attr] = value
elif attr in schema: # We are the leaf attribute
try:
if value:
fieldvalue = get_typed_value(value, schema[attr])
obj_fields[attr] = fieldvalue
except Exception:
log.warn("Object type=%s, prefix=%s, field=%s cannot be converted to type=%s. Value=%s",
objtype, prefix, attr, schema[attr]['type'], value, exc_info=True)
#fieldvalue = str(fieldvalue)
else:
# warn about unknown fields just once -- not on each row
log.warn("Skipping unknown field in %s: %s%s", objtype, prefix, attr)
if objtype == "dict":
obj = obj_fields
else:
if existing_obj:
# Edit attributes
if existing_obj.type_ != objtype:
raise Inconsistent("Cannot edit resource. Type mismatch old=%s, new=%s" % (existing_obj.type_, objtype))
# TODO: Don't edit empty nested attributes
for attr in list(obj_fields.keys()):
if not obj_fields[attr]:
del obj_fields[attr]
for attr in ('alt_ids','_id','_rev','type_'):
if attr in obj_fields:
del obj_fields[attr]
existing_obj.__dict__.update(obj_fields)
log.trace("Update object type %s using field names %s", objtype, obj_fields.keys())
obj = existing_obj
else:
if cfg.get(KEY_ID, None) and 'alt_ids' in schema:
if 'alt_ids' in obj_fields:
obj_fields['alt_ids'].append("PRE:"+cfg[KEY_ID])
else:
obj_fields['alt_ids'] = ["PRE:"+cfg[KEY_ID]]
log.trace("Create object type %s from field names %s", objtype, obj_fields.keys())
obj = IonObject(objtype, **obj_fields)
return obj
def _get_object_class(self, objtype):
if objtype in self.obj_classes:
return self.obj_classes[objtype]
try:
obj_class = named_any("interface.objects.%s" % objtype)
self.obj_classes[objtype] = obj_class
return obj_class
except Exception:
log.error('failed to find class for type %s' % objtype)
def _get_service_client(self, service):
return get_service_registry().services[service].client(process=self.process)
def _register_id(self, alias, resid, res_obj=None, is_update=False):
"""Keep preload resource in internal dict for later reference"""
if not is_update and alias in self.resource_ids:
raise BadRequest("ID alias %s used twice" % alias)
self.resource_ids[alias] = resid
self.resource_objs[alias] = res_obj
log.trace("Added resource alias=%s to id=%s", alias, resid)
def _read_resource_id(self, res_id):
existing_obj = self.rr.read(res_id)
self.resource_objs[res_id] = existing_obj
self.resource_ids[res_id] = res_id
return existing_obj
def _get_resource_id(self, alias_id):
"""Returns resource ID from preload alias ID, scanning also for real resource IDs to be loaded"""
if alias_id in self.resource_ids:
return self.resource_ids[alias_id]
elif re.match(UUID_RE, alias_id):
# This is obviously an ID of a real resource - let it fail if not existing
self._read_resource_id(alias_id)
log.debug("Referencing existing resource via direct ID: %s", alias_id)
return alias_id
else:
raise KeyError(alias_id)
def _get_resource_obj(self, res_id, silent=False):
"""Returns a resource object from one of the memory locations for given preload or internal ID"""
if self.bulk and res_id in self.bulk_resources:
return self.bulk_resources[res_id]
elif res_id in self.resource_objs:
return self.resource_objs[res_id]
else:
# Real ID not alias - reverse lookup
alias_ids = [alias_id for alias_id,int_id in self.resource_ids.iteritems() if int_id==res_id]
if alias_ids:
return self.resource_objs[alias_ids[0]]
if not silent:
log.debug("_get_resource_obj(): No object found for '%s'", res_id)
return None
def _resource_exists(self, res_id):
if not res_id:
return None
res = self._get_resource_obj(res_id, silent=True)
return res is not None
def _has_association(self, sub, pred, obj):
"""Returns True if the described associated already exists."""
for assoc in self.resource_assocs.get(pred, []):
if assoc.s == sub and assoc.o == obj:
return True
return False
def _update_resource_obj(self, res_id):
"""Updates an existing resource object"""
res_obj = self._get_resource_obj(res_id)
self.rr.update(res_obj)
log.debug("Updating resource %s (pre=%s id=%s): '%s'", res_obj.type_, res_id, res_obj._id, res_obj.name)
def _get_alt_id(self, res_obj, prefix):
alt_ids = getattr(res_obj, 'alt_ids', [])
for alt_id in alt_ids:
if alt_id.startswith(prefix+":"):
alt_id_str = alt_id[len(prefix)+1:]
return alt_id_str
def _get_op_headers(self, owner_id, force_user=False):
headers = {}
if owner_id:
owner_id = self.resource_ids[owner_id]
headers[MSG_HEADER_ACTOR] = owner_id
headers[MSG_HEADER_ROLES] = {'ION': ['SUPERUSER', 'MODERATOR']}
headers[MSG_HEADER_VALID] = '0'
elif force_user:
return self._get_system_actor_headers()
return headers
def _get_system_actor_headers(self):
return {MSG_HEADER_ACTOR: self.resource_ids[ID_SYSTEM_ACTOR],
MSG_HEADER_ROLES: {'ION': ['SUPERUSER', 'MODERATOR']},
MSG_HEADER_VALID: '0'}
def basic_resource_create(self, cfg, restype, svcname, svcop, key="resource",
set_attributes=None, support_bulk=False, **kwargs):
"""
Orchestration method doing the following:
- create an object from a row,
- add any defined constraints,
- make a service call to create resource for given object,
- share resource in a given Org
- store newly created resource id and obj for future reference
- (optional) support bulk create/update
"""
res_id_alias = cfg[KEY_ID]
existing_obj = None
if res_id_alias in self.resource_ids:
# TODO: Catch case when ID used twice
existing_obj = self.resource_objs[res_id_alias]
elif re.match(UUID_RE, res_id_alias):
# This is obviously an ID of a real resource
try:
existing_obj = self._read_resource_id(res_id_alias)
log.debug("Updating existing resource via direct ID: %s", res_id_alias)
except NotFound as nf:
pass # Ok it was not there after all
try:
res_obj = self.create_object_from_cfg(cfg, restype, key, "", existing_obj=existing_obj)
except Exception as ex:
log.exception("Error creating object")
raise
if set_attributes:
for attr, attr_val in set_attributes.iteritems():
setattr(res_obj, attr, attr_val)
if existing_obj:
res_id = self.resource_ids[res_id_alias]
if self.bulk and support_bulk:
self.bulk_resources[res_id] = res_obj
self.bulk_existing.add(res_id) # Make sure to remember which objects are existing
else:
# TODO: Use the appropriate service call here
self.rr.update(res_obj)
else:
if self.bulk and support_bulk:
res_id = self._create_bulk_resource(res_obj, res_id_alias)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None))
self._resource_assign_owner(headers, res_obj)
self._resource_advance_lcs(cfg, res_id)
else:
svc_client = self._get_service_client(svcname)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None), force_user=True)
res_id = getattr(svc_client, svcop)(res_obj, headers=headers, **kwargs)
if res_id:
if svcname == "resource_registry" and svcop == "create":
res_id = res_id[0]
res_obj._id = res_id
self._register_id(res_id_alias, res_id, res_obj)
self._resource_assign_org(cfg, res_id)
return res_id
def _create_bulk_resource(self, res_obj, res_alias=None):
if not hasattr(res_obj, "_id"):
res_obj._id = create_unique_resource_id()
ts = get_ion_ts()
if hasattr(res_obj, "ts_created") and not res_obj.ts_created:
res_obj.ts_created = ts
if hasattr(res_obj, "ts_updated") and not res_obj.ts_updated:
res_obj.ts_updated = ts
res_id = res_obj._id
self.bulk_resources[res_id] = res_obj
if res_alias:
self._register_id(res_alias, res_id, res_obj)
return res_id
def _resource_advance_lcs(self, cfg, res_id):
"""
Change lifecycle state of object to requested state. Supports bulk.
"""
res_obj = self._get_resource_obj(res_id)
restype = res_obj.type_
lcsm = get_restype_lcsm(restype)
initial_lcmat = lcsm.initial_state if lcsm else LCS.DEPLOYED
initial_lcav = lcsm.initial_availability if lcsm else AS.AVAILABLE
lcstate = cfg.get(KEY_LCSTATE, None)
if lcstate:
row_lcmat, row_lcav = lcstate.split("_", 1)
if self.bulk and res_id in self.bulk_resources:
self.bulk_resources[res_id].lcstate = row_lcmat
self.bulk_resources[res_id].availability = row_lcav
else:
if row_lcmat != initial_lcmat: # Vertical transition
self.rr.set_lifecycle_state(res_id, row_lcmat)
if row_lcav != initial_lcav: # Horizontal transition
self.rr.set_lifecycle_state(res_id, row_lcav)
elif self.bulk and res_id in self.bulk_resources:
# Set the lcs to resource type appropriate initial values
self.bulk_resources[res_id].lcstate = initial_lcmat
self.bulk_resources[res_id].availability = initial_lcav
def _resource_assign_org(self, cfg, res_id):
"""
Shares the resource in the given orgs. Supports bulk.
"""
org_ids = cfg.get(KEY_ORGS, None)
if org_ids:
org_ids = get_typed_value(org_ids, targettype="simplelist")
for org_id in org_ids:
org_res_id = self.resource_ids[org_id]
if self.bulk and res_id in self.bulk_resources:
# Note: org_id is alias, res_id is internal ID
org_obj = self._get_resource_obj(org_id)
res_obj = self._get_resource_obj(res_id)
# Create association to given Org
assoc_obj = self._create_association(org_obj, PRED.hasResource, res_obj, support_bulk=True)
else:
svc_client = self._get_service_client("org_management")
svc_client.share_resource(org_res_id, res_id, headers=self._get_system_actor_headers())
def _resource_assign_owner(self, headers, res_obj):
if self.bulk and 'ion-actor-id' in headers:
owner_id = headers['ion-actor-id']
user_obj = self._get_resource_obj(owner_id)
if owner_id and owner_id != 'anonymous':
self._create_association(res_obj, PRED.hasOwner, user_obj, support_bulk=True)
def basic_associations_create(self, cfg, res_alias, support_bulk=False):
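        # Each entry is a "direction,other_id,predicate" string; e.g. (illustrative)
        # "TO,OTHER_ID,hasResource" creates (this resource, hasResource, other),
        # and "FROM,..." swaps subject and object.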
for assoc in cfg.get("associations", []):
direction, other_id, predicate = assoc.split(",")
res_id = self.resource_ids[res_alias]
other_res_id = self.resource_ids[other_id]
if direction == "TO":
self._create_association(res_id, predicate, other_res_id, support_bulk=support_bulk)
elif direction == "FROM":
self._create_association(other_res_id, predicate, res_id, support_bulk=support_bulk)
def _create_association(self, subject=None, predicate=None, obj=None, support_bulk=False):
"""
Create an association between two IonObjects with a given predicate.
Supports bulk mode
"""
if self.bulk and support_bulk:
if not subject or not predicate or not obj:
raise BadRequest("Association must have all elements set: %s/%s/%s" % (subject, predicate, obj))
if isinstance(subject, basestring):
subject = self._get_resource_obj(subject)
if "_id" not in subject:
raise BadRequest("Subject id not available")
subject_id = subject._id
st = subject.type_
if isinstance(obj, basestring):
obj = self._get_resource_obj(obj)
if "_id" not in obj:
raise BadRequest("Object id not available")
object_id = obj._id
ot = obj.type_
assoc_id = create_unique_association_id()
assoc_obj = IonObject("Association",
s=subject_id, st=st,
p=predicate,
o=object_id, ot=ot,
ts=get_ion_ts())
assoc_obj._id = assoc_id
self.bulk_associations[assoc_id] = assoc_obj
return assoc_id, '1-norev'
else:
return self.rr.create_association(subject, predicate, obj)
def commit_bulk(self):
if not self.bulk_resources and not self.bulk_associations:
return
# Perform the create for resources
res_new = [obj for obj in self.bulk_resources.values() if obj["_id"] not in self.bulk_existing]
res = self.rr.rr_store.create_mult(res_new, allow_ids=True)
# Perform the update for resources
res_upd = [obj for obj in self.bulk_resources.values() if obj["_id"] in self.bulk_existing]
res = self.rr.rr_store.update_mult(res_upd)
# Perform the create for associations
assoc_new = [obj for obj in self.bulk_associations.values()]
res = self.rr.rr_store.create_mult(assoc_new, allow_ids=True)
log.info("Bulk stored {} resource objects ({} updates) and {} associations".format(len(res_new), len(res_upd), len(assoc_new)))
self.bulk_resources.clear()
self.bulk_associations.clear()
self.bulk_existing.clear()
| {
"content_hash": "8d1825063deddb482cf45ac4db016c4d",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 135,
"avg_line_length": 45.41372912801484,
"alnum_prop": 0.5722281232126808,
"repo_name": "crchemist/scioncc",
"id": "19dd4d724abf13fc73f00663230d5c32d5b86b62",
"size": "24501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ion/util/preload.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "689"
},
{
"name": "JavaScript",
"bytes": "11408"
},
{
"name": "Python",
"bytes": "2528428"
},
{
"name": "Shell",
"bytes": "2163"
}
],
"symlink_target": ""
} |
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
def indent(width, s):
space = width * ' '
return '\n'.join([(space + line) for line in s.split('\n')])
def make_indenter(width):
def indenter(s): return indent(width, s)
return indenter
class AutoIndent(object):
r"""AutoIndent
replace bracketed template '( %s )' with multi-line template that will
block-indents elements when interpolated with parameter
>>> s1 = AutoIndent('speak(%s)')
>>> s2 = AutoIndent('speak( %s )')
>>> word = 'hello'
>>> paragraph = '\n'.join(['Hello !', 'How are you ?', 'It\'s nice to see you.'])
>>> print s1 % word
speak(hello)
>>> print s2 % paragraph
speak(
Hello !
How are you ?
It's nice to see you.
)
"""
def __init__(self, template):
super(AutoIndent, self).__init__()
multiline = template.count('( %s )')
if multiline and (multiline != template.count('%s')):
            raise ValueError('Template must be homogeneous')
self.template = template.replace('( %s )', '(\n%s\n)')
if multiline:
self.indent = make_indenter(2)
else:
self.indent = lambda x: x
def __mod__(self, value):
if isinstance(value, basestring):
t = (self.indent(value),)
else:
t = tuple([self.indent(v) for v in value])
return self.template % t
def multiline(s):
r"""multiline
>>> multiline('Hello there')
False
>>> multiline('\n'.join(['Hello', 'there']))
True
"""
return s.find('\n') >= 0
class Source(object):
"""Source
>>> i = Source('items')
>>> print i.ITEM_ID
items.ITEM_ID
"""
def __init__(self, alias):
super(Source, self).__init__()
self.alias = alias
self.fields = {}
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
else:
f = Field(self, name)
self.fields[name] = f
return f
def __str__(self): return self.alias
def declaration(self): raise NotImplementedError()
class StatementSource(Source):
"""StatementSource
>>> s = StatementSource('items', 'select * from IMA')
>>> print s.declaration()
(
select * from IMA
) items
"""
def __init__(self, alias, statement):
super(StatementSource, self).__init__(alias)
self.statement = statement
def declaration(self):
return AutoIndent('( %s ) ') % str(self.statement) + self.alias
class NamedSource(Source):
"""NamedSource
>>> i = NamedSource('items', 'i')
>>> print i
i
>>> print i.ITEM_ID
i.ITEM_ID
"""
def __init__(self, name, alias=None):
super(NamedSource, self).__init__(alias)
self.name = name
def __str__(self): return self.alias or self.name
def declaration(self):
if self.alias:
return '%s %s' % (self.name, self.alias)
else:
return self.name
class Expression(object): pass
class CustomExpression(Expression):
def __init__(self, text): self.text = text
def __str__(self): return str(self.text)
custom = CustomExpression
class StringLiteralExpression(Expression):
"""StringLiteralExpression
>>> s = StringLiteralExpression('Huh?')
>>> print s
'Huh?'
"""
def __init__(self, text): self.text = text
def __str__(self): return "'%s'" % (self.text.replace("'", "''"))
def expression(cls, expr):
if isinstance(expr, Expression) and not isinstance(expr, cls): return expr
else: return cls(expr)
expression = classmethod(expression)
strlit = StringLiteralExpression.expression
class BooleanExpression(Expression): pass
class UnaryOperated(object):
def __init__(self, element):
super(UnaryOperated, self).__init__()
self.element = element
def __nonzero__(self): return bool(self.element)
class BinaryOperated(object):
def __init__(self, *elements):
super(BinaryOperated, self).__init__()
self.elements = elements
def __len__(self): return len(self.elements)
def __nonzero__(self):
for e in self.elements:
if e: return True
return False
class IsNull(UnaryOperated, Expression):
"""IsNull
>>> print IsNull('ITEM_ID')
ITEM_ID is null
"""
def __str__(self): return '%s is null' % str(self.element)
class Blanket(UnaryOperated, Expression):
def __str__(self):
s = str(self.element)
if multiline(s): return AutoIndent('( %s )') % s
else: return '(%s)' % s
def blanket(expression):
"""blanket
>>> print blanket('Huh?')
(Huh?)
>>> print blanket(blanket('Huh?'))
(Huh?)
"""
if isinstance(expression, Blanket):
return expression
else:
return Blanket(expression)
def is_composite(expr):
"""is_composite
>>> is_composite(Not('1=1'))
False
>>> is_composite(And('1=1'))
False
>>> is_composite(And('1=1', '2=2'))
True
"""
return isinstance(expr, BinaryOperated) and (len(expr) > 1)
def blanket_as_needed(expression):
"""blanket_as_needed
>>> print blanket_as_needed('Huh?')
(Huh?)
>>> print blanket_as_needed(Not('0=0'))
not (0=0)
>>> print blanket_as_needed(And('0=0', '1=1', '2=2'))
(
(0=0) and
(1=1) and
(2=2)
)
"""
if is_composite(expression) or isinstance(expression, basestring): return Blanket(expression)
else: return expression
class Prefixed(UnaryOperated):
def __init__(self, prefix, element):
super(Prefixed, self).__init__(element)
self.prefix = prefix
def __str__(self): return '%s %s' % (self.prefix, str(blanket_as_needed(self.element)))
class Infixed(BinaryOperated):
def __init__(self, infix, *elements):
super(Infixed, self).__init__(*elements)
self.infix = infix
def __str__(self): return (' %s ' % self.infix).join([str(blanket_as_needed(e)) for e in self.elements if e])
class PrefixedExpression(Prefixed, Expression):
def __init__(self, prefix, expression):
Prefixed.__init__(self, prefix, expression)
Expression.__init__(self)
class Plus(PrefixedExpression):
def __init__(self, expression): super(Plus, self).__init__('+', expression)
class Minus(PrefixedExpression):
def __init__(self, expression): super(Minus, self).__init__('-', expression)
class PrefixedBooleanExpression(Prefixed, BooleanExpression):
def __init__(self, prefix, boolexpr):
Prefixed.__init__(self, prefix, boolexpr)
Expression.__init__(self)
class Not(Prefixed, BooleanExpression):
"""Not
>>> print Not('0=0')
not (0=0)
"""
def __init__(self, boolexpr): super(Not, self).__init__('not', boolexpr)
def invert(boolexpr):
"""invert
>>> print invert('0=0')
not (0=0)
>>> print invert(invert('0=0'))
0=0
"""
if isinstance(boolexpr, Not):
return boolexpr.element
else:
return Not(boolexpr)
class Combinative(object):
def combine(cls, *combinatives):
args = []
for c in combinatives:
if type(c) is cls:
args.extend([e for e in c.elements if e])
else:
args.append(c)
return cls(*args)
combine = classmethod(combine)
class InfixedExpression(Infixed, Expression):
def __init__(self, infix, *expressions):
Infixed.__init__(self, infix, *expressions)
Expression.__init__(self)
class Add(InfixedExpression, Combinative):
"""Add
>>> print Add(1,2,3)
1 + 2 + 3
>>> print Add.combine(1,2,3)
1 + 2 + 3
>>> print Add.combine(1, Add(2,3), 4)
1 + 2 + 3 + 4
"""
def __init__(self, *expressions): super(Add, self).__init__('+', *expressions)
class Sub(InfixedExpression):
def __init__(self, *expressions): super(Sub, self).__init__('-', *expressions)
class Mul(InfixedExpression, Combinative):
def __init__(self, *expressions): super(Mul, self).__init__('*', *expressions)
class Div(InfixedExpression):
def __init__(self, *expressions): super(Div, self).__init__('/', *expressions)
class InfixedBooleanExpression(Infixed, BooleanExpression):
def __init__(self, infix, *boolexprs):
Infixed.__init__(self, infix, *boolexprs)
BooleanExpression.__init__(self)
def __str__(self): return (' %s\n' % self.infix).join([str(blanket(e)) for e in self.elements])
class And(InfixedBooleanExpression, Combinative):
"""And
>>> print And('0=0', '1=1', '2=2')
(0=0) and
(1=1) and
(2=2)
"""
def __init__(self, *boolexprs): super(And, self).__init__('and', *boolexprs)
class Or(InfixedBooleanExpression, Combinative):
def __init__(self, *boolexprs): super(Or, self).__init__('or', *boolexprs)
class InfixedComparisonExpression(Infixed, BooleanExpression):
def __init__(self, infix, left, right):
if isinstance(left, basestring): left = strlit(left)
if isinstance(right, basestring): right = strlit(right)
Infixed.__init__(self, infix, left, right)
BooleanExpression.__init__(self)
left = property(lambda self: self.elements[0])
right = property(lambda self: self.elements[1])
class Like(InfixedComparisonExpression):
"""Like
>>> print Like(custom('ITEM_NAME'), 'Staron%')
ITEM_NAME like 'Staron%'
"""
def __init__(self, value, pattern):
super(Like, self).__init__('like', value, pattern)
class LessThan(InfixedComparisonExpression):
"""LessThan
>>> lessthan = LessThan(1, 2)
>>> print lessthan
1 < 2
>>> lessthan.left
1
>>> lessthan.right
2
"""
def __init__(self, left, right): super(LessThan, self).__init__('<', left, right)
class LessThanOrEqual(InfixedComparisonExpression):
def __init__(self, left, right): super(LessThanOrEqual, self).__init__('<=', left, right)
class GreaterThan(InfixedComparisonExpression):
def __init__(self, left, right): super(GreaterThan, self).__init__('>', left, right)
class GreaterThanOrEqual(InfixedComparisonExpression):
def __init__(self, left, right): super(GreaterThanOrEqual, self).__init__('>=', left, right)
class Equal(InfixedComparisonExpression):
def __init__(self, left, right): super(Equal, self).__init__('=', left, right)
class OracleOuterDecorator(InfixedComparisonExpression):
def __init__(self, decorated, left_outer=True):
super(OracleOuterDecorator, self).__init__(decorated.infix, decorated.left, decorated.right)
self.decorated = decorated
self.left_outer = left_outer
def __str__(self):
if self.left_outer:
return '%s %s %s(+)' % (self.left, self.infix, self.right)
else:
return '%s(+) %s %s' % (self.left, self.infix, self.right)
class NotEqual(InfixedComparisonExpression):
def __init__(self, left, right): super(NotEqual, self).__init__('<>', left, right)
class Field(Expression):
r"""Field
>>> name = Field('item', 'ITEM_NAME')
>>> price = Field('item', 'PRICE')
>>> cost = Field('item', 'COST')
>>> quantity = Field('invoice', 'QUANTITY')
>>> print '\n'.join([str(x) for x in [name, price, cost, quantity]])
item.ITEM_NAME
item.PRICE
item.COST
invoice.QUANTITY
>>> print price > cost
item.PRICE > item.COST
>>> print price * quantity
item.PRICE * invoice.QUANTITY
>>> print name.like('Staron%')
item.ITEM_NAME like 'Staron%'
>>> print name.is_null
item.ITEM_NAME is null
"""
def __init__(self, source, fieldname):
super(Field, self).__init__()
self.source = source
self.fieldname = fieldname
def __str__(self): return '%s.%s' % (str(self.source), self.fieldname)
def __add__(self, other): return Add(self, other)
def __radd__(self, other): return Add(other, self)
def __sub__(self, other): return Sub(self, other)
def __rsub__(self, other): return Sub(other, self)
def __mul__(self, other): return Mul(self, other)
def __rmul__(self, other): return Mul(other, self)
def __div__(self, other): return Div(self, other)
def __rdiv__(self, other): return Div(other, self)
def __pos__(self): return Plus(self)
def __neg__(self): return Minus(self)
def __pow__(self, other): raise NotImplementedError()
def __rpow__(self, other): raise NotImplementedError()
def __abs__(self): raise NotImplementedError()
def __mod__(self, other): raise NotImplementedError()
def __rmod__(self, other): raise NotImplementedError()
def __lt__(self, other): return LessThan(self, other)
def __le__(self, other): return LessThanOrEqual(self, other)
def __gt__(self, other): return GreaterThan(self, other)
def __ge__(self, other): return GreaterThanOrEqual(self, other)
def __eq__(self, other): return Equal(self, other)
def __ne__(self, other): return NotEqual(self, other)
def __and__(self, other): return And(self, other)
def __rand__(self, other): return And(other, self)
def __or__(self, other): return Or(self, other)
def __ror__(self, other): return Or(other, self)
is_null = property(lambda self: IsNull(self))
def like(self, other): return Like(self, other)
class SourceList(object):
"""SourceList
>>> s = SourceList()
>>> item = s('ITEMS')
>>> print item
ITEMS
>>> s('ITEMS')
Traceback (most recent call last):
...
ValueError: Duplicated key ('ITEMS')
"""
def __init__(self):
super(SourceList, self).__init__()
self.clear()
def clear(self):
self.keys = []
self.exprs = []
self.aliases = []
def clone(self):
r = SourceList()
r.assign(self)
return r
def assign(self, other):
self.keys = list(other.keys)
self.exprs = list(other.exprs)
self.aliases = list(other.aliases)
def __str__(self):
def line(expr, alias):
if not alias: return str(expr)
return '%s %s' % (str(expr), str(alias))
return ',\n'.join([line(expr, alias) for expr, alias in zip(self.exprs, self.aliases)])
def __iter__(self): return iter(self.exprs)
def _append(self, expr, alias):
if isinstance(expr, Source):
key = str(expr)
else:
key = str(alias or expr)
if key in self.keys: raise ValueError("Duplicated key ('%s')" % key)
self.keys.append(key)
self.exprs.append(expr)
self.aliases.append(alias)
def __call__(self, name, alias=None):
result = NamedSource(name, alias)
self._append(result, alias)
return result
def statement(self, alias, statement):
result = StatementSource(alias, statement)
self._append(result, alias)
return result
def iter_fields(expression):
r"""iter_fields
>>> ima = NamedSource('ima')
>>> wko = NamedSource('wko')
>>> join = And(ima.IMA_ItemID == wko.WKO_ItemID,
... ima.IMA_UnitMeasure == wko.WKO_UnitMeasure,
... Or(ima.IMA_ItemPrice.is_null, wko.WKO_WorkOrderDate == 3))
...
>>> print '\n'.join([str(x) for x in iter_fields(join)])
ima.IMA_ItemID
wko.WKO_ItemID
ima.IMA_UnitMeasure
wko.WKO_UnitMeasure
ima.IMA_ItemPrice
wko.WKO_WorkOrderDate
"""
if isinstance(expression, Field):
yield expression
elif isinstance(expression, UnaryOperated) and isinstance(expression, Expression):
for x in iter_fields(expression.element):
yield x
elif isinstance(expression, BinaryOperated) and isinstance(expression, Expression):
for expr in expression.elements:
for x in iter_fields(expr):
yield x
class FieldList(object):
"""FieldList
>>> fields = FieldList()
>>> print fields
<BLANKLINE>
>>> sources = SourceList()
>>> ima = sources('IMA')
>>> wko = sources('WKO')
>>> fields.append(ima.IMA_ItemID)
>>> len(fields)
1
>>> fields.append(wko.IMA_ItemID)
Traceback (most recent call last):
...
ValueError: Duplicated key ('IMA_ItemID')
>>> fields.append(wko.IMA_ItemID, 'ITEM_ID')
>>> len(fields)
2
>>> fields.append(ima.WKO_ItemID, 'ITEM_ID')
Traceback (most recent call last):
...
ValueError: Duplicated key ('ITEM_ID')
>>> len(fields)
2
>>> fields.append('getdate()', 'TODAY')
>>> len(fields)
3
>>> print fields
IMA.IMA_ItemID IMA_ItemID,
WKO.IMA_ItemID ITEM_ID,
getdate() TODAY
>>> fields = FieldList()
>>> fields.append(ima.IMA_ItemID)
>>> fields.append(wko.WKO_ItemID)
>>> fields.append('DATE')
>>> print fields
IMA.IMA_ItemID,
WKO.WKO_ItemID,
DATE
"""
def __init__(self):
super(FieldList, self).__init__()
self.clear()
def clear(self):
self.keys = []
self.values = []
self.aliases = []
def clone(self):
r = FieldList()
r.assign(self)
return r
def assign(self, other):
self.keys = list(other.keys)
self.values = list(other.values)
self.aliases = list(other.aliases)
def __len__(self): return len(self.keys)
def __str__(self):
if self.some_was_aliased():
def line(key, value, alias): return '%-50s %s' % (str(value), str(key or alias or value))
return ',\n'.join([line(k,v,a) for k,v,a in zip(self.keys, self.values, self.aliases)])
else:
return ',\n'.join([str(v) for v in self.values])
def _check_key(self, key):
if (key is not None) and (key in self.keys):
raise ValueError("Duplicated key ('%s')" % key)
def _do_append(self, key, value, alias):
self._check_key(key)
self.keys.append(key)
self.values.append(value)
self.aliases.append(alias)
def _append_value(self, value, alias): self._do_append(alias, value, alias)
def _append_field(self, field, alias): self._do_append(alias or field.fieldname, field, alias)
def some_was_aliased(self):
return [a for a in self.aliases if a is not None]
def append(self, value, alias=None):
if isinstance(value, Field):
self._append_field(value, alias)
else:
self._append_value(value, alias)
class BooleanExpressionList(object):
"""BooleanExpressionList
>>> exprs = BooleanExpressionList()
>>> print exprs
<BLANKLINE>
>>> exprs.And('1=1')
>>> print exprs
(1=1)
>>> exprs.invert()
>>> print exprs
not (1=1)
>>> exprs.invert()
>>> print exprs
(1=1)
>>> exprs.And('2=2')
>>> print exprs
(1=1) and
(2=2)
>>> exprs.Or('3=3', '4=4', '5=5')
>>> print exprs
(
(1=1) and
(2=2)
) or
(3=3) or
(4=4) or
(5=5)
>>> exprs.Or('6=6', '7=7')
>>> print exprs
(
(1=1) and
(2=2)
) or
(3=3) or
(4=4) or
(5=5) or
(6=6) or
(7=7)
"""
def __init__(self):
super(BooleanExpressionList, self).__init__()
self.clear()
def clear(self): self.root = None
def clone(self):
r = BooleanExpressionList()
r.assign(self)
return r
def assign(self, other):
import copy
self.root = copy.deepcopy(other.root)
def __str__(self): return str(self.root or '')
def _append(self, booloper, *boolexprs):
if len(boolexprs) > 0:
if self.root is None:
self.root = booloper(*boolexprs)
else:
self.root = booloper.combine(self.root, *boolexprs)
def And(self, *boolexprs): self._append(And, *boolexprs)
def Or(self, *boolexprs): self._append(Or, *boolexprs)
def invert(self):
if self.root is not None: self.root = invert(self.root)
class GroupByList(object):
"""GroupByList
>>> groupby = GroupByList()
>>> print groupby
<BLANKLINE>
>>> groupby.append('INVO_DATE')
>>> groupby.append('ORDER_DATE')
>>> print groupby
INVO_DATE,
ORDER_DATE
"""
def __init__(self):
super(GroupByList, self).__init__()
self.clear()
def clear(self): self.values = []
def clone(self):
r = GroupByList()
r.assign(self)
return r
def assign(self, other): self.values = list(other.values)
def append(self, value): self.values.append(value)
def __str__(self):
return ',\n'.join([str(v) for v in self.values])
class OrderByList(object):
"""OrderByList
>>> orderby = OrderByList()
>>> print orderby
<BLANKLINE>
>>> orderby.append('INVO_DATE')
>>> orderby.append('ORDER_DATE', False)
>>> orderby.append('LINE_NUMBER', True)
>>> print orderby
INVO_DATE,
ORDER_DATE desc,
LINE_NUMBER
"""
def __init__(self):
super(OrderByList, self).__init__()
self.clear()
def clear(self):
self.values = []
self.ascendings = []
def clone(self):
r = OrderByList()
r.assign(self)
return r
def assign(self, other):
self.values = list(other.values)
self.ascendings = list(other.ascendings)
def append(self, value, ascending=True):
self.values.append(value)
self.ascendings.append(ascending)
def __str__(self):
def line(value, ascending): return (ascending and str(value)) or (str(value) + ' desc')
return ',\n'.join([line(v,a) for v,a in zip(self.values, self.ascendings)])
class Function(object):
def __init__(self, name):
super(Function, self).__init__()
self.name = name
def __call__(self, *args):
        args_text = ', '.join([str(blanket_as_needed(a)) for a in args])
return '%s(%s)' % (self.name, args_text)
class FunctionFactory(object):
def __getattr__(self, name): return Function(name)
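# Illustrative use: FunctionFactory().upper(NamedSource('ima').IMA_ItemName)
# returns the string 'upper(ima.IMA_ItemName)'.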
class Statement(object):
def __init__(self):
super(Statement, self).__init__()
self.func = FunctionFactory()
class BaseJoinList(object):
def __init__(self):
super(BaseJoinList, self).__init__()
self.clear()
def clear(self):
self.lefts = []
self.rights = []
self.expressions = []
def clone(self):
r = type(self)()
r.assign(self)
return r
def assign(self, other):
self.lefts = list(other.lefts)
self.rights = list(other.rights)
self.expressions = list(other.expressions)
def is_joined(self, left, right): raise NotImplementedError()
def append(self, left, right, expression):
if self.is_joined(left, right): raise ValueError('Already joined')
sources = frozenset([f.source for f in iter_fields(expression)])
if (len(sources) != 2) or (left not in sources) or (right not in sources):
raise ValueError('Only expressions of fields of joining sources are allowed')
self.lefts.append(left)
self.rights.append(right)
self.expressions.append(expression)
def iter_joinings(self, joined): raise NotImplementedError()
class InnerJoinList(BaseJoinList):
def __init__(self): super(InnerJoinList, self).__init__()
def is_joined(self, left, right):
for l,r in zip(self.lefts, self.rights):
if (l==left) and (r==right): return True
if (l==right) and (r==left): return True
return False
def iter_joinings(self, joined):
for i in xrange(len(self.lefts)):
if self.lefts[i]==joined: yield self.rights[i], self.expressions[i]
for i in xrange(len(self.rights)):
if self.rights[i]==joined: yield self.lefts[i], self.expressions[i]
class OuterJoinList(BaseJoinList):
def __init__(self): super(OuterJoinList, self).__init__()
def is_joined(self, left, right):
for l,r in zip(self.lefts, self.rights):
if (l==left) and (r==right): return True
return False
def iter_joinings(self, joined):
for i in xrange(len(self.lefts)):
if self.lefts[i]==joined: yield self.rights[i], self.expressions[i]
def make_oracle_outer(outer, inner, expression):
if isinstance(expression, InfixedComparisonExpression) and (not isinstance(expression, OracleOuterDecorator)):
if (expression.left == outer) and (expression.right == inner): return OracleOuterDecorator(expression, True)
if (expression.left == inner) and (expression.right == outer): return OracleOuterDecorator(expression, False)
        raise Exception('Unsupported expression')
elif isinstance(expression, InfixedBooleanExpression):
return type(expression)(*[make_oracle_outer(outer, inner, e) for e in expression.elements])
class BaseSelect(Statement):
def __init__(self):
super(BaseSelect, self).__init__()
self.fields = FieldList()
self.sources = SourceList()
self.where = BooleanExpressionList()
self.groupby = GroupByList()
self.having = BooleanExpressionList()
self.orderby = OrderByList()
self.innerjoins = InnerJoinList()
self.outerjoins = OuterJoinList()
def get_from_clause(self): raise NotImplementedError()
def get_where_clause(self): raise NotImplementedError()
def __str__(self):
def section(term, clause):
if clause:
return '%s\n%s' % (term, indent(2, clause))
else:
return ''
fields = str(self.fields) or '*'
froms = section('from', self.get_from_clause())
where = section('where', self.get_where_clause())
groupby = section('group by', str(self.groupby))
having = section('having', str(self.having))
orderby = section('order by', str(self.orderby))
return '\n'.join([x for x in ['select', indent(2, fields), froms, where, groupby, having, orderby] if x])
def join(self, left, right):
def joiner(*exprs): self.innerjoins.append(left, right, And(*exprs))
return joiner
def outer_join(self, left, right):
def joiner(*exprs): self.outerjoins.append(left, right, And(*exprs))
return joiner
class StandardSelect(BaseSelect):
"""StandardSelect
>>> s = StandardSelect()
>>> ima = s.sources('ima')
>>> pst = s.sources('pst')
>>> ima2 = s.sources('ima', 'ima2')
>>> s.join(ima, pst)(ima.IMA_ItemID == pst.PST_ItemID)
>>> s.outer_join(ima, ima2)(ima.IMA_ItemID == ima2.IMA_ItemID)
>>> s.where.And(ima.ItemCost > 100000)
>>> s.where.And(ima.ItemCost < 200000)
>>> s.fields.append(ima.IMA_ItemID, 'ID')
>>> s.fields.append(ima.IMA_ItemCost, 'COST')
>>> s.fields.append(ima.IMA_ItemName, 'ITEM_NAME')
>>> s.groupby.append(ima.IMA_ItemID)
>>> s.groupby.append(ima.IMA_ItemCost)
>>> s.groupby.append(ima.IMA_ItemName)
>>> s.having.And(ima.IMA_ItemName.like(strlit('Huh%')))
>>> s.orderby.append(ima.IMA_ItemName, False)
>>> s.orderby.append(ima.IMA_ItemCost)
>>> print s
select
ima.IMA_ItemID ID,
ima.IMA_ItemCost COST,
ima.IMA_ItemName ITEM_NAME
from
ima
join pst on (ima.IMA_ItemID = pst.PST_ItemID)
left outer join ima ima2 on (ima.IMA_ItemID = ima2.IMA_ItemID)
where
(ima.ItemCost > 100000) and
(ima.ItemCost < 200000)
group by
ima.IMA_ItemID,
ima.IMA_ItemCost,
ima.IMA_ItemName
having
(ima.IMA_ItemName like 'Huh%')
order by
ima.IMA_ItemName desc,
ima.IMA_ItemCost
"""
def get_where_clause(self): return str(self.where)
def get_from_clause(self):
result = []
included = []
for source in self.sources:
if source not in included:
queued = [source]
joinlines = []
while queued:
s = queued.pop(0)
if s not in included:
included.append(s)
for joining, expression in self.innerjoins.iter_joinings(s):
if joining not in included:
queued.append(joining)
joinlines.append('join %s on %s' % (joining.declaration(), str(expression)))
for joining, expression in self.outerjoins.iter_joinings(s):
if joining not in included:
queued.append(joining)
joinlines.append('left outer join %s on %s' % (joining.declaration(), str(expression)))
included.append(s)
if result: result[-1] += ','
result.append(str(source))
result.extend([' %s' % x for x in joinlines])
return '\n'.join(result)
class OracleSelect(BaseSelect):
"""OracleSelect
>>> s = OracleSelect()
>>> ima = s.sources('ima')
>>> pst = s.sources('pst')
>>> ima2 = s.sources('ima', 'ima2')
>>> s.join(ima, pst)(ima.IMA_ItemID == pst.PST_ItemID)
>>> s.outer_join(ima, ima2)(ima.IMA_ItemID == ima2.IMA_ItemID)
>>> s.where.And(ima.ItemCost > 100000)
>>> s.where.And(ima.ItemCost < 200000)
>>> print s
select
*
from
ima,
pst,
ima ima2
where
(ima.IMA_ItemID = pst.PST_ItemID) and
(ima.IMA_ItemID = ima2.IMA_ItemID(+)) and
(ima.ItemCost > 100000) and
(ima.ItemCost < 200000)
"""
def get_where_clause(self):
inner_exprs = self.innerjoins.expressions
outer_packs = zip(self.outerjoins.lefts, self.outerjoins.rights, self.outerjoins.expressions)
outer_exprs = [make_oracle_outer(outer, inner, expr) for outer, inner, expr in outer_packs]
if self.where.root:
where_exprs = [self.where.root]
else:
where_exprs = []
return str(And.combine(*(inner_exprs + outer_exprs + where_exprs)))
def get_from_clause(self): return ',\n'.join([source.declaration() for source in self.sources])
if __name__ == '__main__':
try:
from zope.testing import doctest
except ImportError:
import doctest
doctest.testmod()
| {
"content_hash": "171eebc4954ed9f00dbbf39990e6c0d8",
"timestamp": "",
"source": "github",
"line_count": 976,
"max_line_length": 119,
"avg_line_length": 32.43032786885246,
"alnum_prop": 0.5594907114874258,
"repo_name": "ActiveState/code",
"id": "a475fd237af4cfe27e2bc7cfc1ffea140c23eb48",
"size": "31652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/415345_write_sql_via_python/recipe-415345.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from unittest import skipUnless
from django.db import connection
from django.db.models.deletion import CASCADE
from django.db.models.fields.related import ForeignKey
from django.test import TestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=("c1",),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_a52bd80b123")
def test_index_name(self):
"""
Index names on the built-in database backends::
* Are truncated as needed.
* Include all the column names.
* Include a deterministic hash.
"""
long_name = 'l%sng' % ('o' * 100)
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=('c1', 'c2', long_name),
suffix='ix',
)
expected = {
'mysql': 'indexes_article_c1_c2_looooooooooooooooooo_255179b2ix',
'oracle': 'indexes_a_c1_c2_loo_255179b2ix',
'postgresql': 'indexes_article_c1_c2_loooooooooooooooooo_255179b2ix',
'sqlite': 'indexes_article_c1_c2_l%sng_255179b2ix' % ('o' * 100),
}
if connection.vendor not in expected:
self.skipTest('This test is only supported on the built-in database backends.')
self.assertEqual(index_name, expected[connection.vendor])
def test_index_together(self):
editor = connection.schema_editor()
index_sql = editor._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'postgresql', "This is a postgresql-specific issue")
def test_postgresql_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
@skipUnless(connection.vendor == 'postgresql', "This is a postgresql-specific issue")
def test_postgresql_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180). An index should be created if db_constraint=False (#26171).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
self.skip("This test only applies to the InnoDB storage engine")
index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
self.assertEqual(index_sql, [
'CREATE INDEX `indexes_articletranslation_article_no_constraint_id_d6c0806b` '
'ON `indexes_articletranslation` (`article_no_constraint_id`)'
])
# The index also shouldn't be created if the ForeignKey is added after
# the model was created.
with connection.schema_editor() as editor:
new_field = ForeignKey(Article, CASCADE)
new_field.set_attributes_from_name('new_foreign_key')
editor.add_field(ArticleTranslation, new_field)
self.assertEqual(editor.deferred_sql, [
'ALTER TABLE `indexes_articletranslation` '
'ADD CONSTRAINT `indexes_articletrans_new_foreign_key_id_d27a9146_fk_indexes_a` '
'FOREIGN KEY (`new_foreign_key_id`) REFERENCES `indexes_article` (`id`)'
])
| {
"content_hash": "82d881cd65abcbcf4a98415c11a8e8d1",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 97,
"avg_line_length": 44.24561403508772,
"alnum_prop": 0.6280729579698652,
"repo_name": "erikr/django",
"id": "81e2278b19bb83ac406dc9bd87b1b74bc8c5a05a",
"size": "5044",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/indexes/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53169"
},
{
"name": "HTML",
"bytes": "173592"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12192494"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
__author__ = 'Bohdan Mushkevych'
from threading import RLock
from bson import ObjectId
from db.model.site_statistics import SiteStatistics, DOMAIN_NAME, TIMEPERIOD
from synergy.db.manager import ds_manager
from synergy.system.decorator import thread_safe
class SiteDao(object):
""" Thread-safe Data Access Object for site_XXX table/collection """
def __init__(self, logger):
super(SiteDao, self).__init__()
self.logger = logger
self.lock = RLock()
self.ds = ds_manager.ds_factory(logger)
@thread_safe
def get_one(self, collection_name, domain_name, timeperiod):
collection = self.ds.connection(collection_name)
document = collection.find_one(filter={DOMAIN_NAME: domain_name, TIMEPERIOD: timeperiod})
if document is None:
raise LookupError(f'MongoDB has no site record in {collection_name} for ({domain_name}, {timeperiod})')
return SiteStatistics.from_json(document)
@thread_safe
def update(self, collection_name, instance):
""" method finds Site Statistics record and update it DB representation """
assert isinstance(instance, SiteStatistics)
if instance.db_id:
query = {'_id': ObjectId(instance.db_id)}
else:
query = {DOMAIN_NAME: instance.domain_name,
TIMEPERIOD: instance.timeperiod}
self.ds.update(collection_name, query, instance)
return instance.db_id
@thread_safe
def insert(self, collection_name, instance):
""" inserts a unit of work into MongoDB. """
assert isinstance(instance, SiteStatistics)
collection = self.ds.connection(collection_name)
return collection.insert_one(instance.document).inserted_id
@thread_safe
def remove(self, collection_name, domain_name, timeperiod):
query = {DOMAIN_NAME: domain_name, TIMEPERIOD: timeperiod}
collection = self.ds.connection(collection_name)
collection.delete_one(query)
| {
"content_hash": "a692e393391f02ad8a6361d444e4b727",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 115,
"avg_line_length": 39.23529411764706,
"alnum_prop": 0.672663668165917,
"repo_name": "mushkevych/scheduler",
"id": "fb7a0afc558fce15a4d4bab5c90c4b0e1787640c",
"size": "2001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db/dao/site_dao.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "44059"
},
{
"name": "HTML",
"bytes": "48705"
},
{
"name": "JavaScript",
"bytes": "85240"
},
{
"name": "Python",
"bytes": "593827"
},
{
"name": "Shell",
"bytes": "4570"
}
],
"symlink_target": ""
} |
'''
this algorithm is presented in:
Guibiao Xu, Zheng Cao, Bao-Gang Hu and Jose Principe, Robust support vector machines based on the
rescaled hinge loss, Pattern Recognition, 2017.
'''
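# At each iteration the sample weights are recomputed from the previous SVM's
# hinge loss, v_i = exp(-eta * max(0, 1 - y_i * f(x_i))), which down-weights
# samples with large losses (see the update in fit() below).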
import numpy as np
from sklearn.svm import SVC
from collections import OrderedDict
from config import config
class rsvm:
def __init__(self, config):
'''
config: parameter settings
'''
self.config = config
    def _create_svm_object(self):
        '''
        create an svm object according to kernel type
        '''
        # parameters common to all kernel types
        params = dict(C = self.config['C'], kernel = self.config['kernel'],
                      shrinking = self.config['shrinking'], probability = self.config['probability'],
                      tol = self.config['tol'], cache_size = self.config['cache_size'],
                      class_weight = self.config['class_weight'], verbose = self.config['verbose'],
                      max_iter = self.config['max_iter'], decision_function_shape = self.config['decision_function_shape'],
                      random_state = self.config['random_state'])
        # kernel-specific parameters
        if self.config['kernel'] == 'poly':
            params.update(degree = self.config['degree'], gamma = self.config['gamma'], coef0 = self.config['coef0'])
        elif self.config['kernel'] == 'rbf':
            params.update(gamma = self.config['gamma'])
        elif self.config['kernel'] == 'sigmoid':
            params.update(gamma = self.config['gamma'], coef0 = self.config['coef0'])
        return SVC(**params)
def fit(self, train_fea, train_gnd):
'''
training method
train_fea: array like, shape = (smp_num, fea_num)
train_gnd: array like, shape = (smp_num,), -1 and +1
'''
# check elements in train_gnd, the element should be -1 or +1
assert set(train_gnd) == set([-1, 1])
train_num = train_fea.shape[0]
# save sample weights across iterations
self.smp_weights_mat = np.zeros(shape = (self.config['rsvm_iter_num'], train_num))
# save svm models across iterations
self.svmmodel_dict = OrderedDict()
# save support vector ratios across iterations
self.sv_ratio_vec = np.zeros(shape = (self.config['rsvm_iter_num'],))
self.smp_weights_mat[0] = self.config['rsvm_v0']
for iter_i in range(self.config['rsvm_iter_num']):
self.svmmodel_dict[iter_i] = self._create_svm_object()
self.svmmodel_dict[iter_i].fit(train_fea, train_gnd, sample_weight = self.smp_weights_mat[iter_i])
self.sv_ratio_vec[iter_i] = np.float64(self.svmmodel_dict[iter_i].n_support_.sum()) / train_num * 100
# update weights of samples
if iter_i == (self.config['rsvm_iter_num'] - 1):
break
else:
tmp_outputs = self.svmmodel_dict[iter_i].decision_function(train_fea)
tmp_hinge_loss = np.maximum(0.0, 1.0 - tmp_outputs * train_gnd)
# weights update function
self.smp_weights_mat[iter_i + 1] = np.exp(-self.config['rsvm_eta'] * tmp_hinge_loss)
self.smp_weights_mat = self.smp_weights_mat.transpose()
def predict(self, test_fea, last_model_flag = True):
'''
prediction function
test_fea: array like, shape = (smp_num, fea_num)
last_model_flag: whether only use the last svm model or not
return
pred: array like, shape = (smp_num, iter_num)
'''
if last_model_flag:
return self.svmmodel_dict[self.config['rsvm_iter_num'] - 1].predict(test_fea)
else:
test_num = test_fea.shape[0]
pred = np.zeros(shape = (test_num, self.config['rsvm_iter_num']), dtype = np.int32)
for iter_i in range(self.config['rsvm_iter_num']):
pred[:, iter_i] = self.svmmodel_dict[iter_i].predict(test_fea)
return pred
def score(self, test_fea, test_gnd, last_model_flag = True):
'''
return accuracy on the given test_fea and test_gnd
test_fea: array like, shape = (smp_num, fea_num)
test_gnd: array like, shape = (smp_num,), -1 and +1
last_model_flag: whether only use the last svm model or not
return
accu_vec: a vector
'''
if last_model_flag:
return self.svmmodel_dict[self.config['rsvm_iter_num'] - 1].score(test_fea, test_gnd) * 100
else:
accu_vec = np.zeros(shape = (self.config['rsvm_iter_num'],))
for iter_i in range(self.config['rsvm_iter_num']):
accu_vec[iter_i] = self.svmmodel_dict[iter_i].score(test_fea, test_gnd) * 100
return accu_vec
def decision_function(self, test_fea, last_model_flag = True):
'''
svm outputs
test_fea: array like, shape = (smp_num, fea_num)
last_model_flag: whether only use the last svm model or not
return
distance: array like, shape = (smp_num, iter_num)
'''
if last_model_flag:
return self.svmmodel_dict[self.config['rsvm_iter_num'] - 1].decision_function(test_fea)
else:
test_num = test_fea.shape[0]
distance = np.zeros(shape = (test_num, self.config['rsvm_iter_num']), dtype = np.float64)
for iter_i in range(self.config['rsvm_iter_num']):
distance[:, iter_i] = self.svmmodel_dict[iter_i].decision_function(test_fea)
return distance
if __name__ == '__main__':
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [-1] * 10 + [1] * 10
train_num = 20
config['rsvm_v0'] = np.ones(shape = (20, ), dtype = np.float64)
config['rsvm_eta'] = 0.5
rsvm_obj = rsvm(config)
rsvm_obj.fit(X, y)
print '#### sv ratio vector ####'
print rsvm_obj.sv_ratio_vec
print '#### smp_weights_mat ####'
print rsvm_obj.smp_weights_mat
| {
"content_hash": "9b3659c3edc759049d347f787adae2fb",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 107,
"avg_line_length": 43.450331125827816,
"alnum_prop": 0.6622466087486664,
"repo_name": "FrankTsui/robust_rescaled_svm",
"id": "0f54d33c216f33db7e053889212d414df4479761",
"size": "6561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robust_rescaled_svm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13180"
}
],
"symlink_target": ""
} |
import unittest
from redmate.keyformat import KeyPattern
from redmate.messaging import Message
class KeyFormatTest(unittest.TestCase):
test_data = (
("justkey", "justkey", (1,), ('id',)),
("key:1", "key:{id}", (1,), ('id',)),
("key:2:1", "key:{1}:{0}", (1, 2), ('id', 'val')),
("key:1:0:hi", "key:{id}:{0}:{name}", (0, 1, "hi"), ('x','id','name'))
)
def test_standard_patterns(self):
for case in self.test_data:
expected = case[0]
key_pattern = KeyPattern(case[1])
attrs = case[2]
columns = case[3]
formatted = key_pattern.format(Message(columns, attrs))
self.assertEqual(expected, formatted)
| {
"content_hash": "3eebf87e661cd62c33768718334bcda0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 34.476190476190474,
"alnum_prop": 0.5290055248618785,
"repo_name": "roman-kashitsyn/redmate",
"id": "9b4f5bc2a1a191b0f653252939ddd019ae785c26",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redmate/test/testkeyformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32673"
}
],
"symlink_target": ""
} |
from datetime import date, datetime, time
import operator
import re
from threading import Thread, Event
import time as sleeper
from hydeengine import url
from hydeengine.file_system import File, Folder
class SiteResource(object):
def __init__(self, a_file, node):
super(SiteResource, self).__init__()
self.node = node
self.file = a_file
self.source_file = self.file
self.prerendered = False
if self.node.target_folder:
self.target_file = File(
self.node.target_folder.child(self.file.name))
self.temp_file = File(
self.node.temp_folder.child(self.file.name))
self.last_known_modification_time = a_file.last_modified
@property
def is_layout(self):
return (self.node.type == "layout" or
self.file.name.startswith("_"))
@property
def has_changes(self):
        return (self.last_known_modification_time !=
                self.file.last_modified)
@property
def url(self):
if self.node.url is None:
return None
return url.join(self.node.url, self.file.name)
@property
def last_modified(self):
return self.file.last_modified
@property
def name(self):
return self.file.name
@property
def full_url(self):
if not self.node.full_url:
return None
return url.join(self.node.full_url, self.file.name)
def __repr__(self):
return str(self.file)
class Page(SiteResource):
def __init__(self, a_file, node):
if not node:
raise ValueError("Page cannot exist without a node")
super(Page, self).__init__(a_file, node)
listing_pages = self.node.site.settings.LISTING_PAGE_NAMES
self.listing = a_file.name_without_extension in listing_pages
self.exclude = False
self.display_in_list = None
self.module = node.module
self.created = datetime.strptime("2000-01-01", "%Y-%m-%d")
self.updated = None
self.process()
if type(self.created) == date:
self.created = datetime.combine(self.created, time())
if type(self.updated) == date:
self.updated = datetime.combine(self.updated, time())
elif type(self.updated) != datetime:
self.updated = self.created
@property
def page_name(self):
return self.file.name_without_extension
def get_context_text(self):
start = re.compile(r'.*?{%\s*hyde\s+(.*?)(%}|$)')
end = re.compile(r'(.*?)(%})')
fin = open(self.file.path, 'r')
started = False
text = ''
matcher = start
for line in fin:
match = matcher.match(line)
if match:
text = text + match.group(1)
if started:
break
else:
matcher = end
started = True
elif started:
text = text + line
fin.close()
return text
def add_variables(self, page_vars):
if not page_vars: return
for key, value in page_vars.iteritems():
if not hasattr(Page, key):
setattr(Page, key, None)
setattr(self, key, value)
def process(self):
text = self.get_context_text()
import yaml
context = yaml.load(text)
if not context:
context = {}
self.add_variables(context)
if (self.file.name_without_extension.lower() ==
self.node.folder.name.lower() or
self.file.name_without_extension.lower() in
self.node.site.settings.LISTING_PAGE_NAMES):
self.listing = True
if self.display_in_list is None:
self.display_in_list = (not self.listing and
not self.exclude and
not self.file.name.startswith("_") and
self.file.kind == "html")
def _make_clean_url(self, page_url):
if self.node.listing_page == self:
page_url = self.node.url
else:
page_url = url.clean_url(page_url)
if self.node.site.settings.APPEND_SLASH or not page_url:
page_url += "/"
return page_url
@property
def url(self):
page_url = super(Page, self).url
# clean url generation requires knowing whether or not a page is a
# listing page prior to generating its url
if self.node.site.settings.GENERATE_CLEAN_URLS:
page_url = self._make_clean_url(page_url)
return page_url
@property
def full_url(self):
page_url = super(Page, self).full_url
# clean url generation requires knowing whether or not a page is a
# listing page prior to generating its url
if self.node.site.settings.GENERATE_CLEAN_URLS:
page_url = self._make_clean_url(page_url)
return page_url
class SiteNode(object):
def __init__(self, folder, parent=None):
super(SiteNode, self).__init__()
self.folder = folder
self.parent = parent
self.site = self
if self.parent:
self.site = self.parent.site
self.children = []
self.resources = []
def __repr__(self):
return str(self.folder)
@property
def level(self, include_top_level=False):
level = -1 if not include_top_level else 0
parent = self.parent
while parent:
            level += 1
parent = parent.parent
return level
@property
def level_list(self):
# for loop in templates can only be used on sequences.
level_list = [ True for x in range(0, self.level)]
return level_list
@property
def simple_dict(self):
ress = []
for resource in self.walk_resources():
fragment = Folder(
resource.node.folder.get_fragment(
self.site.folder.path)).child(resource.file.name)
res = dict(name=resource.file.name, path=fragment)
ress.append(res)
nodes = []
for node in self.children:
nodes.append(node.simple_dict)
return dict(
name=self.folder.name,
path=self.folder.get_fragment(self.site.folder.path),
resources=ress,
nodes=nodes)
@property
def isroot(self):
return not self.parent
@property
def name(self):
return self.folder.name
@property
def author(self):
return self.site.settings.SITE_AUTHOR
@property
def has_listing(self):
        return self.listing_page is not None
def walk(self):
yield self
for child in self.children:
for node in child.walk():
yield node
def walk_reverse(self):
yield self
for child in reversed(self.children):
for node in child.walk_reverse():
yield node
def walk_resources(self):
for node in self.walk():
for resource in node.resources:
yield resource
def walk_resources_reverse(self):
for node in self.walk_reverse():
for resource in reversed(node.resources):
yield resource
def add_child(self, folder):
if ContentNode.is_content(self.site, folder):
node = ContentNode(folder, parent=self)
elif LayoutNode.is_layout(self.site, folder):
node = LayoutNode(folder, parent=self)
elif MediaNode.is_media(self.site, folder):
node = MediaNode(folder, parent=self)
else:
node = SiteNode(folder, parent=self)
self.children.append(node)
self.site.child_added(node)
return node
def add_resource(self, a_file):
resource = self._add_resource(a_file)
self.site.resource_added(resource)
return resource
def remove_resource(self, resource):
self.resources.remove(resource)
self.site.resource_removed(resource)
def _add_resource(self, a_file):
resource = SiteResource(a_file, self)
self.resources.append(resource)
return resource
def find_node(self, folder):
try:
#print 'FIND NODE', folder, self.site.nodemap.get(folder.path)
return self.site.nodemap[folder.path]
except KeyError:
#print 'FAILED FIND NODE', folder
return None
find_child = find_node
def find_resource(self, a_file):
try:
return self.site.resourcemap[a_file.path]
except KeyError:
return None
@property
def source_folder(self):
return self.folder
@property
def target_folder(self):
return None
@property
def temp_folder(self):
return None
@property
def url(self):
return None
@property
def full_url(self):
if self.url is None:
return None
return url.join(self.site.settings.SITE_WWW_URL, self.url)
@property
def type(self):
return None
class ContentNode(SiteNode):
def __init__(self, folder, parent=None):
super(ContentNode, self).__init__(folder, parent)
self.listing_page = None
self.feed_url = None
walk_pages = SiteNode.walk_resources
walk_pages_reverse = SiteNode.walk_resources_reverse
def walk_child_pages(self, sorting_key='url'):
"""
        Like walk_resources, but starts with the child nodes of the
        current node and returns only .html Page objects (instead of
        Pages and other files), sorted by the given key.
        It also adds a level attribute to each page, used to create an
        indented display when listing content.
        """
child_pages = []
for child in self.children:
for node in child.walk():
for resource in node.resources:
if resource.file.kind == "html":
resource.level = resource.url.count('/')
child_pages.append(resource)
def get_sorting_key(a_resource):
return getattr(a_resource, sorting_key)
child_pages.sort(key=get_sorting_key)
return child_pages
def walk_child_pages_by_updated(self):
"""
        Like walk_child_pages, but returns results sorted by the
        updated date, i.e. reverse chronological order with the most
        recent Page objects first.
"""
child_pages = self.walk_child_pages(sorting_key='updated')
child_pages.reverse()
return child_pages
@property
def module(self):
module = self
while (module.parent and
not module.parent == self.site.content_node):
module = module.parent
return module
@property
def name(self):
if self == self.site.content_node:
return self.site.name
else:
return super(ContentNode, self).name
@property
def pages(self):
return self.resources
@property
def ancestors(self):
node = self
ancestors = []
while not node.isroot:
ancestors.append(node)
node = node.parent
ancestors.reverse()
return ancestors
@staticmethod
def is_content(site, folder):
return (site.content_folder.same_as(folder) or
site.content_folder.is_ancestor_of(folder))
def _add_resource(self, a_file):
page = Page(a_file, self)
if page.listing and not self.listing_page:
self.listing_page = page
self.resources.append(page)
page.node.sort()
if not page.module == self.site:
page.module.flatten_and_sort()
return page
def sort(self):
self.resources.sort(key=operator.attrgetter("created"), reverse=True)
prev = None
for page in self.resources:
page.prev = None
page.next = None
if page.display_in_list:
if prev:
prev.next = page
page.prev = prev
page.next = None
prev = page
for node in self.children:
node.sort()
def flatten_and_sort(self):
flattened_pages = []
prev_in_module = None
for page in self.walk_pages():
flattened_pages.append(page)
flattened_pages.sort(key=operator.attrgetter("created"), reverse=True)
for page in flattened_pages:
page.next_in_module = None
if page.display_in_list:
if prev_in_module:
prev_in_module.next_in_module = page
page.prev_in_module = prev_in_module
page.next_in_module = None
prev_in_module = page
@property
def target_folder(self):
deploy_folder = self.site.target_folder
return deploy_folder.child_folder_with_fragment(self.fragment)
@property
def temp_folder(self):
temp_folder = self.site.temp_folder
return temp_folder.child_folder_with_fragment(self.fragment)
@property
def fragment(self):
return self.folder.get_fragment(self.site.content_folder)
@property
def url(self):
return url.join(self.site.settings.SITE_ROOT,
url.fixslash(
self.folder.get_fragment(self.site.content_folder)))
@property
def type(self):
return "content"
@property
def listing_url(self):
return self.listing_page.url
class LayoutNode(SiteNode):
@staticmethod
def is_layout(site, folder):
return (site.layout_folder.same_as(folder) or
site.layout_folder.is_ancestor_of(folder))
@property
def fragment(self):
return self.folder.get_fragment(self.site.layout_folder)
@property
def type(self):
return "layout"
class MediaNode(SiteNode):
@staticmethod
def is_media(site, folder):
return (site.media_folder.same_as(folder) or
site.media_folder.is_ancestor_of(folder))
@property
def fragment(self):
return self.folder.get_fragment(self.site.media_folder)
@property
def url(self):
return url.join(self.site.settings.SITE_ROOT,
url.fixslash(
self.folder.get_fragment(self.site.folder)))
@property
def type(self):
return "media"
@property
def target_folder(self):
deploy_folder = self.site.target_folder
return deploy_folder.child_folder_with_fragment(
Folder(self.site.media_folder.name).child(self.fragment))
@property
def temp_folder(self):
temp_folder = self.site.temp_folder
return temp_folder.child_folder_with_fragment(
Folder(self.site.media_folder.name).child(self.fragment))
class SiteInfo(SiteNode):
def __init__(self, settings, site_path):
super(SiteInfo, self).__init__(Folder(site_path))
self.settings = settings
self.m = None
self._stop = Event()
self.nodemap = {site_path:self}
self.resourcemap = {}
@property
def name(self):
return self.settings.SITE_NAME
@property
def content_node(self):
return self.nodemap[self.content_folder.path]
@property
def fragment(self):
return ""
@property
def media_node(self):
return self.nodemap[self.media_folder.path]
@property
def layout_node(self):
return self.nodemap[self.layout_folder.path]
@property
def content_folder(self):
return Folder(self.settings.CONTENT_DIR)
@property
def layout_folder(self):
return Folder(self.settings.LAYOUT_DIR)
@property
def media_folder(self):
return Folder(self.settings.MEDIA_DIR)
@property
def temp_folder(self):
return Folder(self.settings.TMP_DIR)
@property
def target_folder(self):
return Folder(self.settings.DEPLOY_DIR)
def child_added(self, node):
self.nodemap[node.folder.path] = node
def resource_added(self, resource):
self.resourcemap[resource.file.path] = resource
def resource_removed(self, resource):
if resource.file.path in self.resourcemap:
del self.resourcemap[resource.file.path]
def remove_node(self, node):
for node in node.walk():
if node.folder.path in self.nodemap:
del self.nodemap[node.folder.path]
for resource in node.walk_resources():
self.resource_removed(resource)
if node.parent and node in node.parent.children:
node.parent.children.remove(node)
def monitor(self, queue=None, waittime=1):
if self.m and self.m.isAlive():
raise "A monitor is currently running."
self._stop.clear()
self.m = Thread(target=self.__monitor_thread__,
kwargs={"waittime":waittime, "queue": queue})
self.m.start()
return self.m
def dont_monitor(self):
if not self.m or not self.m.isAlive():
return
self._stop.set()
self.m.join()
self._stop.clear()
def __monitor_thread__(self, queue, waittime):
while not self._stop.isSet():
try:
self.refresh(queue)
except:
if queue:
queue.put({"exception": True})
raise
if self._stop.isSet():
break
sleeper.sleep(waittime)
def find_and_add_resource(self, a_file):
resource = self.find_resource(a_file)
if resource:
return resource
node = self.find_and_add_node(a_file.parent)
return node.add_resource(a_file)
def find_and_add_node(self, folder):
node = self.find_node(folder)
if node:
return node
node = self.find_and_add_node(folder.parent)
return node.add_child(folder)
def refresh(self, queue=None):
site = self
# Have to poll for changes since there is no reliable way
# to get notification in a platform independent manner
class Visitor(object):
def visit_folder(self, folder):
return folder.allow(**site.settings.FILTER)
def visit_file(self, a_file):
if not a_file.allow(**site.settings.FILTER):
return
resource = site.find_resource(a_file)
change = None
if not resource:
resource = site.find_and_add_resource(a_file)
change = "Added"
elif resource.has_changes:
change = "Modified"
resource.last_known_modification_time = a_file.last_modified
if change:
if queue:
queue.put({
"change": change,
"resource": resource,
"exception": False
})
visitor = Visitor()
self.layout_folder.walk(visitor)
self.content_folder.walk(visitor)
self.media_folder.walk(visitor)
nodes_to_remove = []
for node in self.walk():
if not node.folder.exists:
                if queue:
                    queue.put({
                        "change": "NodeRemoved",
                        "node": node,
                        "exception": False
                    })
nodes_to_remove += [node]
for node in nodes_to_remove:
self.remove_node(node)
for resource in self.walk_resources():
if not resource.file.exists:
if queue:
queue.put({
"change":"Deleted",
"resource":resource,
"exception": False
})
resource.node.remove_resource(resource)
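# Minimal usage sketch of the classes above (the settings object, site path and
# queue below are assumptions for illustration, not part of this module):
#
#     from Queue import Queue
#     site = SiteInfo(settings, '/path/to/site')
#     site.refresh()                  # one-off scan of layout/content/media
#     changes = Queue()
#     site.monitor(queue=changes)     # background thread polls for changes
#     site.dont_monitor()             # stop the polling thread and join it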
| {
"content_hash": "11b7102793e4bfda684f20e63a998915",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 79,
"avg_line_length": 29.866863905325445,
"alnum_prop": 0.5631996037642397,
"repo_name": "cloudkick/cast-site",
"id": "7e2b240216a54a2346aae9f9caadd054b2c384b3",
"size": "20190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyde/hydeengine/siteinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3085"
},
{
"name": "Emacs Lisp",
"bytes": "349"
},
{
"name": "JavaScript",
"bytes": "23058"
},
{
"name": "Python",
"bytes": "990673"
}
],
"symlink_target": ""
} |
import unittest
import tutorial as validator
class TestTutorialValidators(unittest.TestCase):
pass
| {
"content_hash": "b58ef65edbf382dd6e575b73ea2bd748",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 20.8,
"alnum_prop": 0.8269230769230769,
"repo_name": "othieno/geotagx-tool-validator",
"id": "a6d79c76de53934648dcb9abeac0122c691cd347",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_tutorial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116315"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_role_ref import V1RoleRef
class TestV1RoleRef(unittest.TestCase):
""" V1RoleRef unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1RoleRef(self):
"""
Test V1RoleRef
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_role_ref.V1RoleRef()
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "47f7ff754b7565c851e633c0173bc4fe",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 105,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.6734234234234234,
"repo_name": "mbohlool/client-python",
"id": "7fbb7a25b1bfc412aacfb8af853ae9d3b9d86dd1",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1_role_ref.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
} |
import random
def cxSimulatedBinary(var1, var2, eta=15):
"""Executes a simulated binary crossover that modify in-place the input
individuals. The simulated binary crossover expects :term:`sequence`
individuals of floating point numbers.
:param var1: The first variable participating in the crossover.
:param var2: The second variable participating in the crossover.
:param eta: Crowding degree of the crossover. A high eta will produce
children resembling to their parents, while a small eta will
produce solutions much more different.
:returns: A tuple of two variables.
This function uses the :func:`~random.random` function from the python base
:mod:`random` module.
"""
for i, (x1, x2) in enumerate(zip(var1, var2)):
rand = random.random()
if rand <= 0.5:
beta = 2. * rand
else:
beta = 1. / (2. * (1. - rand))
beta **= 1. / (eta + 1.)
var1[i] = 0.5 * (((1 + beta) * x1) + ((1 - beta) * x2))
var2[i] = 0.5 * (((1 - beta) * x1) + ((1 + beta) * x2))
return var1, var2
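if __name__ == '__main__':
    # Illustrative run (a minimal sketch; the two parent vectors below are
    # made-up toy values, not from the original module):
    random.seed(0)
    dad = [1.0, 2.0, 3.0]
    mom = [4.0, 5.0, 6.0]
    child1, child2 = cxSimulatedBinary(dad, mom, eta=15)
    print(child1)
    print(child2)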
| {
"content_hash": "ffcd82642acc42faead7c148b157960c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 38.827586206896555,
"alnum_prop": 0.6047957371225577,
"repo_name": "DailyActie/Surrogate-Model",
"id": "2a8ef2404f5ae8e40b51ca04237873c0c6e66e99",
"size": "2324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surrogate/crossover/cxSimulatedBinary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |