ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_toast
version_added: "2.4"
short_description: Sends a toast Windows notification to logged-in users on Windows 10 or later hosts
description:
- Sends alerts which appear in the Action Center area of the Windows desktop.
options:
expire:
description:
- How long in seconds before the notification expires.
default: 45
group:
description:
- Which notification group to add the notification to.
default: Powershell
msg:
description:
- The message to appear inside the notification. May include \n to format the message to appear within the Action Center.
default: 'Hello, World!'
popup:
description:
- If false, the notification will not pop up and will only appear in the Action Center.
type: bool
default: yes
tag:
description:
- The tag to add to the notification.
default: Ansible
title:
description:
- The notification title, which appears in the pop-up.
default: Notification HH:mm
author:
- Jon Hawkesworth (@jhawkesworth)
notes:
- This module must run on a Windows 10 or Server 2016 host, so ensure your play targets Windows hosts, or delegates to a Windows host.
- The module does not fail if there are no logged-in users to notify.
- Messages are only sent to the local host where the module is run.
- You must run this module with async, otherwise it will hang until the expire period has passed.
'''
EXAMPLES = r'''
- name: Warn logged-in users of an impending upgrade (note the use of async to stop the module from waiting until the notification expires).
win_toast:
expire: 60
title: System Upgrade Notification
msg: Automated upgrade about to start. Please save your work and log off before {{ deployment_start_time }}
async: 60
poll: 0
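# A second, illustrative example (not from the original module documentation):
# deliver the alert only to the Action Center, with no pop up, again using async
# so the task does not block until the notification expires.
- name: Quietly notify logged-in users via the Action Center only
  win_toast:
    expire: 120
    group: Maintenance
    tag: Ansible
    popup: no
    title: Maintenance window
    msg: Patching starts shortly. Please save your work.
  async: 120
  poll: 0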
'''
RETURN = r'''
expire_at_utc:
description: Calculated UTC date/time when the notification expires.
returned: always
type: string
sample: 07 July 2017 04:50:54
no_toast_sent_reason:
description: Text containing the reason why a notification was not sent.
returned: when no logged in users are detected
type: string
sample: No logged in users to notify
sent_localtime:
description: Local date/time when the notification was sent.
returned: always
type: string
sample: 07 July 2017 05:45:54
time_taken:
description: How long the module took to run on the remote windows host in seconds.
returned: always
type: float
sample: 0.3706631999999997
toast_sent:
description: Whether the module was able to send a toast notification or not.
returned: always
type: boolean
sample: false
'''
| {
"content_hash": "afe5b92546e3705bcc025e6b8c5bc9da",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 137,
"avg_line_length": 33.95180722891566,
"alnum_prop": 0.7026259758694109,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "c56c32f0a15268765991189d8272b711f1d0dd94",
"size": "3157",
"binary": false,
"copies": "42",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/windows/win_toast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
import dateparser
import re
import os
import sys
import requests
from pyquery import PyQuery as pq
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from models import trackingStatus, trackingEvent
NAME = "UPS"
ID = __name__[10:]  # strip the leading "providers." package prefix, leaving e.g. "ups"
POPULARITY = 4
def guess(number):
return number.startswith('1Z')
def track(number):
"""Request tracking company about package status.
:param number: package number in tracking company
:return: Package status
"""
r = requests.post("https://wwwapps.ups.com/WebTracking/track",
data={'trackNums': number,
'loc': 'pl_PL',
'track.x': 'Monitoruj'
}
)
d = pq(r.text)
table = d('table.dataTable')
status = 'TRANSIT'
events = []
for x, row in enumerate(table('tr').items()):
if x > 0:
stage = []
for t in row('td').items():
td = t.text()
td = td.translate({ord(c): None for c in '\n\t\r'})  # drop newlines/tabs/returns
td = re.sub(r'\s+', ' ', td)  # collapse remaining whitespace runs
stage.append(td)
stage_date = dateparser.parse("{} {}".format(stage[1], stage[2]), settings={'DATE_ORDER': 'YMD'})
events.append(trackingEvent(time=stage_date, place=stage[0], status=stage[3]))
if re.search("Doręczono", stage[3]):
status = "DELIVERED"
if len(events) > 0:
return trackingStatus(number, ID, status, events)
else:
return trackingStatus(number, ID, 'NOTFOUND', [])
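# Minimal usage sketch (added for illustration only; the tracking number below is
# fabricated and track() performs a live HTTP request to the UPS endpoint above):
#
#     if guess("1Z999AA10123456784"):
#         print(track("1Z999AA10123456784"))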
| {
"content_hash": "3d077f639b94854f280e57ab0a97cbf6",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 109,
"avg_line_length": 28.70909090909091,
"alnum_prop": 0.5332488917036099,
"repo_name": "krzynio/pl-packagetrack",
"id": "dea39c2e7d745f7bb008656e08ab19dc1ae946c1",
"size": "1602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "providers/ups.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17778"
}
],
"symlink_target": ""
} |
import os
import train
import argparse
def launch_training(model_name, **kwargs):
# Launch training
train.train(model_name, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Experiments for Eve')
parser.add_argument('list_experiments', type=str, nargs='+',
help='List of experiment names. E.g. Eve SGD Adam --> will run a training session with each optimizer')
parser.add_argument('--model_name', default='CNN', type=str,
help='Model name: CNN, Big_CNN or FCN')
parser.add_argument('--batch_size', default=128, type=int,
help='Batch size')
parser.add_argument('--nb_epoch', default=30, type=int,
help='Number of epochs')
parser.add_argument('--dataset', type=str, default="cifar10",
help='Dataset, cifar10, cifar100 or mnist')
args = parser.parse_args()
list_dir = ["figures", "log"]
for d in list_dir:
if not os.path.exists(d):
os.makedirs(d)
for experiment_name in args.list_experiments:
optimizer = experiment_name.split("_")[0]
assert optimizer in ["Eve", "Adam", "SGD"], "Invalid optimizer"
assert args.model_name in ["CNN", "Big_CNN", "FCN"], "Invalid model name"
# Set default params
d_params = {"optimizer": optimizer,
"experiment_name": experiment_name,
"batch_size": args.batch_size,
"nb_epoch": args.nb_epoch,
"dataset": args.dataset
}
# Launch training
launch_training(args.model_name, **d_params)
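# Illustrative command line (the experiment names are arbitrary labels; the optimizer
# is taken from the text before the first underscore, as parsed above):
#
#     python main.py Eve_run1 Adam_run1 SGD_run1 --model_name CNN --batch_size 128 \
#         --nb_epoch 30 --dataset cifar10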
| {
"content_hash": "eb7c0aa0ea88dc84bb25d61e750208c0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 127,
"avg_line_length": 36.170212765957444,
"alnum_prop": 0.5682352941176471,
"repo_name": "ChampionZP/DeepLearningImplementations",
"id": "4c0e660a1eb46c2712e2b00efe49743d58cd3195",
"size": "1700",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Eve/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315943"
}
],
"symlink_target": ""
} |
from decimal import Decimal as D
import logging
from django.views.generic import RedirectView as DjangoRedirectView
from django.views.generic import View
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.db.models import get_model
from django.utils.http import urlencode
from django.utils import six
from django.utils.translation import ugettext_lazy as _
import oscar
from oscar.apps.payment.exceptions import UnableToTakePayment
from oscar.core.loading import get_class
from oscar.apps.shipping.methods import FixedPrice, NoShippingRequired
from oscar.apps.checkout import utils as checkout_utils
from paypal.express.facade import (
get_paypal_url, fetch_transaction_details, confirm_transaction)
from paypal.express.exceptions import (
EmptyBasketException, MissingShippingAddressException,
MissingShippingMethodException, InvalidBasket)
from paypal.exceptions import PayPalError
# Load views dynamically
PaymentDetailsView = get_class('checkout.views', 'PaymentDetailsView')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
ShippingAddress = get_model('order', 'ShippingAddress')
Country = get_model('address', 'Country')
Basket = get_model('basket', 'Basket')
Repository = get_class('shipping.repository', 'Repository')
Applicator = get_class('offer.utils', 'Applicator')
Selector = get_class('partner.strategy', 'Selector')
Source = get_model('payment', 'Source')
SourceType = get_model('payment', 'SourceType')
logger = logging.getLogger('paypal.express')
class ShippingMethodMixin(object):
def get_current_shipping_method(self):
session_data = checkout_utils.CheckoutSessionData(self.request)
shipping_method_code = session_data._get('shipping', 'method_code')
shipping_method = Repository().find_by_code(
shipping_method_code,
self.request.basket,
)
if not shipping_method:
shipping_method = self.get_default_shipping_method(
self.request.basket,
)
return shipping_method
def get_default_shipping_method(self, basket):
return Repository().get_default_shipping_method(
request=self.request,
user=self.request.user,
basket=self.request.basket,
)
class RedirectView(ShippingMethodMixin, CheckoutSessionMixin, DjangoRedirectView):
"""
Initiate the transaction with PayPal and redirect the user
to PayPal's Express Checkout to perform the transaction.
"""
permanent = False
# Setting to distinguish if the site has already collected a shipping
# address. This is False when redirecting to PayPal straight from the
# basket page but True when redirecting from checkout.
as_payment_method = False
def get_redirect_url(self, **kwargs):
try:
basket = self.request.basket
url = self._get_redirect_url(basket, **kwargs)
except PayPalError as e:
messages.error(
self.request, _("An error occurred communicating with PayPal"))
logger.exception("An error occurred communicating with PayPal")
if self.as_payment_method:
url = reverse('checkout:payment-details')
else:
url = reverse('basket:summary')
return url
except InvalidBasket as e:
messages.warning(self.request, six.text_type(e))
logger.exception("Invalid Basket")
return reverse('basket:summary')
except EmptyBasketException as e:
messages.error(self.request, _("Your basket is empty"))
logger.exception("Empty basket")
return reverse('basket:summary')
except MissingShippingAddressException as e:
messages.error(
self.request, _("A shipping address must be specified"))
logger.exception("A shipping address must be specified")
return reverse('checkout:shipping-address')
except MissingShippingMethodException:
messages.error(
self.request,
_("A shipping method must be specified"),
)
return reverse('checkout:shipping-method')
else:
# Transaction successfully registered with PayPal. Now freeze the
# basket so it can't be edited while the customer is on the PayPal
# site.
basket.freeze()
logger.info("Basket #%s - redirecting to %s", basket.id, url)
return url
def _get_redirect_url(self, basket, **kwargs):
if basket.is_empty:
raise EmptyBasketException()
params = {
'basket': basket,
'shipping_methods': [] # setup a default empty list
} # to support no_shipping
user = self.request.user
if self.as_payment_method:
if basket.is_shipping_required():
# Only check for shipping details if required.
shipping_addr = self.get_shipping_address(basket)
if not shipping_addr:
raise MissingShippingAddressException()
shipping_method = self.get_shipping_method(
basket, shipping_addr)
if not shipping_method:
raise MissingShippingMethodException()
params['shipping_address'] = shipping_addr
params['shipping_method'] = shipping_method
params['shipping_methods'] = []
else:
shipping_method = self.get_current_shipping_method()
if shipping_method:
params['shipping_methods'] = [shipping_method]
if settings.DEBUG:
# Determine the localserver's hostname to use when
# in testing mode
params['host'] = self.request.META['HTTP_HOST']
if user.is_authenticated():
params['user'] = user
params['paypal_params'] = self._get_paypal_params()
return get_paypal_url(**params)
def _get_paypal_params(self):
"""
Return any additional PayPal parameters
"""
return {}
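# Illustrative subclass (not part of this module): extra PayPal NVP parameters,
# such as BRANDNAME, could be supplied by overriding _get_paypal_params.
#
#     class BrandedRedirectView(RedirectView):
#         def _get_paypal_params(self):
#             return {'BRANDNAME': 'My Shop'}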
class CancelResponseView(RedirectView):
permanent = False
def get(self, request, *args, **kwargs):
basket = get_object_or_404(Basket, id=kwargs['basket_id'],
status=Basket.FROZEN)
basket.thaw()
logger.info("Payment cancelled (token %s) - basket #%s thawed",
request.GET.get('token', '<no token>'), basket.id)
return super(CancelResponseView, self).get(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
messages.error(self.request, _("PayPal transaction cancelled"))
return reverse('basket:summary')
# Upgrading notes: when we drop support for Oscar 0.6, this class can be
# refactored to pass variables around more explicitly (instead of assigning
# things to self so they are accessible in a later method).
class SuccessResponseView(ShippingMethodMixin, PaymentDetailsView):
template_name_preview = 'paypal/express/preview.html'
preview = True
# We don't have the usual pre-conditions (Oscar 0.7+)
@property
def pre_conditions(self):
return [] if oscar.VERSION[:2] >= (0, 8) else ()
def get(self, request, *args, **kwargs):
"""
Fetch details about the successful transaction from PayPal. We use
these details to show a preview of the order with a 'submit' button to
place it.
"""
try:
self.payer_id = request.GET['PayerID']
self.token = request.GET['token']
except KeyError:
# Manipulation - redirect to basket page with warning message
logger.warning("Missing GET params on success response page")
messages.error(
self.request,
_("Unable to determine PayPal transaction details"))
return HttpResponseRedirect(reverse('basket:summary'))
try:
self.txn = fetch_transaction_details(self.token)
except PayPalError as e:
logger.warning(
"Unable to fetch transaction details for token %s: %s",
self.token, e)
messages.error(
self.request,
_("A problem occurred communicating with PayPal - please try again later"))
return HttpResponseRedirect(reverse('basket:summary'))
# Reload frozen basket which is specified in the URL
kwargs['basket'] = self.load_frozen_basket(kwargs['basket_id'])
if not kwargs['basket']:
logger.warning(
"Unable to load frozen basket with ID %s", kwargs['basket_id'])
messages.error(
self.request,
_("No basket was found that corresponds to your "
"PayPal transaction"))
return HttpResponseRedirect(reverse('basket:summary'))
self.request.basket = kwargs['basket']
logger.info(
"Basket #%s showing preview with payer ID %s and token %s",
kwargs['basket'].id, self.payer_id, self.token)
return super(SuccessResponseView, self).get(request, *args, **kwargs)
def load_frozen_basket(self, basket_id):
# Lookup the frozen basket that this txn corresponds to
try:
basket = Basket.objects.get(id=basket_id, status=Basket.FROZEN)
except Basket.DoesNotExist:
return None
# Assign strategy to basket instance
if Selector:
basket.strategy = Selector().strategy(self.request)
# Re-apply any offers
Applicator().apply(self.request, basket)
return basket
def get_context_data(self, **kwargs):
basket = kwargs.get("basket")
shipping_method = self.get_shipping_method(basket, shipping_address=self.get_shipping_address(basket))
if shipping_method is None:
return kwargs
ctx = super(SuccessResponseView, self).get_context_data(**kwargs)
if not hasattr(self, 'payer_id'):
return ctx
# This context generation only runs when in preview mode
ctx.update({
'payer_id': self.payer_id,
'token': self.token,
'paypal_user_email': self.txn.value('EMAIL'),
'paypal_amount': D(self.txn.value('AMT')),
'show_tax_separately': True,
})
return ctx
def post(self, request, *args, **kwargs):
"""
Place an order.
We fetch the txn details again and then proceed with oscar's standard
payment details view for placing the order.
"""
error_msg = _(
"A problem occurred communicating with PayPal "
"- please try again later"
)
try:
self.payer_id = request.POST['payer_id']
self.token = request.POST['token']
except KeyError:
# Probably suspicious manipulation if we get here
messages.error(self.request, error_msg)
return HttpResponseRedirect(reverse('basket:summary'))
try:
self.txn = fetch_transaction_details(self.token)
except PayPalError:
# Unable to fetch txn details from PayPal - we have to bail out
messages.error(self.request, error_msg)
return HttpResponseRedirect(reverse('basket:summary'))
# Reload frozen basket which is specified in the URL
basket = self.load_frozen_basket(kwargs['basket_id'])
if not basket:
messages.error(self.request, error_msg)
return HttpResponseRedirect(reverse('basket:summary'))
submission = self.build_submission(basket=basket)
return self.submit(**submission)
def build_submission(self, **kwargs):
basket = kwargs['basket']
basket.calculate_tax(
self.get_shipping_address(basket)
)
submission = super(
SuccessResponseView, self).build_submission(**kwargs)
# Adding the tax to the total transaction amount to be sent to Paypal
self.txn.amount = self.txn.amount + basket.total_tax
submission['shipping_method'].set_basket(basket)
shipping_method = submission['shipping_method']
total = self.get_order_totals(
basket, shipping_method=shipping_method)
submission['order_total'] = total
# Pass the user email so it can be stored with the order
submission['order_kwargs']['guest_email'] = self.txn.value('EMAIL')
# Pass PP params
submission['payment_kwargs']['payer_id'] = self.payer_id
submission['payment_kwargs']['token'] = self.token
submission['payment_kwargs']['txn'] = self.txn
return submission
# Warning: This method can be removed when we drop support for Oscar 0.6
def get_error_response(self):
# Check that the user's basket is not empty
if self.request.basket.is_empty:
messages.error(self.request, _(
"You need to add some items to your basket to checkout"))
return HttpResponseRedirect(reverse('basket:summary'))
if self.request.basket.is_shipping_required():
shipping_address = self.get_shipping_address(
self.request.basket)
shipping_method = self.get_shipping_method(
self.request.basket, shipping_address=shipping_address)
# Check that shipping address has been completed
if not shipping_address:
messages.error(
self.request, _("Please choose a shipping address"))
return HttpResponseRedirect(
reverse('checkout:shipping-address'))
# Check that shipping method has been set
if not shipping_method:
self.request.basket.thaw()
return HttpResponseRedirect(reverse('basket:summary'))
def handle_payment(self, order_number, total, **kwargs):
"""
Complete payment with PayPal - this calls the 'DoExpressCheckout'
method to capture the money from the initial transaction.
"""
try:
confirm_txn = confirm_transaction(
kwargs['payer_id'], kwargs['token'], kwargs['txn'].amount,
kwargs['txn'].currency)
except PayPalError:
raise UnableToTakePayment()
if not confirm_txn.is_successful:
raise UnableToTakePayment()
# Record payment source and event
source_type, is_created = SourceType.objects.get_or_create(
name='PayPal')
txn_id = getattr(confirm_txn, "txn_id", None)
source = Source(source_type=source_type,
currency=confirm_txn.currency,
amount_allocated=confirm_txn.amount,
amount_debited=confirm_txn.amount,
reference=txn_id)
self.add_payment_source(source)
self.add_payment_event('Settled', confirm_txn.amount,
reference=confirm_txn.correlation_id)
def get_shipping_address(self, basket):
"""
Return a shipping address instance created from the data
returned by PayPal.
"""
# Determine names - PayPal uses a single field
ship_to_name = self.txn.value('PAYMENTREQUEST_0_SHIPTONAME')
if ship_to_name is None:
return None
first_name = last_name = None
parts = ship_to_name.split()
if len(parts) == 1:
last_name = ship_to_name
elif len(parts) > 1:
first_name = parts[0]
last_name = " ".join(parts[1:])
return ShippingAddress(
first_name=first_name,
last_name=last_name,
line1=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET'),
line2=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET2', default=""),
line4=self.txn.value('PAYMENTREQUEST_0_SHIPTOCITY', default=""),
state=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTATE', default=""),
postcode=self.txn.value('PAYMENTREQUEST_0_SHIPTOZIP'),
country=Country.objects.get(iso_3166_1_a2=self.txn.value('PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE'))
)
def get_shipping_method(self, basket, shipping_address=None, **kwargs):
"""
Return the shipping method used
"""
if not basket.is_shipping_required():
return NoShippingRequired()
code = self.checkout_session.shipping_method_code(basket)
shipping_method = self.get_current_shipping_method()
allowed_countries = [country.pk for country in \
shipping_method.countries.all()]
if shipping_address.country.pk not in allowed_countries:
countries = ", ".join(allowed_countries)
message = _("We do not yet ship to countries outside of {}.".format(
countries))
messages.error(self.request, message)
return None
charge_incl_tax = D(self.txn.value('PAYMENTREQUEST_0_SHIPPINGAMT'))
# Assume no tax for now
charge_excl_tax = charge_incl_tax
method = shipping_method
name = self.txn.value('SHIPPINGOPTIONNAME')
if shipping_method:
method.name = shipping_method.name
else:
method.name = name
return method
class ShippingOptionsView(View):
def post(self, request, *args, **kwargs):
"""
We use the shipping address given to us by PayPal to
determine the available shipping method
"""
# Basket ID is passed within the URL path. We need to do this as some
# shipping options depend on the user and basket contents. PayPal do
# pass back details of the basket contents but it would be royal pain to
# reconstitute the basket based on those - easier to just to piggy-back
# the basket ID in the callback URL.
basket = get_object_or_404(Basket, id=kwargs['basket_id'])
user = basket.owner
if not user:
user = AnonymousUser()
# Create a shipping address instance using the data passed back
country_code = self.request.POST.get(
'PAYMENTREQUEST_0_SHIPTOCOUNTRY', None)
try:
country = Country.objects.get(iso_3166_1_a2=country_code)
except Country.DoesNotExist:
country = Country()
shipping_address = ShippingAddress(
line1=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOSTREET', None),
line2=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOSTREET2', None),
line4=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOCITY', None),
state=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOSTATE', None),
postcode=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOZIP', None),
country=country
)
methods = self.get_shipping_methods(user, basket, shipping_address)
return self.render_to_response(methods, basket)
def render_to_response(self, methods, basket):
pairs = [
('METHOD', 'CallbackResponse'),
('CURRENCYCODE', self.request.POST.get('CURRENCYCODE', 'GBP')),
]
for index, method in enumerate(methods):
if hasattr(method, 'set_basket'):
# Oscar < 0.8
method.set_basket(basket)
charge = method.charge_incl_tax
else:
cost = method.calculate(basket)
charge = cost.incl_tax
pairs.append(('L_SHIPPINGOPTIONNAME%d' % index,
six.text_type(method.name)))
pairs.append(('L_SHIPPINGOPTIONLABEL%d' % index,
six.text_type(method.name)))
pairs.append(('L_SHIPPINGOPTIONAMOUNT%d' % index, charge))
# For now, we assume tax and insurance to be zero
pairs.append(('L_TAXAMT%d' % index, D('0.00')))
pairs.append(('L_INSURANCEAMT%d' % index, D('0.00')))
# We assume that the first returned method is the default one
pairs.append(('L_SHIPPINGOPTIONISDEFAULT%d' % index, 1 if index == 0 else 0))
else:
# No shipping methods available - we flag this up to PayPal indicating that we
# do not ship to the shipping address.
pairs.append(('NO_SHIPPING_OPTION_DETAILS', 1))
payload = urlencode(pairs)
return HttpResponse(payload)
def get_shipping_methods(self, user, basket, shipping_address):
repo = Repository()
return repo.get_shipping_methods(
user, basket, shipping_addr=shipping_address)
| {
"content_hash": "a3e8c65bd2d004d3eda037868b20a6e9",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 110,
"avg_line_length": 38.97435897435897,
"alnum_prop": 0.6113721804511278,
"repo_name": "britco/django-oscar-paypal",
"id": "e115f63304621ecd14aa0abf14cfb965844df1e3",
"size": "21280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paypal/express/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14220"
},
{
"name": "Makefile",
"bytes": "440"
},
{
"name": "Python",
"bytes": "130211"
},
{
"name": "Shell",
"bytes": "104"
}
],
"symlink_target": ""
} |
import unittest
from dce_lti_py import LaunchParams, DEFAULT_LTI_VERSION, InvalidLTIConfigError
from dce_lti_py.launch_params import InvalidLaunchParamError
class TestLaunchParams(unittest.TestCase):
def test_constructor(self):
lp = LaunchParams()
self.assertEqual(lp['lti_version'], DEFAULT_LTI_VERSION)
self.assertEqual(lp['lti_message_type'], 'basic-lti-launch-request')
lp = LaunchParams({
'lti_version': 'LTI-foo',
'lti_message_type': 'bar',
'resource_link_id': 123
})
self.assertEqual(lp['resource_link_id'], 123)
self.assertEqual(lp['lti_version'], 'LTI-foo')
self.failUnlessRaises(InvalidLaunchParamError, LaunchParams, {
'foo': 'bar'
})
def test_get_item(self):
lp = LaunchParams()
self.assertEqual(lp['lti_version'], DEFAULT_LTI_VERSION)
with self.assertRaises(KeyError):
foo = lp['foo']
def test_set_item(self):
lp = LaunchParams()
lp['lti_version'] = 'bar'
self.assertEqual(lp['lti_version'], 'bar')
def test_list_params(self):
lp = LaunchParams({'roles': 'foo,bar,baz'})
self.assertEqual(lp['roles'], ['foo','bar','baz'])
self.assertEqual(lp._params['roles'], 'foo,bar,baz')
lp['roles'] = ['bar','baz']
self.assertEqual(lp['roles'], ['bar','baz'])
self.assertEqual(lp._params['roles'], 'bar,baz')
lp['roles'] = 'blah, bluh '
self.assertEqual(lp['roles'], ['blah','bluh'])
def test_non_spec_params(self):
lp = LaunchParams()
lp.set_non_spec_param('foo', 'bar')
self.assertEqual(lp.get_non_spec_param('foo'), 'bar')
self.assertEqual(lp._params['foo'], 'bar')
self.assertRaises(KeyError, lp.get, 'foo')
def test_dict_behavior(self):
lp = LaunchParams({
'lti_version': 'foo',
'lti_message_type': 'bar'
})
self.assertEqual(len(lp), 2)
lp.update({'resource_link_id': 1})
self.assertEqual(len(lp), 3)
self.failUnlessRaises(InvalidLaunchParamError, lp.update, {
'foo': 'bar'
})
self.assertItemsEqual(
lp.keys(),
['lti_version', 'lti_message_type', 'resource_link_id']
)
self.assertEqual(dict(lp), {
'lti_version': 'foo',
'lti_message_type': 'bar',
'resource_link_id': 1
})
| {
"content_hash": "3a1f743f94dbfbdbd1b8b703c6de201c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 30.317073170731707,
"alnum_prop": 0.5619469026548672,
"repo_name": "brainheart/dce_lti_py",
"id": "c40d97b3c176de5fea283f14d9bd1fccb25f0a40",
"size": "2486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_launch_params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78868"
}
],
"symlink_target": ""
} |
from .vnsgitmd import MdApi
from .vnsgittd import TdApi
from .sgit_constant import *
| {
"content_hash": "45ddcffca1f6bc8bc354ea4bef266894",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 28,
"avg_line_length": 28,
"alnum_prop": 0.8095238095238095,
"repo_name": "msincenselee/vnpy",
"id": "ccec073e3ffa5e2ac7cf9d2edbf2710c47acfb04",
"size": "84",
"binary": false,
"copies": "2",
"ref": "refs/heads/vnpy2",
"path": "vnpy/api/sgit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "751"
},
{
"name": "C",
"bytes": "2862615"
},
{
"name": "C++",
"bytes": "14985812"
},
{
"name": "Cython",
"bytes": "42495"
},
{
"name": "Python",
"bytes": "12716181"
},
{
"name": "Shell",
"bytes": "17068"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class TestInline(TestDataMixin, TestCase):
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
self.factory = RequestFactory()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
holder = Holder.objects.get(dummy=13)
response = self.client.get(
reverse('admin:admin_inlines_holder_change', args=(holder.id,))
)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get(
reverse('admin:admin_inlines_holder_change', args=(holder.id,))
)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-book relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_custom_form_tabular_inline_label(self):
"""
A model form with a form field specified (TitleForm.title1) should have
its label rendered in the tabular inline.
"""
response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
self.assertContains(response, '<th class="required">Title1</th>', html=True)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield">'
'<li>The two titles must be the same</li></ul></td></tr>'
)
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get(reverse('admin:admin_inlines_novel_add'))
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
)
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get(reverse('admin:admin_inlines_poll_add'))
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="question_set-group"'
)
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Awesome tabular help text is awesome.)" '
'title="Awesome tabular help text is awesome." />',
1
)
# ReadOnly fields
response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text for ReadOnlyInline)" '
'title="Help text for ReadOnlyInline" />',
1
)
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name='a')
SomeChildModel.objects.create(name='b', position='0', parent=parent)
SomeChildModel.objects.create(name='c', position='1', parent=parent)
response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
self.assertNotContains(response, '<td class="field-position">')
self.assertContains(response, (
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />', html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = (
'<input id="id_binarytree_set-MAX_NUM_FORMS" '
'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
)
# The total number of forms will remain the same in either case
total_forms_hidden = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
)
response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_min_num(self):
"""
Ensure that min_num and extra determine number of forms.
"""
class MinNumInline(TabularInline):
model = BinaryTree
min_num = 2
extra = 3
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms)
self.assertContains(response, total_forms)
def test_custom_min_num(self):
"""
Ensure that get_min_num is called and used correctly.
"""
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
class MinNumInline(TabularInline):
model = BinaryTree
extra = 3
def get_min_num(self, request, obj=None, **kwargs):
if obj:
return 5
return 2
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms % 2)
self.assertContains(response, total_forms % 5)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
self.assertContains(response, min_forms % 5)
self.assertContains(response, total_forms % 8)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
self.assertContains(
response,
'<input id="id_nonautopkbook_set-0-rand_pk" '
'name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True
)
self.assertContains(
response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" '
'name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True
)
def test_inline_editable_pk(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
'name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1
)
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
'name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1
)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1
)
self.assertContains(
response,
'<div class="inline-related" id="inner_set-1">',
count=1
)
def test_inlines_show_change_link_registered(self):
"Inlines `show_change_link` for registered models when enabled."
holder = Holder4.objects.create(dummy=1)
item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
items = (
('inner4stacked', item1.pk),
('inner4tabular', item2.pk),
)
response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
for model, pk in items:
url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
def test_inlines_show_change_link_unregistered(self):
"Inlines `show_change_link` disabled for unregistered models."
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_tabular_inline_show_change_link_false_registered(self):
"Inlines `show_change_link` disabled by default."
poll = Poll.objects.create(name="New poll")
Question.objects.create(poll=poll)
response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class TestInlineMedia(TestDataMixin, TestCase):
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineAdminForm(TestCase):
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,))
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='secret'),
True)
def test_inline_add_m2m_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-book relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-0-id" value="%i" '
'name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id,
html=True
)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
'name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
# max-num 0 means we can't add new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, three for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_holder4_add')))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_holder4_add')))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector(
'%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_profilecollection_add')))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_profilecollection_add')))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_profilecollection_add')))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
def test_collapsed_inlines(self):
# Collapsed inlines have SHOW/HIDE links.
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
# One field is in a stacked inline, other in a tabular one.
test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
show_links = self.selenium.find_elements_by_link_text('SHOW')
self.assertEqual(len(show_links), 2)
for show_index, field_name in enumerate(test_fields, 0):
self.wait_until_invisible(field_name)
show_links[show_index].click()
self.wait_until_visible(field_name)
hide_links = self.selenium.find_elements_by_link_text('HIDE')
self.assertEqual(len(hide_links), 2)
for hide_index, field_name in enumerate(test_fields, 0):
self.wait_until_visible(field_name)
hide_links[hide_index].click()
self.wait_until_invisible(field_name)
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| {
"content_hash": "f43aead75026a0822f9ea2a868c28a94",
"timestamp": "",
"source": "github",
"line_count": 902,
"max_line_length": 112,
"avg_line_length": 49.90022172949002,
"alnum_prop": 0.6464341257498334,
"repo_name": "benjaminjkraft/django",
"id": "f59f87e6deef62d3905713a58fcc4fecf5f5138c",
"size": "45010",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/admin_inlines/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170510"
},
{
"name": "JavaScript",
"bytes": "256027"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11459633"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import uuid
from keystoneclient import exceptions
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import endpoints
class EndpointTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(EndpointTests, self).setUp()
self.key = 'endpoint'
self.collection_key = 'endpoints'
self.model = endpoints.Endpoint
self.manager = self.client.endpoints
def new_ref(self, **kwargs):
kwargs = super(EndpointTests, self).new_ref(**kwargs)
kwargs.setdefault('interface', 'public')
kwargs.setdefault('region', uuid.uuid4().hex)
kwargs.setdefault('service_id', uuid.uuid4().hex)
kwargs.setdefault('url', uuid.uuid4().hex)
kwargs.setdefault('enabled', True)
return kwargs
def test_create_public_interface(self):
ref = self.new_ref(interface='public')
self.test_create(ref)
def test_create_admin_interface(self):
ref = self.new_ref(interface='admin')
self.test_create(ref)
def test_create_internal_interface(self):
ref = self.new_ref(interface='internal')
self.test_create(ref)
def test_create_invalid_interface(self):
ref = self.new_ref(interface=uuid.uuid4().hex)
self.assertRaises(exceptions.ValidationError, self.manager.create,
**utils.parameterize(ref))
def test_update_public_interface(self):
ref = self.new_ref(interface='public')
self.test_update(ref)
def test_update_admin_interface(self):
ref = self.new_ref(interface='admin')
self.test_update(ref)
def test_update_internal_interface(self):
ref = self.new_ref(interface='internal')
self.test_update(ref)
def test_update_invalid_interface(self):
ref = self.new_ref(interface=uuid.uuid4().hex)
ref['endpoint'] = "fake_endpoint"
self.assertRaises(exceptions.ValidationError, self.manager.update,
**utils.parameterize(ref))
def test_list_public_interface(self):
interface = 'public'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_admin_interface(self):
interface = 'admin'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_internal_interface(self):
        interface = 'internal'
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.test_list(expected_path=expected_path, interface=interface)
def test_list_invalid_interface(self):
interface = uuid.uuid4().hex
expected_path = 'v3/%s?interface=%s' % (self.collection_key, interface)
self.assertRaises(exceptions.ValidationError, self.manager.list,
expected_path=expected_path, interface=interface)
| {
"content_hash": "ad7d13376179fed07de1d059a82a74d9",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 38.0126582278481,
"alnum_prop": 0.6513486513486514,
"repo_name": "raildo/python-keystoneclient",
"id": "050a646eca21a6c9071c5d163a778ca2ae0b8f23",
"size": "3621",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystoneclient/tests/v3/test_endpoints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "JavaScript",
"bytes": "14806"
},
{
"name": "Python",
"bytes": "1344730"
},
{
"name": "Shell",
"bytes": "22768"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Typus'
copyright = u'2012, Francesc Esplugas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1'
# The full version, including alpha/beta/rc tags.
release = '3.1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Typusdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Typus.tex', u'Typus Documentation',
u'Francesc Esplugas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'typus', u'Typus Documentation',
[u'Francesc Esplugas'], 1)
]
| {
"content_hash": "f29d5be69e76c7d37f0bfde86c760cda",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.27093596059113,
"alnum_prop": 0.7055411387574416,
"repo_name": "chiragshah/typus",
"id": "efcd3cdb0aa6eb587bffe82e3e9c5f1d9fc06236",
"size": "6967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/user_guide/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "62223"
},
{
"name": "Python",
"bytes": "6967"
},
{
"name": "Ruby",
"bytes": "239571"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
from .interface import SerialInterface
from . import message
import socketserver
import argparse
import threading
import time
import logging
class IR_Control:
def __init__(self, interface, serial_port, baudrate):
self.i = interface
self.serial_port = serial_port
self.baudrate = baudrate
self.log = logging.getLogger("IR_Control")
self.running = True
def stop(self):
self.running = False
    # Blocks, reading from the serial port and acting on received messages.
def loop(self):
while(self.running):
if (not self.i.is_serial_connected()):
self.log.error("No serial port!")
self.i.connect(self.serial_port, self.baudrate)
time.sleep(1)
a = self.i.get_message()
if (a):
self.received_serial(a)
else:
time.sleep(0.001)
# processes received messages from serial
def received_serial(self, msg):
# receives messages from the interface.
if (msg.msg_type == msg.type.action_IR_received):
# convert it into a ir_message
ir_code = message.IR(**dict(msg.ir_specification))
self.ir_received(ir_code)
# sends a message over the serial port
def send_serial(self, msg):
self.i.put_message(msg)
# send an IR code with the hardware.
def send_ir(self, ir_code):
self.log.debug("sending ir {}".format(ir_code))
# create the message
msg = message.Msg()
msg.msg_type = msg.type.action_IR_send
try:
msg.ir_specification.from_dict(ir_code.raw())
except TypeError as e:
self.log.error("Conversion failed: {} ".format(str(e)))
self.send_serial(msg)
# This method is called when an IR code is received from the serial port.
def ir_received(self, ir_code):
raise NotImplementedError("Subclass should implement this.")
# This object actually deals with the interaction and configuration file
# it is up to you to change this to suit your needs... or use this and modify
# the configuration file.
class Interactor(IR_Control):
def __init__(self, *args, **kwargs):
super(Interactor, self).__init__(*args, **kwargs)
self.log = logging.getLogger("Interactor")
def load_config(self, conf):
self.ir_by_name = {}
self.ir_by_code = {}
ir_codes = conf.get_codes()
for code in ir_codes:
name = ir_codes[code]
# store lookup for name -> ir_code and ir_code -> name.
self.ir_by_name[name] = code
self.ir_by_code[code.tuple()] = name
# store actions per name.
self.ir_actions = conf.get_actions()
# called when an ir code is received from the serial port.
def ir_received(self, ir_code):
if (ir_code.tuple() in self.ir_by_code):
# if it is in the list, convert to ir_name
ir_name = self.ir_by_code[ir_code.tuple()]
self.log.debug("IR name known: {}".format(ir_name))
# try to perform the action:
self.perform_action(ir_name)
else:
self.log.debug("IR code not known:\n{}".format(
ir_code.config_print()))
# When an IR code is received and we have a name for this, this performs
# the action associated to that name.
def perform_action(self, action_name):
if (action_name not in self.ir_actions):
return
self.log.info("Action found for {}.".format(action_name))
action = self.ir_actions[action_name]
# call the action, with the interactor and action_name argument.
action(self, action_name)
# send an IR code by name.
def send_ir_by_name(self, name):
if name in self.ir_by_name:
self.send_ir(self.ir_by_name[name])
else:
self.log.warn("Tried to send unknown {} ir code".format(name))
# this method is called when something is passed via the TCP socket.
def incoming_external_command(self, cmd):
cmd = str(cmd, 'ascii')
self.log.debug("Incoming command: {}".format(cmd))
self.send_ir_by_name(cmd)
# self.perform_action(cmd)
class TCPCommandHandler(socketserver.StreamRequestHandler):
def handle(self):
data = self.request.recv(1024).strip()
self.server.mcu_manager_.incoming_external_command(data)
self.finish()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def setManager(self, manager):
self.mcu_manager_ = manager
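# With start() running, a configured IR code name can be sent through the TCP
# socket from a shell, e.g. (illustrative; "tv_power" must exist in your
# configuration, host/port follow the defaults below):
#   echo -n tv_power | nc 127.0.0.1 9999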
def start(conf):
parser = argparse.ArgumentParser(description="Control MCU at serial port.")
parser.add_argument('--serial', '-s', help="The serial port to use.",
default="/dev/ttyUSB0")
parser.add_argument('--baudrate', '-r', help="The badurate for the port.",
default=9600, type=int)
parser.add_argument('--verbose', '-v', help="Print all communication.",
action="store_true", default=False)
parser.add_argument('--tcpport', '-p', help="The port used for the tcp"
" socket.",
default=9999)
parser.add_argument('--tcphost', '-b', help="The host/ip on which to bind"
" the tcp socket receiving the IR commands.",
default="127.0.0.1")
# parse the arguments.
args = parser.parse_args()
# start the serial interface
a = SerialInterface(packet_size=message.PACKET_SIZE)
a.connect(serial_port=args.serial, baudrate=args.baudrate)
a.start() # start the interface
# pretty elaborate logging...
logger_interface = logging.getLogger("interface")
logger_IR_control = logging.getLogger("IR_control")
logger_interactor = logging.getLogger("Interactor")
if (args.verbose):
logger_interface.setLevel(logging.DEBUG)
logger_IR_control.setLevel(logging.DEBUG)
logger_interactor.setLevel(logging.DEBUG)
else:
logger_interactor.setLevel(logging.WARN)
logger_interface.setLevel(logging.WARN)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s - %(asctime)s - %(levelname)s'
' - %(message)s')
ch.setFormatter(formatter)
logger_interface.addHandler(ch)
logger_IR_control.addHandler(ch)
logger_interactor.addHandler(ch)
# start the Interactor 'glue' object.
m = Interactor(a, serial_port=args.serial, baudrate=args.baudrate)
m.load_config(conf)
# This is only for the TCP server to facilitate sending IR codes from the
# terminal easily.
server = ThreadedTCPServer((args.tcphost, args.tcpport), TCPCommandHandler)
server.setManager(m)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
# loop the IR_Control object such that the correct actions are performed
try:
m.loop()
except KeyboardInterrupt as e:
m.stop()
a.stop()
logger_IR_control.error("Received interrupt signal, stopping.")
| {
"content_hash": "d4306418c5a2abba13a2a3408ff5c9b0",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 36.35175879396985,
"alnum_prop": 0.6172242189659939,
"repo_name": "iwanders/IR_control",
"id": "c52146faa7985fd23865209eefe2aeb6d683ef63",
"size": "8376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ir_control/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5506"
},
{
"name": "C",
"bytes": "1864"
},
{
"name": "Makefile",
"bytes": "758"
},
{
"name": "Python",
"bytes": "43932"
}
],
"symlink_target": ""
} |
import os
from cli import IllegalArgumentException
from cli.evaluation import MMTTranslator, _EvaluationResult, TranslateError, BLEUScore
from cli.libs import fileutils
from cli.mmt.processing import XMLEncoder
# Similar to the Evaluator class. Alternatively we could have added a flag there to call corpus.copy(dest_path),
# but we also don't want to run all eval metrics, and we want to support a missing reference on the source, ...
class BatchTranslator:
def __init__(self, node, use_sessions=True):
self._engine = node.engine
self._node = node
self._xmlencoder = XMLEncoder()
self._translator = MMTTranslator(self._node, use_sessions)
def translate(self, corpora, dest_path=None, debug=False):
if len(corpora) == 0:
raise IllegalArgumentException('empty corpora')
if dest_path:
fileutils.makedirs(dest_path, exist_ok=True)
target_lang = self._engine.target_lang
source_lang = self._engine.source_lang
working_dir = self._engine.get_tempdir('evaluation')
have_references = False
try:
results = []
# Process references
corpora_path = os.path.join(working_dir, 'corpora')
corpora = self._xmlencoder.encode(corpora, corpora_path)
reference = os.path.join(working_dir, 'reference.' + target_lang)
source = os.path.join(working_dir, 'source.' + source_lang)
refs = [corpus.get_file(target_lang) for corpus in corpora if corpus.get_file(target_lang)]
have_references = len(refs) > 0
fileutils.merge(refs, reference) # tolerates missing reference
fileutils.merge([corpus.get_file(source_lang) for corpus in corpora], source)
if dest_path:
for corpus in corpora:
corpus.copy(dest_path, suffixes={source_lang: '.src', target_lang: '.ref', 'tmx': '.src'})
# Translate
translator = self._translator
name = translator.name()
result = _EvaluationResult(translator)
results.append(result)
translations_path = os.path.join(working_dir, 'translations', result.id + '.raw')
xmltranslations_path = os.path.join(working_dir, 'translations', result.id)
fileutils.makedirs(translations_path, exist_ok=True)
try:
translated, mtt, parallelism = translator.translate(corpora, translations_path)
filename = result.id + '.' + target_lang
result.mtt = mtt
result.parallelism = parallelism
result.translated_corpora = self._xmlencoder.encode(translated, xmltranslations_path)
result.merge = os.path.join(working_dir, filename)
fileutils.merge([corpus.get_file(target_lang)
for corpus in result.translated_corpora], result.merge)
if dest_path:
for corpus in result.translated_corpora:
corpus.copy(dest_path, suffixes={target_lang: '.hyp', 'tmx': '.hyp'})
except TranslateError as e:
result.error = e
except Exception as e:
result.error = TranslateError('Unexpected ERROR: ' + str(e.message))
if result.error is None:
if have_references:
scorer = BLEUScore()
# bleu in range [0;1)
bleu = scorer.calculate(result.merge, reference)
return bleu
else:
return True
else:
print(result.error)
return None
finally:
if not debug:
self._engine.clear_tempdir('evaluation')
| {
"content_hash": "449d7ee351cdd8b88f6b04f6259b7254",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 114,
"avg_line_length": 40.37894736842105,
"alnum_prop": 0.5862877997914494,
"repo_name": "letconex/MMT",
"id": "928dd1b4e94494781de018938adc0543962e49c8",
"size": "3836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/legacy_tests/translator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AutoHotkey",
"bytes": "1609"
},
{
"name": "C",
"bytes": "2310"
},
{
"name": "C++",
"bytes": "3479743"
},
{
"name": "CMake",
"bytes": "42508"
},
{
"name": "Java",
"bytes": "810385"
},
{
"name": "Perl",
"bytes": "93820"
},
{
"name": "Protocol Buffer",
"bytes": "947"
},
{
"name": "Python",
"bytes": "232214"
},
{
"name": "Roff",
"bytes": "25856"
},
{
"name": "Shell",
"bytes": "15583"
}
],
"symlink_target": ""
} |
"""Tests for circulation signals."""
from invenio_circulation.proxies import current_circulation
from invenio_circulation.signals import loan_state_changed
def test_signals_loan_request(loan_created, params):
"""Test signal for loan request action."""
recorded = []
def record_signals(_, initial_loan, loan, trigger, **kwargs):
recorded.append((initial_loan, loan, trigger, kwargs))
loan_state_changed.connect(record_signals, weak=False)
assert len(recorded) == 0
current_circulation.circulation.trigger(
loan_created,
**dict(
params,
trigger="request",
pickup_location_pid="pickup_location_pid",
)
)
assert len(recorded) == 1
initial_loan, updated_loan, trigger, kwargs = recorded.pop()
assert initial_loan["state"] == "CREATED"
assert updated_loan["state"] == "PENDING"
assert trigger == "request"
def test_signals_loan_extend(loan_created, params):
"""Test signals for loan extend action."""
recorded = []
def record_signals(_, initial_loan, loan, trigger, **kwargs):
recorded.append((initial_loan, loan, trigger, kwargs))
loan_state_changed.connect(record_signals, weak=False)
assert len(recorded) == 0
loan = current_circulation.circulation.trigger(
loan_created, **dict(params, trigger="checkout")
)
assert len(recorded) == 1
initial_loan, updated_loan, trigger, kwargs = recorded.pop()
assert initial_loan["state"] == "CREATED"
assert updated_loan["state"] == "ITEM_ON_LOAN"
assert trigger == "checkout"
current_circulation.circulation.trigger(
loan, **dict(params, trigger="extend")
)
assert len(recorded) == 1
initial_loan, updated_loan, trigger, kwargs = recorded.pop()
assert initial_loan["state"] == "ITEM_ON_LOAN"
assert updated_loan["state"] == "ITEM_ON_LOAN"
assert initial_loan["end_date"] != updated_loan["end_date"]
assert trigger == "extend"
| {
"content_hash": "e986b5d0277dcc80e611a8290588e020",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 65,
"avg_line_length": 33.779661016949156,
"alnum_prop": 0.6562970396387355,
"repo_name": "inveniosoftware/invenio-circulation",
"id": "0f24623c0967bda70c7e4a424c84d0a696586a4a",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "174897"
},
{
"name": "Shell",
"bytes": "1051"
}
],
"symlink_target": ""
} |
"""Test load_sync/store_sync instructions.
This uses four threads to update variables round-robin.
"""
import sys
sys.path.insert(0, '../..')
import test_harness
@test_harness.test(['verilator'])
def multicore(_, target):
hex_file = test_harness.build_program(['multicore.S'])
result = test_harness.run_program(hex_file, target)
if 'ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`' not in result:
raise test_harness.TestException('Output mismatch:\n' + result)
test_harness.execute_tests()
| {
"content_hash": "96cdedad8569e91c95a7ccc77c3432bb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 26.42105263157895,
"alnum_prop": 0.703187250996016,
"repo_name": "jbush001/NyuziProcessor",
"id": "7aae0e046512bdc15cc32a5ed1ffe56a17c190d7",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/multicore/runtest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "213194"
},
{
"name": "C",
"bytes": "2962697"
},
{
"name": "C++",
"bytes": "1644391"
},
{
"name": "CMake",
"bytes": "49462"
},
{
"name": "Dockerfile",
"bytes": "1642"
},
{
"name": "Java",
"bytes": "6570"
},
{
"name": "Perl",
"bytes": "4771"
},
{
"name": "Python",
"bytes": "203129"
},
{
"name": "Shell",
"bytes": "6099"
},
{
"name": "SystemVerilog",
"bytes": "966291"
},
{
"name": "Tcl",
"bytes": "471"
}
],
"symlink_target": ""
} |
from fabric.api import *
@task
def bootstrap(puppetmaster=''):
"""Bootstrap an Ubuntu 14.04 host on Upcloud"""
from . import util, puppet
util.install_sudo()
fqdn = run('hostname -f')
hostname = run('hostname -s')
ipaddress = run("/sbin/ifconfig eth0 | grep 'inet ' | awk -F'[: ]+' '{ print $4 }'")
hosts_line = "%s %s %s" % (ipaddress, fqdn, hostname)
sudo("sed -i '/127.0.1.1/d' /etc/hosts")
with settings(warn_only=True):
grep = run("grep \"%s\" /etc/hosts" % hosts_line)
if grep.failed:
sudo("echo %s >> /etc/hosts" % hosts_line)
puppet.install(puppetmaster)
puppet.run_agent()
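# Illustrative Fabric 1.x invocation (host name and puppetmaster are placeholders):
#   fab -H new-node.example.com bootstrap:puppetmaster=puppet.example.com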
| {
"content_hash": "198d192a7c24baff2eef3b896d1ce0e0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 28.347826086956523,
"alnum_prop": 0.588957055214724,
"repo_name": "mattock/fabric",
"id": "c5b80cbf3f21f748702a5fd9eac19d471776f53d",
"size": "676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "upcloud.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "35265"
},
{
"name": "Shell",
"bytes": "1965"
}
],
"symlink_target": ""
} |
'''
Reference: https://github.com/7sDream/zhihu-py3/blob/master/zhihu/client.py
Zhihu authentication module
'''
import os
import time
import json
import requests
from PIL import Image
from global_constant import *
class Authentication(object):
'''
    Zhihu authentication class; log in either with cookies (in JSON format) or with an account and password.
    When logging in with an account and password, the cookies are saved as JSON in the current directory.
'''
def __init__(self, cookies_path=None, kw=None):
self.session = requests.Session()
self.session.headers.update(headers)
if cookies_path is not None:
assert isinstance(cookies_path, str)
self._login_with_cookies(cookies_path)
elif kw is not None:
self._login_with_email(kw)
else:
print("Input the parameter.")
def _login_with_cookies(self, cookies_path):
'''
        Log in with a cookies file.
        cookies_path: path to the cookies file
        The cookies are stored as JSON data.
'''
try:
with open(cookies_path) as f:
cookies = f.read()
self.session.cookies.update(json.loads(cookies))
except FileNotFoundError as e:
print("No such file or directory.")
def _login_with_email(self, kw):
'''
        Log in with an account and password.
        Login flow:
        request url: https://www.zhihu.com
        read the _xsrf token
        request url: https://www.zhihu.com/captcha.gif?r=*
        fetch captcha.gif
        enter the captcha by hand
        log in
        kw: dict holding the account and password
'''
xsrf = self._get_xsrf()
captcha = self._get_captcha()
form_data = {
'_xsrf': xsrf,
'captcha': captcha,
'remember_me': 'true'
}
form_data.update(kw)
response = self.session.post(login_url, data=form_data)
        if response.status_code == 200:
            print("Login successful.")
self._save_cookies()
else:
print("Login failed.")
def _get_xsrf(self):
self.session.get(zhihu_url)
return self.session.cookies.get('_xsrf')
def _get_captcha(self):
captcha_url = captcha_url_prefix + str(int(time.time() * 1000))
response = self.session.get(captcha_url)
with open("captcha.gif", 'wb') as f:
for i in response:
f.write(i)
with open("captcha.gif", 'rb') as f:
Image.open(f).show()
os.remove("captcha.gif")
return input("Input the captcha.\n")
def _save_cookies(self):
'''
        Save the cookies to the current directory.
'''
with open('cookies.json', 'w') as f:
json.dump(self.session.cookies.get_dict(), f)
def set_proxy(self, proxy):
'''
        Set a proxy for the session.
        proxy: tuple in the form ('http', 'example.com:port')
'''
self.session.proxies.update({proxy[0]+'://': proxy[1]})
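# Illustrative usage sketch; the keys expected in `kw` depend on the Zhihu login
# form (email/password are assumed here), and cookies.json is the file written
# by a previous password login:
#   auth = Authentication(kw={'email': 'user@example.com', 'password': '***'})
#   auth = Authentication(cookies_path='cookies.json')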
| {
"content_hash": "720260f55e7e8e72a36dc1706d321072",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 71,
"avg_line_length": 28.31958762886598,
"alnum_prop": 0.5438660356752821,
"repo_name": "time-river/wander",
"id": "02a7c887276dcb2f09dd93de5cc5d068648ff6ea",
"size": "3038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "practice/requests/zhihu/authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100268"
}
],
"symlink_target": ""
} |
from loguru import logger
from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginError
logger = logger.bind(name='list_clear')
class ListClear:
schema = {
'type': 'object',
'properties': {
'what': {
'type': 'array',
'items': {
'allOf': [
{'$ref': '/schema/plugins?interface=list'},
{
'maxProperties': 1,
'error_maxProperties': 'Plugin options within list_clear plugin must be indented '
'2 more spaces than the first letter of the plugin name.',
'minProperties': 1,
},
]
},
},
'phase': {'type': 'string', 'enum': plugin.task_phases, 'default': 'start'},
},
'required': ['what'],
}
def __getattr__(self, phase):
# enable plugin in regular task phases
if phase.replace('on_task_', '') in plugin.task_phases:
return self.clear
@plugin.priority(plugin.PRIORITY_FIRST)
def clear(self, task, config):
for item in config['what']:
for plugin_name, plugin_config in item.items():
try:
thelist = plugin.get(plugin_name, self).get_list(plugin_config)
except AttributeError:
raise PluginError('Plugin %s does not support list interface' % plugin_name)
if thelist.immutable:
raise plugin.PluginError(thelist.immutable)
if config['phase'] == task.current_phase:
if task.manager.options.test and thelist.online:
logger.info(
'would have cleared all items from {} - {}', plugin_name, plugin_config
)
continue
logger.verbose('clearing all items from {} - {}', plugin_name, plugin_config)
thelist.clear()
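# Minimal task configuration sketch (the list name is hypothetical; any plugin
# implementing the list interface can appear under `what`):
#
#   list_clear:
#     what:
#       - entry_list: my-seen-entries
#     phase: start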
@event('plugin.register')
def register_plugin():
plugin.register(ListClear, 'list_clear', api_ver=2)
| {
"content_hash": "ca6a147d7901ed0bec7a7846c7aa53d8",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 110,
"avg_line_length": 37.15,
"alnum_prop": 0.4952893674293405,
"repo_name": "ianstalk/Flexget",
"id": "e9a386c203a295bcb20a0d724b346d2e21baeeed",
"size": "2229",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/components/managed_lists/list_clear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2063551"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.views import generic
from . import views
urlpatterns = [
url('^$', generic.View.as_view(), name="index"),
url('^url_timeout\.html$', views.UrlTimeoutView.as_view(), name='url_timeout'),
    # Application form (fields required to temporarily hold a parking space) [shared by individual and corporate users]
url('^user_subscription_simple_step1/(?P<signature>[^/]+)\.html$', views.UserSubscriptionSimpleStep1View.as_view(),
name="user_subscription_simple_step1"),
url('^user_subscription_simple_step2/(?P<signature>[^/]+)\.html$', views.UserSubscriptionSimpleStep2View.as_view(),
name="user_subscription_simple_step2"),
    # Screening form
url('^user_subscription_inspection_step1/(?P<signature>[^/]+)\.html$',
views.UserSubscriptionInspectionStep1View.as_view(),
name="user_subscription_inspection_step1"),
url('^user_subscription_inspection_step2/(?P<signature>[^/]+)\.html$',
views.UserSubscriptionInspectionStep2View.as_view(),
name="user_subscription_inspection_step2"),
    # # User subscription
# url('^user_subscription_step1/(?P<signature>[^/]+)\.html$', views.UserSubscriptionStep1View.as_view(),
# name="user_subscription_step1"),
# url('^user_subscription_step2/(?P<signature>[^/]+)\.html$', views.UserSubscriptionStep2View.as_view(),
# name="user_subscription_step2"),
# url('^user_subscription_step3/(?P<signature>[^/]+)\.html$', views.UserSubscriptionStep3View.as_view(),
# name="user_subscription_step3"),
# url('^user_subscription_step4/(?P<signature>[^/]+)\.html$', views.UserSubscriptionStep4View.as_view(),
# name="user_subscription_step4"),
# url('^user_subscription_step5/(?P<signature>[^/]+)\.html$', views.UserSubscriptionStep5View.as_view(),
# name="user_subscription_step5"),
url('^subscription_confirm/(?P<subscription_id>\d+)/$',
views.SubscriptionConfirmView.as_view(), name="report_subscription_confirm"),
url('^subscription/(?P<subscription_id>\d+)/$',
views.SubscriptionView.as_view(), name="report_subscription"),
    # User contract
url('^user_contract_step1/(?P<signature>[^/]+)\.html$', views.UserContractStep1View.as_view(),
name="user_contract_step1"),
url('^user_contract_step2/(?P<signature>[^/]+)\.html$', views.UserContractStep2View.as_view(),
name="user_contract_step2"),
url('^user_contract_step3/(?P<signature>[^/]+)\.html$', views.UserContractStep3View.as_view(),
name="user_contract_step3"),
url('^user_contract_step4/(?P<signature>[^/]+)\.html$', views.UserContractStep4View.as_view(),
name="user_contract_step4"),
url('^user_contract_step5/(?P<signature>[^/]+)\.html$', views.UserContractStep5View.as_view(),
name="user_contract_step5"),
    # Standard cancellation
url('^user_contract_cancellation_step1/(?P<signature>[^/]+)\.html$', views.ContractCancellationStep1View.as_view(),
name="user_contract_cancellation_step1"),
url('^user_contract_cancellation_step2/(?P<signature>[^/]+)\.html$', views.ContractCancellationStep2View.as_view(),
name="user_contract_cancellation_step2"),
# Download PDF
url('^download/pdf/subscription_confirm/(?P<subscription_id>\d+)/$',
views.GenerateSubscriptionConfirmPdfView.as_view(), name='download_report_subscription_confirm'),
url('^download/pdf/subscription/(?P<subscription_id>\d+)/$',
views.GenerateSubscriptionPdfView.as_view(), name='download_report_subscription'),
]
| {
"content_hash": "297365804ae134b090c4a7110b4ca255",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 119,
"avg_line_length": 58.52542372881356,
"alnum_prop": 0.6689834926151172,
"repo_name": "YangWanjun/areaparking",
"id": "7f9a9b1df041b8373fd229eb77d8322b5695e18b",
"size": "3561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "format/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "715"
},
{
"name": "CSS",
"bytes": "16603"
},
{
"name": "HTML",
"bytes": "696337"
},
{
"name": "JavaScript",
"bytes": "1412226"
},
{
"name": "Python",
"bytes": "2820285"
},
{
"name": "SQLPL",
"bytes": "2281"
},
{
"name": "Shell",
"bytes": "882"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("../src/app")
import unittest
from app.functions import get_config,safe_get_config
class FunctionsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_config_key_missing(self):
self.assertIsNone(get_config('hackathon-api.endpoint_test'))
def test_get_config_format_error(self):
self.assertIsNone(get_config('mysql.sql'))
def test_get_config(self):
self.assertEqual(get_config('login.session_minutes'),60)
def test_safe_get_config_default(self):
self.assertIsNone(get_config('mysql.sql'))
self.assertEqual(safe_get_config('mysql.sql','default'),'default')
def test_safe_get_config_value(self):
self.assertEqual(get_config('login.session_minutes'),60)
self.assertEqual(safe_get_config('login.session_minutes',66666),60) | {
"content_hash": "5ff05fa8647c006afe7f181daf45997e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 26.147058823529413,
"alnum_prop": 0.6805399325084365,
"repo_name": "mshubian/BAK_open-hackathon",
"id": "5a560701fbe765e6a268c93f1a7c7db78b8d0f68",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open-hackathon-adminUI/test/app/test_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "294659"
},
{
"name": "HTML",
"bytes": "206952"
},
{
"name": "Java",
"bytes": "8220"
},
{
"name": "JavaScript",
"bytes": "791759"
},
{
"name": "Perl",
"bytes": "889"
},
{
"name": "Python",
"bytes": "259754"
},
{
"name": "Ruby",
"bytes": "154828"
},
{
"name": "Shell",
"bytes": "5227"
}
],
"symlink_target": ""
} |
from ale_python_interface import ALEInterface
from enduro.action import Action
from enduro.control import Controller
from enduro.state import StateExtractor
class Agent(object):
def __init__(self):
self._ale = ALEInterface()
self._ale.setInt('random_seed', 123)
self._ale.setFloat('repeat_action_probability', 0.0)
self._ale.setBool('color_averaging', False)
self._ale.loadROM('roms/enduro.bin')
self._controller = Controller(self._ale)
self._extractor = StateExtractor(self._ale)
self._image = None
def run(self, learn, episodes=1, draw=False):
""" Implements the playing/learning loop.
Args:
learn(bool): Whether the self.learn() function should be called.
episodes (int): The number of episodes to run the agent for.
draw (bool): Whether to overlay the environment state on the frame.
Returns:
None
"""
for e in range(episodes):
# Observe the environment to set the initial state
(grid, self._image) = self._extractor.run(draw=draw, scale=4.0)
self.initialise(grid)
num_frames = self._ale.getFrameNumber()
# Each episode lasts 6500 frames
while self._ale.getFrameNumber() - num_frames < 6500:
# Take an action
self.act()
# Update the environment grid
(grid, self._image) = self._extractor.run(draw=draw, scale=4.0)
self.sense(grid)
# Perform learning if required
if learn:
self.learn()
self.callback(learn, e + 1, self._ale.getFrameNumber() - num_frames)
self._ale.reset_game()
def getActionsSet(self):
""" Returns the set of all possible actions
"""
return [Action.ACCELERATE, Action.RIGHT, Action.LEFT, Action.BREAK]
def move(self, action):
""" Executes the action and advances the game to the next state.
Args:
            action (int): The action which should be executed. Make sure to use
the constants returned by self.getActionsSet()
Returns:
int: The obtained reward after executing the action
"""
return self._controller.move(action)
def initialise(self, grid):
""" Called at the beginning of each episode, mainly used
for state initialisation.
Args:
grid (np.ndarray): 11x10 array with the initial environment grid.
Returns:
None
"""
raise NotImplementedError
def act(self):
""" Called at each loop iteration to choose and execute an action.
Returns:
None
"""
raise NotImplementedError
def sense(self, grid):
""" Called at each loop iteration to construct the new state from
the update environment grid.
Returns:
None
"""
raise NotImplementedError
def learn(self):
""" Called at each loop iteration when the agent is learning. It should
implement the learning procedure.
Returns:
None
"""
raise NotImplementedError
def callback(self, learn, episode, iteration):
""" Called at each loop iteration mainly for reporting purposes.
Args:
learn (bool): Indicates whether the agent is learning or not.
episode (int): The number of the current episode.
iteration (int): The number of the current iteration.
Returns:
None
"""
raise NotImplementedError
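# Minimal subclass sketch showing how the hooks fit together (illustrative only;
# assumes `import random`, and a real agent would implement a learning rule):
#
#   class RandomAgent(Agent):
#       def initialise(self, grid):
#           self.total_reward = 0
#       def act(self):
#           self.total_reward += self.move(random.choice(self.getActionsSet()))
#       def sense(self, grid):
#           pass
#       def learn(self):
#           pass
#       def callback(self, learn, episode, iteration):
#           print(episode, iteration, self.total_reward)
#
#   RandomAgent().run(learn=False, episodes=1)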
| {
"content_hash": "cd88f4597a8e7fec97f1a128823e0307",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 84,
"avg_line_length": 31.168067226890756,
"alnum_prop": 0.5845241304933945,
"repo_name": "mamikonyana/mamikonyana.github.io",
"id": "1217908fe0ce3b0d525f70a5562cefbb39200914",
"size": "3709",
"binary": false,
"copies": "1",
"ref": "refs/heads/flask",
"path": "static/ml_afternoon/presentation_data/practical_s6/code/enduro/agent.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "102"
},
{
"name": "HTML",
"bytes": "11586263"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "95088"
},
{
"name": "Shell",
"bytes": "1662"
},
{
"name": "Stan",
"bytes": "872"
}
],
"symlink_target": ""
} |
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
import $safeprojectname$.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "2a193fdc3a63663e52fa05eec61299b0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 87,
"avg_line_length": 28.82608695652174,
"alnum_prop": 0.6923076923076923,
"repo_name": "DinoV/PTVS",
"id": "c9ba270513edf5e9b8f4552e418f3e521c129dd5",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Templates/Django/ProjectTemplates/Python/Web/WebRoleDjango/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "4035"
},
{
"name": "C",
"bytes": "4974"
},
{
"name": "C#",
"bytes": "13192050"
},
{
"name": "C++",
"bytes": "187194"
},
{
"name": "CSS",
"bytes": "7024"
},
{
"name": "HTML",
"bytes": "45289"
},
{
"name": "JavaScript",
"bytes": "85712"
},
{
"name": "Objective-C",
"bytes": "4201"
},
{
"name": "PowerShell",
"bytes": "135280"
},
{
"name": "Python",
"bytes": "943244"
},
{
"name": "Smarty",
"bytes": "8356"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
import warnings
warnings.simplefilter('ignore', Warning)
from django.conf import settings
from core.tests.backends import *
from core.tests.fields import *
from core.tests.forms import *
from core.tests.indexes import *
from core.tests.inputs import *
from core.tests.loading import *
from core.tests.models import *
from core.tests.query import *
from core.tests.templatetags import *
from core.tests.views import *
from core.tests.utils import *
from core.tests.management_commands import *
| {
"content_hash": "527d18796478f4378e54646d57cf0f5d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 44,
"avg_line_length": 29.11764705882353,
"alnum_prop": 0.793939393939394,
"repo_name": "josesanch/django-haystack",
"id": "2aef8ad618fea691a4d5a0ecc7ed882a2b79ba12",
"size": "495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/core/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "676842"
},
{
"name": "Shell",
"bytes": "1583"
}
],
"symlink_target": ""
} |
'''
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Link: https://leetcode.com/problems/merge-k-sorted-lists/#/description
Example: None
Solution:
Keep a min-heap of the current head node of each list; repeatedly pop the smallest node onto the merged list and push that node's successor.
Source: None
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
import heapq
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
heap = []
for node in lists:
if node:
heapq.heappush(heap, (node.val, node) )
p = head = ListNode(-1)
while heap:
p.next = p = heapq.heappop(heap)[1]
if p.next:
heapq.heappush( heap, (p.next.val, p.next) )
return head.next
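# Complexity: with N nodes in total across k lists, every node passes once
# through a heap of size at most k, so the merge takes O(N log k) time and
# O(k) extra space.
# Illustrative check with two small hand-built lists:
#   a = ListNode(1); a.next = ListNode(4)
#   b = ListNode(2); b.next = ListNode(3)
#   merged = Solution().mergeKLists([a, b])   # 1 -> 2 -> 3 -> 4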
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None | {
"content_hash": "ace0f0fddeff36e203c6b03eacabed87",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 102,
"avg_line_length": 23.681818181818183,
"alnum_prop": 0.5604606525911708,
"repo_name": "supermarkion/Life-Backup",
"id": "9287029f41af0abe2f233cda2df18e30faa84cc4",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/MergeKSortedLists.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "108"
},
{
"name": "C#",
"bytes": "171862"
},
{
"name": "CSS",
"bytes": "513"
},
{
"name": "HTML",
"bytes": "5272"
},
{
"name": "JavaScript",
"bytes": "10918"
},
{
"name": "Python",
"bytes": "92106"
}
],
"symlink_target": ""
} |
print(__doc__)
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
def loadData():
# generating data
X, y = samples_generator.make_classification(n_features=20, \
n_informative=3, \
n_redundant=0, \
n_classes=4, \
n_clusters_per_class=2)
return X, y
# ANOVA SVM-C
def createANOVASVM():
# anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# svm
clf = svm.SVC(kernel='linear')
anova_svm = Pipeline([('anova', anova_filter), \
('svm', clf)])
return anova_svm
def predict(X, y, anova_svm):
anova_svm.fit(X, y)
target = anova_svm.predict(X)
return target
def test():
X, y = loadData()
anova_svm = createANOVASVM()
target = predict(X, y, anova_svm)
    print(target)
if __name__ == '__main__':
test()
| {
"content_hash": "32cf1cb487e923f26a90c8b19c1cc042",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 23.28,
"alnum_prop": 0.5266323024054983,
"repo_name": "kwailamchan/programming-languages",
"id": "27fbd39a1db9844366bc6be6d3c2510941abe0cc",
"size": "1360",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/sklearn/examples/general/pipeline_anova_svm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2321"
},
{
"name": "Batchfile",
"bytes": "7087"
},
{
"name": "C",
"bytes": "2883116"
},
{
"name": "C++",
"bytes": "12948373"
},
{
"name": "CMake",
"bytes": "128629"
},
{
"name": "CSS",
"bytes": "197791"
},
{
"name": "Cuda",
"bytes": "687316"
},
{
"name": "E",
"bytes": "2914"
},
{
"name": "Eiffel",
"bytes": "1073"
},
{
"name": "Fortran",
"bytes": "1332263"
},
{
"name": "HTML",
"bytes": "257547"
},
{
"name": "Java",
"bytes": "102661"
},
{
"name": "JavaScript",
"bytes": "439019"
},
{
"name": "Jupyter Notebook",
"bytes": "5488542"
},
{
"name": "Makefile",
"bytes": "24949"
},
{
"name": "Matlab",
"bytes": "32307"
},
{
"name": "PHP",
"bytes": "21650"
},
{
"name": "PLpgSQL",
"bytes": "16859"
},
{
"name": "Python",
"bytes": "5424664"
},
{
"name": "QMake",
"bytes": "787"
},
{
"name": "R",
"bytes": "105611"
},
{
"name": "Ruby",
"bytes": "152074"
},
{
"name": "SAS",
"bytes": "4505"
},
{
"name": "Scala",
"bytes": "6121"
},
{
"name": "Shell",
"bytes": "49387"
},
{
"name": "Visual Basic",
"bytes": "1174"
}
],
"symlink_target": ""
} |
import pytest
from tests.lib import (_create_test_package, _change_test_package_version,
pyversion)
from tests.lib.local_repos import local_checkout
@pytest.mark.network
def test_install_editable_from_git_with_https(script, tmpdir):
"""
Test cloning from Git with https.
"""
result = script.pip(
'install', '-e',
'%s#egg=pip-test-package' %
local_checkout(
'git+https://github.com/pypa/pip-test-package.git',
tmpdir.join("cache"),
),
expect_error=True,
)
result.assert_installed('pip-test-package', with_files=['.git'])
@pytest.mark.network
def test_install_noneditable_git(script, tmpdir):
"""
Test installing from a non-editable git URL with a given tag.
"""
result = script.pip(
'install',
'git+https://github.com/pypa/pip-test-package.git'
'@0.1.1#egg=pip-test-package'
)
egg_info_folder = (
script.site_packages /
'pip_test_package-0.1.1-py%s.egg-info' % pyversion
)
result.assert_installed('piptestpackage',
without_egg_link=True,
editable=False)
assert egg_info_folder in result.files_created, str(result)
@pytest.mark.network
def test_git_with_sha1_revisions(script):
"""
Git backend should be able to install from SHA1 revisions
"""
version_pkg_path = _create_test_package(script)
_change_test_package_version(script, version_pkg_path)
sha1 = script.run(
'git', 'rev-parse', 'HEAD~1',
cwd=version_pkg_path,
).stdout.strip()
script.pip(
'install', '-e',
'%s@%s#egg=version_pkg' %
('git+file://' + version_pkg_path.abspath.replace('\\', '/'), sha1)
)
version = script.run('version_pkg')
assert '0.1' in version.stdout, version.stdout
@pytest.mark.network
def test_git_with_branch_name_as_revision(script):
"""
Git backend should be able to install from branch names
"""
version_pkg_path = _create_test_package(script)
script.run(
'git', 'checkout', '-b', 'test_branch',
expect_stderr=True,
cwd=version_pkg_path,
)
_change_test_package_version(script, version_pkg_path)
script.pip(
'install', '-e', '%s@test_branch#egg=version_pkg' %
('git+file://' + version_pkg_path.abspath.replace('\\', '/'))
)
version = script.run('version_pkg')
assert 'some different version' in version.stdout
@pytest.mark.network
def test_git_with_tag_name_as_revision(script):
"""
Git backend should be able to install from tag names
"""
version_pkg_path = _create_test_package(script)
script.run(
'git', 'tag', 'test_tag',
expect_stderr=True,
cwd=version_pkg_path,
)
_change_test_package_version(script, version_pkg_path)
script.pip(
'install', '-e', '%s@test_tag#egg=version_pkg' %
('git+file://' + version_pkg_path.abspath.replace('\\', '/'))
)
version = script.run('version_pkg')
assert '0.1' in version.stdout
@pytest.mark.network
def test_git_with_tag_name_and_update(script, tmpdir):
"""
Test cloning a git repository and updating to a different version.
"""
result = script.pip(
'install', '-e', '%s#egg=pip-test-package' %
local_checkout(
'git+http://github.com/pypa/pip-test-package.git',
tmpdir.join("cache"),
),
expect_error=True,
)
result.assert_installed('pip-test-package', with_files=['.git'])
result = script.pip(
'install', '--global-option=--version', '-e',
'%s@0.1.2#egg=pip-test-package' %
local_checkout(
'git+http://github.com/pypa/pip-test-package.git',
tmpdir.join("cache"),
),
expect_error=True,
)
assert '0.1.2' in result.stdout
@pytest.mark.network
def test_git_branch_should_not_be_changed(script, tmpdir):
"""
    Editable installations should not change the current branch;
    related to issues #32 and #161
"""
script.pip(
'install', '-e', '%s#egg=pip-test-package' %
local_checkout(
'git+http://github.com/pypa/pip-test-package.git',
tmpdir.join("cache"),
),
expect_error=True,
)
source_dir = script.venv_path / 'src' / 'pip-test-package'
result = script.run('git', 'branch', cwd=source_dir)
assert '* master' in result.stdout, result.stdout
@pytest.mark.network
def test_git_with_non_editable_unpacking(script, tmpdir):
"""
Test cloning a git repository from a non-editable URL with a given tag.
"""
result = script.pip(
'install', '--global-option=--version',
local_checkout(
'git+http://github.com/pypa/pip-test-package.git@0.1.2'
'#egg=pip-test-package',
tmpdir.join("cache")
),
expect_error=True,
)
assert '0.1.2' in result.stdout
@pytest.mark.network
def test_git_with_editable_where_egg_contains_dev_string(script, tmpdir):
"""
Test cloning a git repository from an editable url which contains "dev"
string
"""
result = script.pip(
'install', '-e',
'%s#egg=django-devserver' %
local_checkout(
'git+git://github.com/dcramer/django-devserver.git',
tmpdir.join("cache")
)
)
result.assert_installed('django-devserver', with_files=['.git'])
@pytest.mark.network
def test_git_with_non_editable_where_egg_contains_dev_string(script, tmpdir):
"""
Test cloning a git repository from a non-editable url which contains "dev"
string
"""
result = script.pip(
'install',
'%s#egg=django-devserver' %
local_checkout(
'git+git://github.com/dcramer/django-devserver.git',
tmpdir.join("cache")
),
)
devserver_folder = script.site_packages / 'devserver'
assert devserver_folder in result.files_created, str(result)
@pytest.mark.network
def test_git_with_ambiguous_revs(script):
"""
Test git with two "names" (tag/branch) pointing to the same commit
"""
version_pkg_path = _create_test_package(script)
package_url = (
'git+file://%s@0.1#egg=version_pkg' %
(version_pkg_path.abspath.replace('\\', '/'))
)
script.run('git', 'tag', '0.1', cwd=version_pkg_path)
result = script.pip('install', '-e', package_url)
assert 'Could not find a tag or branch' not in result.stdout
# it is 'version-pkg' instead of 'version_pkg' because
# egg-link name is version-pkg.egg-link because it is a single .py module
result.assert_installed('version-pkg', with_files=['.git'])
@pytest.mark.network
def test_git_works_with_editable_non_origin_repo(script):
# set up, create a git repo and install it as editable from a local
# directory path
version_pkg_path = _create_test_package(script)
script.pip('install', '-e', version_pkg_path.abspath)
    # 'freeze'ing this should not fall over, but should emit a warning on
    # stderr
result = script.pip('freeze', expect_stderr=True)
assert "Error when trying to get requirement" in result.stderr
assert "Could not determine repository location" in result.stdout
assert "version-pkg==0.1" in result.stdout
| {
"content_hash": "00b319e5bca72176aea789192b8c0b8c",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 79,
"avg_line_length": 31.676724137931036,
"alnum_prop": 0.6113756973737924,
"repo_name": "newsteinking/docker",
"id": "06ca8ffb1b47f6af945f6b7108f13b8cca5e9b22",
"size": "7349",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/functional/test_install_vcs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1835"
},
{
"name": "Python",
"bytes": "3994890"
},
{
"name": "Shell",
"bytes": "2437"
}
],
"symlink_target": ""
} |
import argparse
from ..basic.runner import PygameRunner
__author__ = 'fyabc'
class AntSpider(PygameRunner):
def main_loop(self):
pass
def draw(self):
pass
def draw_background(self):
pass
def real_main(options):
pass
def build_parser():
parser = argparse.ArgumentParser(prog='antspider', description='A simple game of ants and spiders.')
return parser
def main():
parser = build_parser()
options = parser.parse_args()
real_main(options)
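# Entry-point guard added for illustration; an assumption, since the original
# package may invoke main() from a separate launcher module instead:
if __name__ == '__main__':
    main()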
| {
"content_hash": "e8c7877d0b81ec0ad3517087ff85125f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 104,
"avg_line_length": 15.029411764705882,
"alnum_prop": 0.6477495107632094,
"repo_name": "fyabc/MiniGames",
"id": "fed9779e156cc93bd0c6a93bbde5c2e13a7b8d43",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GamePack/GamePack/AntSpider/antspider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "821180"
}
],
"symlink_target": ""
} |
"""
sqldiff.py - Prints the (approximated) difference between models and database
TODO:
- better support for relations
- better support for constraints (mainly postgresql?)
- support for table spaces with postgresql
- when a table is not managed (meta.managed==False) then only do a one-way
sqldiff ? show differences from db->table but not the other way around since
it's not managed.
KNOWN ISSUES:
 - MySQL has by far the most problems with introspection. Please be
   careful when using MySQL with sqldiff.
 - Booleans are reported back as Integers, so there's no way to know if
there was a real change.
- Varchar sizes are reported back without unicode support so their size
may change in comparison to the real length of the varchar.
- Some of the 'fixes' to counter these problems might create false
positives or false negatives.
"""
from django.core.management.base import BaseCommand
from django.core.management import sql as _sql
from django.core.management import CommandError
from django.core.management.color import no_style
from django.db import transaction, connection
from django.db.models.fields import IntegerField
from optparse import make_option
ORDERING_FIELD = IntegerField('_order', null=True)
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
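# Added illustration of the helper above: nested lists/tuples are flattened
# into a single sequence, e.g. flatten([1, (2, 3), [4, [5]]]) -> [1, 2, 3, 4, 5]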
def all_local_fields(meta):
all_fields = []
if meta.managed:
if meta.proxy:
for parent in meta.parents:
all_fields.extend(all_local_fields(parent._meta))
else:
for f in meta.local_fields:
col_type = f.db_type(connection=connection)
if col_type is None:
continue
all_fields.append(f)
return all_fields
class SQLDiff(object):
DATA_TYPES_REVERSE_OVERRIDE = {}
DIFF_TYPES = [
'error',
'comment',
'table-missing-in-db',
'field-missing-in-db',
'field-missing-in-model',
'fkey-missing-in-db',
'fkey-missing-in-model',
'index-missing-in-db',
'index-missing-in-model',
'unique-missing-in-db',
'unique-missing-in-model',
'field-type-differ',
'field-parameter-differ',
'notnull-differ',
]
DIFF_TEXTS = {
'error': 'error: %(0)s',
'comment': 'comment: %(0)s',
'table-missing-in-db': "table '%(0)s' missing in database",
'field-missing-in-db': "field '%(1)s' defined in model but missing in database",
'field-missing-in-model': "field '%(1)s' defined in database but missing in model",
'fkey-missing-in-db': "field '%(1)s' FOREIGN KEY defined in model but missing in database",
'fkey-missing-in-model': "field '%(1)s' FOREIGN KEY defined in database but missing in model",
'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database",
'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model",
'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database",
'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model",
'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'",
'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'",
'notnull-differ': "field '%(1)s' null differ: db='%(3)s', model='%(2)s'",
}
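    # Added illustration: with hypothetical values, the 'field-missing-in-db'
    # template above renders as
    #   field 'price' defined in model but missing in database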
SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[2:])))
SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1])))
SQL_FKEY_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s %s (%s)%s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[4:])), style.SQL_KEYWORD('REFERENCES'), style.SQL_TABLE(qn(args[2])), style.SQL_FIELD(qn(args[3])), connection.ops.deferrable_sql())
SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[3]))
# FIXME: need to lookup index name instead of just appending _idx to table + fieldname
SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))))
SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1])))
# FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname
SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2])))))
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('MODIFY'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))
SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0]))
SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])
can_detect_notnull_differ = False
def __init__(self, app_models, options):
self.app_models = app_models
self.options = options
self.dense = options.get('dense_output', False)
try:
self.introspection = connection.introspection
except AttributeError:
from django.db import get_introspection_module
self.introspection = get_introspection_module()
self.cursor = connection.cursor()
self.django_tables = self.get_django_tables(options.get('only_existing', True))
self.db_tables = self.introspection.get_table_list(self.cursor)
self.differences = []
self.unknown_db_fields = {}
self.new_db_fields = set()
self.null = {}
self.DIFF_SQL = {
'error': self.SQL_ERROR,
'comment': self.SQL_COMMENT,
'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB,
'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB,
'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
'fkey-missing-in-db': self.SQL_FKEY_MISSING_IN_DB,
'fkey-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB,
'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL,
'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB,
'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL,
'field-type-differ': self.SQL_FIELD_TYPE_DIFFER,
'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER,
'notnull-differ': self.SQL_NOTNULL_DIFFER,
}
if self.can_detect_notnull_differ:
self.load_null()
def load_null(self):
raise NotImplementedError("load_null functions must be implemented if diff backend has 'can_detect_notnull_differ' set to True")
def add_app_model_marker(self, app_label, model_name):
self.differences.append((app_label, model_name, []))
def add_difference(self, diff_type, *args):
assert diff_type in self.DIFF_TYPES, 'Unknown difference type'
self.differences[-1][-1].append((diff_type, args))
def get_django_tables(self, only_existing):
try:
django_tables = self.introspection.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
try:
django_tables = _sql.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before svn r7568
django_tables = _sql.django_table_list(only_existing=only_existing)
return django_tables
def sql_to_dict(self, query, param):
""" sql_to_dict(query, param) -> list of dicts
code from snippet at http://www.djangosnippets.org/snippets/1383/
"""
cursor = connection.cursor()
cursor.execute(query, param)
fieldnames = [name[0] for name in cursor.description]
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
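    # Added illustration of the return shape (the row value is hypothetical):
    #   self.sql_to_dict("SELECT typname FROM pg_type WHERE typelem=%s;", [16])
    #   -> [{'typname': '_bool'}]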
def get_field_model_type(self, field):
return field.db_type(connection=connection)
def get_field_db_type(self, description, field=None, table_name=None):
from django.db import models
# DB-API cursor.description
#(name, type_code, display_size, internal_size, precision, scale, null_ok) = description
type_code = description[1]
if type_code in self.DATA_TYPES_REVERSE_OVERRIDE:
reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code]
else:
try:
try:
reverse_type = self.introspection.data_types_reverse[type_code]
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code)
except KeyError:
reverse_type = self.get_field_db_type_lookup(type_code)
if not reverse_type:
# type_code not found in data_types_reverse map
key = (self.differences[-1][:2], description[:2])
if key not in self.unknown_db_fields:
self.unknown_db_fields[key] = 1
self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code))
return None
kwargs = {}
if isinstance(reverse_type, tuple):
kwargs.update(reverse_type[1])
reverse_type = reverse_type[0]
if reverse_type == "CharField" and description[3]:
kwargs['max_length'] = description[3]
if reverse_type == "DecimalField":
kwargs['max_digits'] = description[4]
kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5]
if description[6]:
kwargs['blank'] = True
if not reverse_type in ('TextField', 'CharField'):
kwargs['null'] = True
if '.' in reverse_type:
from django.utils import importlib
# TODO: when was importlib added to django.utils ? and do we
# need to add backwards compatibility code ?
module_path, package_name = reverse_type.rsplit('.', 1)
module = importlib.import_module(module_path)
field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection)
else:
field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection)
return field_db_type
def get_field_db_type_lookup(self, type_code):
return None
def get_field_db_nullable(self, field, table_name):
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
return self.null.get((tablespace, table_name, field.attname), 'fixme')
def strip_parameters(self, field_type):
if field_type and field_type != 'double precision':
return field_type.split(" ")[0].split("(")[0].lower()
return field_type
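    # Added illustration: strip_parameters('varchar(255)') -> 'varchar',
    # strip_parameters('numeric(10, 2)') -> 'numeric'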
def find_unique_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
def find_unique_missing_in_model(self, meta, table_indexes, table_name):
# TODO: Postgresql does not list unique_togethers in table_indexes
# MySQL does
fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
for att_name, att_opts in table_indexes.iteritems():
if att_opts['unique'] and att_name in fields and not fields[att_name]:
if att_name in flatten(meta.unique_together):
continue
self.add_difference('unique-missing-in-model', table_name, att_name)
def find_index_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.db_index:
attname = field.db_column or field.attname
if not attname in table_indexes:
self.add_difference('index-missing-in-db', table_name, attname, '', '')
db_type = field.db_type(connection=connection)
if db_type.startswith('varchar'):
self.add_difference('index-missing-in-db', table_name, attname, 'like', ' varchar_pattern_ops')
if db_type.startswith('text'):
self.add_difference('index-missing-in-db', table_name, attname, 'like', ' text_pattern_ops')
def find_index_missing_in_model(self, meta, table_indexes, table_name):
fields = dict([(field.name, field) for field in all_local_fields(meta)])
for att_name, att_opts in table_indexes.iteritems():
if att_name in fields:
field = fields[att_name]
if field.db_index:
continue
if att_opts['primary_key'] and field.primary_key:
continue
if att_opts['unique'] and field.unique:
continue
if att_opts['unique'] and att_name in flatten(meta.unique_together):
continue
self.add_difference('index-missing-in-model', table_name, att_name)
db_type = field.db_type(connection=connection)
if db_type.startswith('varchar') or db_type.startswith('text'):
self.add_difference('index-missing-in-model', table_name, att_name, 'like')
def find_field_missing_in_model(self, fieldmap, table_description, table_name):
for row in table_description:
if row[0] not in fieldmap:
self.add_difference('field-missing-in-model', table_name, row[0])
def find_field_missing_in_db(self, fieldmap, table_description, table_name):
db_fields = [row[0] for row in table_description]
for field_name, field in fieldmap.iteritems():
if field_name not in db_fields:
field_output = []
if field.rel:
field_output.extend([field.rel.to._meta.db_table, field.rel.to._meta.get_field(field.rel.field_name).column])
op = 'fkey-missing-in-db'
else:
op = 'field-missing-in-db'
field_output.append(field.db_type(connection=connection))
if not field.null:
field_output.append('NOT NULL')
self.add_difference(op, table_name, field_name, *field_output)
self.new_db_fields.add((table_name, field_name))
def find_field_type_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field)
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not self.strip_parameters(db_type) == self.strip_parameters(model_type):
self.add_difference('field-type-differ', table_name, field.name, model_type, db_type)
def find_field_parameter_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
continue
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not model_type == db_type:
self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
def find_field_notnull_differ(self, meta, table_description, table_name):
if not self.can_detect_notnull_differ:
return
for field in all_local_fields(meta):
if (table_name, field.attname) in self.new_db_fields:
continue
null = self.get_field_db_nullable(field, table_name)
if field.null != null:
action = field.null and 'DROP' or 'SET'
self.add_difference('notnull-differ', table_name, field.attname, action)
@transaction.commit_manually
def find_differences(self):
cur_app_label = None
for app_model in self.app_models:
meta = app_model._meta
table_name = meta.db_table
app_label = meta.app_label
if cur_app_label != app_label:
# Marker indicating start of difference scan for this table_name
self.add_app_model_marker(app_label, app_model.__name__)
#if not table_name in self.django_tables:
if not table_name in self.db_tables:
# Table is missing from database
self.add_difference('table-missing-in-db', table_name)
continue
table_indexes = self.introspection.get_indexes(self.cursor, table_name)
fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)])
# add ordering field if model uses order_with_respect_to
if meta.order_with_respect_to:
fieldmap['_order'] = ORDERING_FIELD
try:
table_description = self.introspection.get_table_description(self.cursor, table_name)
except Exception as e:
self.add_difference('error', 'unable to introspect table: %s' % str(e).strip())
transaction.rollback() # reset transaction
continue
else:
transaction.commit()
# Fields which are defined in database but not in model
# 1) find: 'unique-missing-in-model'
self.find_unique_missing_in_model(meta, table_indexes, table_name)
# 2) find: 'index-missing-in-model'
self.find_index_missing_in_model(meta, table_indexes, table_name)
# 3) find: 'field-missing-in-model'
self.find_field_missing_in_model(fieldmap, table_description, table_name)
# Fields which are defined in models but not in database
# 4) find: 'field-missing-in-db'
self.find_field_missing_in_db(fieldmap, table_description, table_name)
# 5) find: 'unique-missing-in-db'
self.find_unique_missing_in_db(meta, table_indexes, table_name)
# 6) find: 'index-missing-in-db'
self.find_index_missing_in_db(meta, table_indexes, table_name)
# Fields which have a different type or parameters
# 7) find: 'type-differs'
self.find_field_type_differ(meta, table_description, table_name)
# 8) find: 'type-parameter-differs'
self.find_field_parameter_differ(meta, table_description, table_name)
# 9) find: 'field-notnull'
self.find_field_notnull_differ(meta, table_description, table_name)
def print_diff(self, style=no_style()):
""" print differences to stdout """
if self.options.get('sql', True):
self.print_diff_sql(style)
else:
self.print_diff_text(style)
def print_diff_text(self, style):
if not self.can_detect_notnull_differ:
print(style.NOTICE("# Detecting notnull changes not implemented for this database backend"))
print("")
cur_app_label = None
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print("%s %s" % (style.NOTICE("+ Application:"), style.SQL_TABLE(app_label)))
cur_app_label = app_label
if not self.dense:
print("%s %s" % (style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name)))
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args))
text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'")))
if not self.dense:
print("%s %s" % (style.NOTICE("|--+"), text))
else:
print("%s %s %s %s %s" % (style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text))
def print_diff_sql(self, style):
if not self.can_detect_notnull_differ:
print(style.NOTICE("-- Detecting notnull changes not implemented for this database backend"))
print("")
cur_app_label = None
qn = connection.ops.quote_name
has_differences = max([len(diffs) for app_label, model_name, diffs in self.differences])
if not has_differences:
if not self.dense:
print(style.SQL_KEYWORD("-- No differences"))
else:
print(style.SQL_KEYWORD("BEGIN;"))
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print(style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label)))
cur_app_label = app_label
if not self.dense:
print(style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name)))
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_SQL[diff_type](style, qn, diff_args)
if self.dense:
text = text.replace("\n\t", " ")
print(text)
print(style.SQL_KEYWORD("COMMIT;"))
class GenericSQLDiff(SQLDiff):
can_detect_notnull_differ = False
class MySQLDiff(SQLDiff):
can_detect_notnull_differ = False
# All the MySQL hacks together create something of a problem
# Fixing one bug in MySQL creates another issue. So just keep in mind
# that this is way unreliable for MySQL atm.
def get_field_db_type(self, description, field=None, table_name=None):
from MySQLdb.constants import FIELD_TYPE
        # weird bug? in mysql db-api where it returns three times the correct value for field length
        # if I remember correctly it had something to do with unicode strings
        # TODO: fix this in a more meaningful and better understood manner
description = list(description)
if description[1] not in [FIELD_TYPE.TINY, FIELD_TYPE.SHORT]: # exclude tinyints from conversion.
description[3] = description[3] / 3
description[4] = description[4] / 3
db_type = super(MySQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
if field.primary_key and (db_type == 'integer' or db_type == 'bigint'):
db_type += ' AUTO_INCREMENT'
            # MySQL, like sqlite, does not really distinguish between char and varchar
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
            # MySQL stores booleans as 'tinyint(1)' and introspection reports that as an integer;
            # just convert it back to its proper type, a bool is a bool and nothing else.
if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1:
db_type = 'bool'
if db_type == 'integer' and description[1] == FIELD_TYPE.SHORT:
db_type = 'smallint UNSIGNED' # FIXME: what about if it's not UNSIGNED ?
return db_type
class SqliteSQLDiff(SQLDiff):
can_detect_notnull_differ = True
def load_null(self):
for table_name in self.db_tables:
# sqlite does not support tablespaces
tablespace = "public"
# index, column_name, column_type, nullable, default_value
# see: http://www.sqlite.org/pragma.html#pragma_table_info
for table_info in self.sql_to_dict("PRAGMA table_info(%s);" % table_name, []):
key = (tablespace, table_name, table_info['name'])
self.null[key] = not table_info['notnull']
    # Unique does not seem to be implied on Sqlite for primary keys.
    # If this turns out to be more generic among databases, it might be useful
    # to add to the superclass's find_unique_missing_in_db method.
def find_unique_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
if attname in table_indexes and table_indexes[attname]['primary_key']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
# Finding Indexes by using the get_indexes dictionary doesn't seem to work
# for sqlite.
def find_index_missing_in_db(self, meta, table_indexes, table_name):
pass
def find_index_missing_in_model(self, meta, table_indexes, table_name):
pass
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(SqliteSQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
return db_type
class PostgresqlSQLDiff(SQLDiff):
can_detect_notnull_differ = True
DATA_TYPES_REVERSE_OVERRIDE = {
1042: 'CharField',
# postgis types (TODO: support is very incomplete)
17506: 'django.contrib.gis.db.models.fields.PointField',
55902: 'django.contrib.gis.db.models.fields.MultiPolygonField',
}
DATA_TYPES_REVERSE_NAME = {
'hstore': 'django_hstore.hstore.DictionaryField',
}
# Hopefully in the future we can add constraint checking and other more
# advanced checks based on this database.
SQL_LOAD_CONSTRAINTS = """
SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid)
FROM pg_constraint
INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey)
INNER JOIN pg_class ON conrelid=pg_class.oid
INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace
ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname;
"""
SQL_LOAD_NULL = """
SELECT nspname, relname, attname, attnotnull
FROM pg_attribute
INNER JOIN pg_class ON attrelid=pg_class.oid
INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace;
"""
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER COLUMN'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))
def __init__(self, app_models, options):
SQLDiff.__init__(self, app_models, options)
self.check_constraints = {}
self.load_constraints()
def load_null(self):
for dct in self.sql_to_dict(self.SQL_LOAD_NULL, []):
key = (dct['nspname'], dct['relname'], dct['attname'])
self.null[key] = not dct['attnotnull']
def load_constraints(self):
for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []):
key = (dct['nspname'], dct['relname'], dct['attname'])
if 'CHECK' in dct['pg_get_constraintdef']:
self.check_constraints[key] = dct
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
if field.primary_key:
if db_type == 'integer':
db_type = 'serial'
elif db_type == 'bigint':
db_type = 'bigserial'
if table_name:
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
check_constraint = self.check_constraints.get((tablespace, table_name, field.attname), {}).get('pg_get_constraintdef', None)
if check_constraint:
check_constraint = check_constraint.replace("((", "(")
check_constraint = check_constraint.replace("))", ")")
check_constraint = '("'.join([')' in e and '" '.join(p.strip('"') for p in e.split(" ", 1)) or e for e in check_constraint.split("(")])
                    # TODO: might be more than one constraint in the definition?
db_type += ' ' + check_constraint
return db_type
@transaction.autocommit
def get_field_db_type_lookup(self, type_code):
try:
name = self.sql_to_dict("SELECT typname FROM pg_type WHERE typelem=%s;", [type_code])[0]['typname']
return self.DATA_TYPES_REVERSE_NAME.get(name.strip('_'))
except (IndexError, KeyError):
pass
"""
def find_field_type_differ(self, meta, table_description, table_name):
def callback(field, description, model_type, db_type):
if field.primary_key and db_type=='integer':
db_type = 'serial'
return model_type, db_type
super(PostgresqlSQLDiff, self).find_field_type_differ(meta, table_description, table_name, callback)
"""
DATABASE_SQLDIFF_CLASSES = {
'postgis': PostgresqlSQLDiff,
'postgresql_psycopg2': PostgresqlSQLDiff,
'postgresql': PostgresqlSQLDiff,
'mysql': MySQLDiff,
'sqlite3': SqliteSQLDiff,
'oracle': GenericSQLDiff
}
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--all-applications', '-a', action='store_true', dest='all_applications',
help="Automaticly include all application from INSTALLED_APPS."),
make_option('--not-only-existing', '-e', action='store_false', dest='only_existing',
help="Check all tables that exist in the database, not only tables that should exist based on models."),
make_option('--dense-output', '-d', action='store_true', dest='dense_output',
help="Shows the output in dense format, normally output is spreaded over multiple lines."),
make_option('--output_text', '-t', action='store_false', dest='sql', default=True,
help="Outputs the differences as descriptive text instead of SQL"),
)
help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).
It indicates how columns in the database are different from the sql that would
be generated by Django. This command is not a database migration tool. (Though
it can certainly help) It's purpose is to show the current differences as a way
to check/debug ur models compared to the real database tables and columns."""
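    # Added illustration of the plain-text report layout produced by
    # print_diff_text (app and model names are hypothetical):
    #   + Application: shop
    #   |-+ Differences for model: Item
    #   |--+ field 'name' not of same type: db='varchar(50)', model='text'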
output_transaction = False
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django import VERSION
if VERSION[:2] < (1, 0):
raise CommandError("SQLDiff only support Django 1.0 or higher!")
from django.db import models
from django.conf import settings
engine = None
if hasattr(settings, 'DATABASES'):
engine = settings.DATABASES['default']['ENGINE']
else:
engine = settings.DATABASE_ENGINE
if engine == 'dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set DATABASE_ENGINE.
raise CommandError("""Django doesn't know which syntax to use for your SQL statements,
because you haven't specified the DATABASE_ENGINE setting.
Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.""")
if options.get('all_applications', False):
app_models = models.get_models(include_auto_created=True)
else:
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (models.ImproperlyConfigured, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
app_models = []
for app in app_list:
app_models.extend(models.get_models(app, include_auto_created=True))
## remove all models that are not managed by Django
#app_models = [model for model in app_models if getattr(model._meta, 'managed', True)]
if not app_models:
            raise CommandError('Unable to execute sqldiff: no models found.')
if not engine:
engine = connection.__module__.split('.')[-2]
if '.' in engine:
engine = engine.split('.')[-1]
cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
sqldiff_instance = cls(app_models, options)
sqldiff_instance.find_differences()
sqldiff_instance.print_diff(self.style)
return
| {
"content_hash": "4c219312337523cfd6209c735d0c26b0",
"timestamp": "",
"source": "github",
"line_count": 760,
"max_line_length": 448,
"avg_line_length": 49.40657894736842,
"alnum_prop": 0.6047830834376415,
"repo_name": "bop/bauhaus",
"id": "33c8749474489b4b38d98d1816b3423961cc4038",
"size": "37549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/django_extensions/management/commands/sqldiff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "145210"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1013469"
},
{
"name": "JavaScript",
"bytes": "267371"
},
{
"name": "Python",
"bytes": "6660999"
},
{
"name": "Shell",
"bytes": "4317"
}
],
"symlink_target": ""
} |
import time
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
time.sleep(1)
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
| {
"content_hash": "b33aaa692916431fb7e400925cd634c7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 21.25,
"alnum_prop": 0.6352941176470588,
"repo_name": "yjwx0017/test",
"id": "265631a227b037702747c63d8bc53d2fbb50708f",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-codes/100-exercises/example10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "37624"
},
{
"name": "CMake",
"bytes": "1077"
},
{
"name": "CSS",
"bytes": "600"
},
{
"name": "HTML",
"bytes": "1724"
},
{
"name": "Python",
"bytes": "16528"
},
{
"name": "QMake",
"bytes": "878"
}
],
"symlink_target": ""
} |
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddNodesRequest(AbstractModel):
"""AddNodes请求参数结构体
"""
def __init__(self):
r"""
        :param Placement: Location of the instances in the cluster.
        :type Placement: :class:`tencentcloud.thpc.v20220401.models.Placement`
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param ImageId: A valid [image](https://cloud.tencent.com/document/product/213/4940) ID in the format `img-xxx`. Currently only public images are supported.
        :type ImageId: str
        :param VirtualPrivateCloud: Virtual private cloud (VPC) configuration.
        :type VirtualPrivateCloud: :class:`tencentcloud.thpc.v20220401.models.VirtualPrivateCloud`
        :param Count: Number of nodes to add.
        :type Count: int
        :param InstanceChargeType: Node [billing type](https://cloud.tencent.com/document/product/213/2180).<br><li>PREPAID: prepaid, i.e. monthly subscription<br><li>POSTPAID_BY_HOUR: postpaid by the hour<br><li>SPOTPAID: spot instances<br>Default: POSTPAID_BY_HOUR.
        :type InstanceChargeType: str
        :param InstanceChargePrepaid: Prepaid (monthly subscription) settings. This parameter specifies the purchase duration, auto-renewal and other attributes of monthly subscription nodes. It is required when the node billing type is prepaid.
        :type InstanceChargePrepaid: :class:`tencentcloud.thpc.v20220401.models.InstanceChargePrepaid`
        :param InstanceType: Node instance type. Different instance types provide different resource specifications.<br><li>For the available values, call the [DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749) API to get the latest specification table, or see [Instance Types](https://cloud.tencent.com/document/product/213/11518).
        :type InstanceType: str
        :param SystemDisk: Node system disk configuration. If not specified, the system default is used.
        :type SystemDisk: list of SystemDisk
        :param DataDisks: Node data disk configuration. If not specified, no data disks are purchased by default. Up to 21 data disks can be specified at purchase time, including at most 1 LOCAL_BASIC or LOCAL_SSD data disk and at most 20 CLOUD_BASIC, CLOUD_PREMIUM or CLOUD_SSD data disks.
        :type DataDisks: list of DataDisk
        :param InternetAccessible: Public network bandwidth settings. If not specified, the default public bandwidth is 0 Mbps.
        :type InternetAccessible: :class:`tencentcloud.thpc.v20220401.models.InternetAccessible`
        :param InstanceName: Node display name.
        If not specified, the default display name 'Unnamed' is shown.
        Up to 60 characters are supported.
        :type InstanceName: str
        :param LoginSettings: Cluster login settings.
        :type LoginSettings: :class:`tencentcloud.thpc.v20220401.models.LoginSettings`
        :param SecurityGroupIds: Security groups the cluster instances belong to. The value can be obtained from the sgId field returned by [DescribeSecurityGroups](https://cloud.tencent.com/document/api/215/15808). If not specified, the default security group is bound.
        :type SecurityGroupIds: list of str
        :param ClientToken: A string used to guarantee the idempotency of the request. It is generated by the client, must be unique across requests and may contain at most 64 ASCII characters. If not specified, idempotency of the request cannot be guaranteed.
        :type ClientToken: str
        :param QueueName: Queue name.
        :type QueueName: str
        :param NodeRole: Type of node to add. Default: Compute.<br><li>Compute: compute node.<br><li>Login: login node.
        :type NodeRole: str
        :param DryRun: Whether to only dry-run this request.
        true: a validation request is sent and no instances are created. Checks include whether required parameters are filled in, the request format, service limits and CVM inventory.
        If the check fails, the corresponding error code is returned;
        if the check passes, the RequestId is returned.
        false (default): a normal request is sent and instances are created once the check passes.
"""
self.Placement = None
self.ClusterId = None
self.ImageId = None
self.VirtualPrivateCloud = None
self.Count = None
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
self.InstanceType = None
self.SystemDisk = None
self.DataDisks = None
self.InternetAccessible = None
self.InstanceName = None
self.LoginSettings = None
self.SecurityGroupIds = None
self.ClientToken = None
self.QueueName = None
self.NodeRole = None
self.DryRun = None
def _deserialize(self, params):
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
self.ClusterId = params.get("ClusterId")
self.ImageId = params.get("ImageId")
if params.get("VirtualPrivateCloud") is not None:
self.VirtualPrivateCloud = VirtualPrivateCloud()
self.VirtualPrivateCloud._deserialize(params.get("VirtualPrivateCloud"))
self.Count = params.get("Count")
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
self.InstanceType = params.get("InstanceType")
if params.get("SystemDisk") is not None:
self.SystemDisk = []
for item in params.get("SystemDisk"):
obj = SystemDisk()
obj._deserialize(item)
self.SystemDisk.append(obj)
if params.get("DataDisks") is not None:
self.DataDisks = []
for item in params.get("DataDisks"):
obj = DataDisk()
obj._deserialize(item)
self.DataDisks.append(obj)
if params.get("InternetAccessible") is not None:
self.InternetAccessible = InternetAccessible()
self.InternetAccessible._deserialize(params.get("InternetAccessible"))
self.InstanceName = params.get("InstanceName")
if params.get("LoginSettings") is not None:
self.LoginSettings = LoginSettings()
self.LoginSettings._deserialize(params.get("LoginSettings"))
self.SecurityGroupIds = params.get("SecurityGroupIds")
self.ClientToken = params.get("ClientToken")
self.QueueName = params.get("QueueName")
self.NodeRole = params.get("NodeRole")
self.DryRun = params.get("DryRun")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
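# A minimal usage sketch (added; the "hpc-xxxxxx"/"img-xxxxxx" IDs are
# hypothetical, and the API client class lives elsewhere in this SDK):
#   req = AddNodesRequest()
#   req._deserialize({"ClusterId": "hpc-xxxxxx", "ImageId": "img-xxxxxx",
#                     "QueueName": "compute", "Count": 2})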
class AddNodesResponse(AbstractModel):
"""AddNodes返回参数结构体
"""
def __init__(self):
r"""
        :param RequestId: The unique request ID, which is returned with every request. The RequestId of this request is needed when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class BindAutoScalingGroupRequest(AbstractModel):
"""BindAutoScalingGroup请求参数结构体
"""
def __init__(self):
r"""
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param LaunchConfigurationId: Auto scaling launch configuration ID.
        :type LaunchConfigurationId: str
        :param AutoScalingGroupId: Auto scaling group ID.
        :type AutoScalingGroupId: str
        :param QueueName: Queue name.
        :type QueueName: str
        :param ExpansionBusyTime: Continuous job waiting time, i.e. how long jobs in the queue have been waiting without interruption. Unit: seconds. Default: 120.
        :type ExpansionBusyTime: int
        :param ShrinkIdleTime: Continuous node idle (no running jobs) time, i.e. how long a node has been idle without interruption. Unit: seconds. Default: 300.
        :type ShrinkIdleTime: int
        :param EnableAutoExpansion: Whether to enable automatic scale-out. Default: true.
        :type EnableAutoExpansion: bool
        :param EnableAutoShrink: Whether to enable automatic scale-in. Default: true.
        :type EnableAutoShrink: bool
        :param DryRun: Whether to only dry-run this request.
        true: a validation request is sent and the auto scaling group is not bound. Checks include whether required parameters are filled in, the request format and service limits.
        If the check fails, the corresponding error code is returned;
        if the check passes, the RequestId is returned.
        false (default): a normal request is sent and the auto scaling group is bound once the check passes.
        :type DryRun: bool
"""
self.ClusterId = None
self.LaunchConfigurationId = None
self.AutoScalingGroupId = None
self.QueueName = None
self.ExpansionBusyTime = None
self.ShrinkIdleTime = None
self.EnableAutoExpansion = None
self.EnableAutoShrink = None
self.DryRun = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.LaunchConfigurationId = params.get("LaunchConfigurationId")
self.AutoScalingGroupId = params.get("AutoScalingGroupId")
self.QueueName = params.get("QueueName")
self.ExpansionBusyTime = params.get("ExpansionBusyTime")
self.ShrinkIdleTime = params.get("ShrinkIdleTime")
self.EnableAutoExpansion = params.get("EnableAutoExpansion")
self.EnableAutoShrink = params.get("EnableAutoShrink")
self.DryRun = params.get("DryRun")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class BindAutoScalingGroupResponse(AbstractModel):
"""BindAutoScalingGroup返回参数结构体
"""
def __init__(self):
r"""
        :param RequestId: The unique request ID, which is returned with every request. The RequestId of this request is needed when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CFSOption(AbstractModel):
"""描述CFS文件系统版本和挂载信息
"""
def __init__(self):
r"""
        :param LocalPath: Local mount path of the file system.
        :type LocalPath: str
        :param RemotePath: Remote mount IP and path of the file system.
        :type RemotePath: str
        :param Protocol: File system protocol type. Default: NFS 3.0.
        <li>NFS 3.0.
        <li>NFS 4.0.
        <li>TURBO.
        :type Protocol: str
        :param StorageType: File system storage type. Default: SD. SD is general-purpose standard storage, HP is general-purpose performance storage, TB is Turbo standard and TP is Turbo performance.
        :type StorageType: str
"""
self.LocalPath = None
self.RemotePath = None
self.Protocol = None
self.StorageType = None
def _deserialize(self, params):
self.LocalPath = params.get("LocalPath")
self.RemotePath = params.get("RemotePath")
self.Protocol = params.get("Protocol")
self.StorageType = params.get("StorageType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ClusterOverview(AbstractModel):
"""集群概览信息。
"""
def __init__(self):
r"""
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param ClusterStatus: Cluster status. Valid values:<br><li>PENDING: creating<br><li>INITING: initializing<br><li>INIT_FAILED: initialization failed<br><li>RUNNING: running<br><li>TERMINATING: terminating
        :type ClusterStatus: str
        :param ClusterName: Cluster name.
        :type ClusterName: str
        :param Placement: Cluster location information.
        :type Placement: :class:`tencentcloud.thpc.v20220401.models.Placement`
        :param CreateTime: Cluster creation time.
        :type CreateTime: str
        :param SchedulerType: Cluster scheduler.
        :type SchedulerType: str
        :param ComputeNodeCount: Number of compute nodes.
        :type ComputeNodeCount: int
        :param ComputeNodeSet: Compute node overview.
        :type ComputeNodeSet: list of ComputeNodeOverview
        :param ManagerNodeCount: Number of manager nodes.
        :type ManagerNodeCount: int
        :param ManagerNodeSet: Manager node overview.
        :type ManagerNodeSet: list of ManagerNodeOverview
        :param LoginNodeSet: Login node overview.
        :type LoginNodeSet: list of LoginNodeOverview
        :param LoginNodeCount: Number of login nodes.
        :type LoginNodeCount: int
        :param VpcId: ID of the VPC the cluster belongs to.
        :type VpcId: str
"""
self.ClusterId = None
self.ClusterStatus = None
self.ClusterName = None
self.Placement = None
self.CreateTime = None
self.SchedulerType = None
self.ComputeNodeCount = None
self.ComputeNodeSet = None
self.ManagerNodeCount = None
self.ManagerNodeSet = None
self.LoginNodeSet = None
self.LoginNodeCount = None
self.VpcId = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.ClusterStatus = params.get("ClusterStatus")
self.ClusterName = params.get("ClusterName")
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
self.CreateTime = params.get("CreateTime")
self.SchedulerType = params.get("SchedulerType")
self.ComputeNodeCount = params.get("ComputeNodeCount")
if params.get("ComputeNodeSet") is not None:
self.ComputeNodeSet = []
for item in params.get("ComputeNodeSet"):
obj = ComputeNodeOverview()
obj._deserialize(item)
self.ComputeNodeSet.append(obj)
self.ManagerNodeCount = params.get("ManagerNodeCount")
if params.get("ManagerNodeSet") is not None:
self.ManagerNodeSet = []
for item in params.get("ManagerNodeSet"):
obj = ManagerNodeOverview()
obj._deserialize(item)
self.ManagerNodeSet.append(obj)
if params.get("LoginNodeSet") is not None:
self.LoginNodeSet = []
for item in params.get("LoginNodeSet"):
obj = LoginNodeOverview()
obj._deserialize(item)
self.LoginNodeSet.append(obj)
self.LoginNodeCount = params.get("LoginNodeCount")
self.VpcId = params.get("VpcId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ComputeNode(AbstractModel):
"""计算节点信息。
"""
def __init__(self):
r"""
        :param InstanceChargeType: Node [billing type](https://cloud.tencent.com/document/product/213/2180).<br><li>PREPAID: prepaid, i.e. monthly subscription<br><li>POSTPAID_BY_HOUR: postpaid by the hour<br><li>SPOTPAID: spot instances<br>Default: POSTPAID_BY_HOUR.
        :type InstanceChargeType: str
        :param InstanceChargePrepaid: Prepaid (monthly subscription) settings. This parameter specifies the purchase duration, auto-renewal and other attributes of monthly subscription nodes. It is required when the node billing type is prepaid.
        :type InstanceChargePrepaid: :class:`tencentcloud.thpc.v20220401.models.InstanceChargePrepaid`
        :param InstanceType: Node instance type. Different instance types provide different resource specifications.
        <br><li>For the available values, call the [DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749) API to get the latest specification table, or see [Instance Types](https://cloud.tencent.com/document/product/213/11518).
        :type InstanceType: str
        :param SystemDisk: Node system disk configuration. If not specified, the system default is used.
        :type SystemDisk: :class:`tencentcloud.thpc.v20220401.models.SystemDisk`
        :param DataDisks: Node data disk configuration. If not specified, no data disks are purchased by default. Up to 21 data disks can be specified at purchase time, including at most 1 LOCAL_BASIC or LOCAL_SSD data disk and at most 20 CLOUD_BASIC, CLOUD_PREMIUM or CLOUD_SSD data disks.
        :type DataDisks: list of DataDisk
        :param InternetAccessible: Public network bandwidth settings. If not specified, the default public bandwidth is 0 Mbps.
        :type InternetAccessible: :class:`tencentcloud.thpc.v20220401.models.InternetAccessible`
        :param InstanceName: Node display name.<br><li>
        If not specified, the default display name 'Unnamed' is shown.
        Up to 60 characters are supported.
        :type InstanceName: str
"""
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
self.InstanceType = None
self.SystemDisk = None
self.DataDisks = None
self.InternetAccessible = None
self.InstanceName = None
def _deserialize(self, params):
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
self.InstanceType = params.get("InstanceType")
if params.get("SystemDisk") is not None:
self.SystemDisk = SystemDisk()
self.SystemDisk._deserialize(params.get("SystemDisk"))
if params.get("DataDisks") is not None:
self.DataDisks = []
for item in params.get("DataDisks"):
obj = DataDisk()
obj._deserialize(item)
self.DataDisks.append(obj)
if params.get("InternetAccessible") is not None:
self.InternetAccessible = InternetAccessible()
self.InternetAccessible._deserialize(params.get("InternetAccessible"))
self.InstanceName = params.get("InstanceName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ComputeNodeOverview(AbstractModel):
"""计算节点概览。
"""
def __init__(self):
r"""
        :param NodeId: Compute node ID.
        Note: this field may return null, indicating that no valid value could be obtained.
:type NodeId: str
"""
self.NodeId = None
def _deserialize(self, params):
self.NodeId = params.get("NodeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateClusterRequest(AbstractModel):
"""CreateCluster请求参数结构体
"""
def __init__(self):
r"""
        :param Placement: Location of the instances in the cluster.
        :type Placement: :class:`tencentcloud.thpc.v20220401.models.Placement`
        :param ManagerNode: Specifies the manager node.
        :type ManagerNode: :class:`tencentcloud.thpc.v20220401.models.ManagerNode`
        :param ManagerNodeCount: Number of manager nodes. Default: 1. Value range: 1~2.
        :type ManagerNodeCount: int
        :param ComputeNode: Specifies the compute nodes.
        :type ComputeNode: :class:`tencentcloud.thpc.v20220401.models.ComputeNode`
        :param ComputeNodeCount: Number of compute nodes. Default: 0.
        :type ComputeNodeCount: int
        :param SchedulerType: Scheduler type. Default: SLURM.<br><li>SGE: SGE scheduler.<br><li>SLURM: SLURM scheduler.
        :type SchedulerType: str
        :param ImageId: A valid [image](https://cloud.tencent.com/document/product/213/4940) ID in the format `img-xxx`. Currently only public images are supported.
        :type ImageId: str
        :param VirtualPrivateCloud: Virtual private cloud (VPC) configuration.
        :type VirtualPrivateCloud: :class:`tencentcloud.thpc.v20220401.models.VirtualPrivateCloud`
        :param LoginSettings: Cluster login settings.
        :type LoginSettings: :class:`tencentcloud.thpc.v20220401.models.LoginSettings`
        :param SecurityGroupIds: Security groups the cluster instances belong to. The value can be obtained from the sgId field returned by [DescribeSecurityGroups](https://cloud.tencent.com/document/api/215/15808). If not specified, the default security group is bound.
        :type SecurityGroupIds: list of str
        :param ClientToken: A string used to guarantee the idempotency of the request. It is generated by the client, must be unique across requests and may contain at most 64 ASCII characters. If not specified, idempotency of the request cannot be guaranteed.
        :type ClientToken: str
        :param DryRun: Whether to only dry-run this request.
        true: a validation request is sent and no instances are created. Checks include whether required parameters are filled in, the request format, service limits and CVM inventory.
        If the check fails, the corresponding error code is returned;
        if the check passes, the RequestId is returned.
        false (default): a normal request is sent and instances are created once the check passes.
        :type DryRun: bool
        :param AccountType: Domain name service type. Default: NIS.
        <li>NIS: NIS domain name service.
        :type AccountType: str
        :param ClusterName: Cluster display name.
        :type ClusterName: str
        :param StorageOption: Cluster storage options.
        :type StorageOption: :class:`tencentcloud.thpc.v20220401.models.StorageOption`
        :param LoginNode: Specifies the login nodes.
        :type LoginNode: :class:`tencentcloud.thpc.v20220401.models.LoginNode`
        :param LoginNodeCount: Number of login nodes. Default: 0. Value range: 0~10.
        :type LoginNodeCount: int
        :param Tags: Tag key-value pairs to bind to the cluster at creation time.
        :type Tags: list of Tag
        :param AutoScalingType: Auto scaling type.<br><li>AS: cluster auto scaling is implemented by the [Auto Scaling](https://cloud.tencent.com/document/product/377/3154) product.<br><li>THPC_AS: cluster auto scaling is implemented internally by THPC.
        :type AutoScalingType: str
"""
self.Placement = None
self.ManagerNode = None
self.ManagerNodeCount = None
self.ComputeNode = None
self.ComputeNodeCount = None
self.SchedulerType = None
self.ImageId = None
self.VirtualPrivateCloud = None
self.LoginSettings = None
self.SecurityGroupIds = None
self.ClientToken = None
self.DryRun = None
self.AccountType = None
self.ClusterName = None
self.StorageOption = None
self.LoginNode = None
self.LoginNodeCount = None
self.Tags = None
self.AutoScalingType = None
def _deserialize(self, params):
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
if params.get("ManagerNode") is not None:
self.ManagerNode = ManagerNode()
self.ManagerNode._deserialize(params.get("ManagerNode"))
self.ManagerNodeCount = params.get("ManagerNodeCount")
if params.get("ComputeNode") is not None:
self.ComputeNode = ComputeNode()
self.ComputeNode._deserialize(params.get("ComputeNode"))
self.ComputeNodeCount = params.get("ComputeNodeCount")
self.SchedulerType = params.get("SchedulerType")
self.ImageId = params.get("ImageId")
if params.get("VirtualPrivateCloud") is not None:
self.VirtualPrivateCloud = VirtualPrivateCloud()
self.VirtualPrivateCloud._deserialize(params.get("VirtualPrivateCloud"))
if params.get("LoginSettings") is not None:
self.LoginSettings = LoginSettings()
self.LoginSettings._deserialize(params.get("LoginSettings"))
self.SecurityGroupIds = params.get("SecurityGroupIds")
self.ClientToken = params.get("ClientToken")
self.DryRun = params.get("DryRun")
self.AccountType = params.get("AccountType")
self.ClusterName = params.get("ClusterName")
if params.get("StorageOption") is not None:
self.StorageOption = StorageOption()
self.StorageOption._deserialize(params.get("StorageOption"))
if params.get("LoginNode") is not None:
self.LoginNode = LoginNode()
self.LoginNode._deserialize(params.get("LoginNode"))
self.LoginNodeCount = params.get("LoginNodeCount")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = Tag()
obj._deserialize(item)
self.Tags.append(obj)
self.AutoScalingType = params.get("AutoScalingType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateClusterResponse(AbstractModel):
"""CreateCluster返回参数结构体
"""
def __init__(self):
r"""
:param ClusterId: 集群ID。
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ClusterId = None
self.RequestId = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.RequestId = params.get("RequestId")
class DataDisk(AbstractModel):
"""描述了数据盘的信息
"""
def __init__(self):
r"""
:param DiskSize: 数据盘大小,单位:GB。最小调整步长为10G,不同数据盘类型取值范围不同,具体限制详见:[存储概述](https://cloud.tencent.com/document/product/213/4952)。默认值为0,表示不购买数据盘。更多限制详见产品文档。
:type DiskSize: int
:param DiskType: 数据盘类型。数据盘类型限制详见[存储概述](https://cloud.tencent.com/document/product/213/4952)。取值范围:<br><li>LOCAL_BASIC:本地硬盘<br><li>LOCAL_SSD:本地SSD硬盘<br><li>LOCAL_NVME:本地NVME硬盘,与InstanceType强相关,不支持指定<br><li>LOCAL_PRO:本地HDD硬盘,与InstanceType强相关,不支持指定<br><li>CLOUD_BASIC:普通云硬盘<br><li>CLOUD_PREMIUM:高性能云硬盘<br><li>CLOUD_SSD:SSD云硬盘<br><li>CLOUD_HSSD:增强型SSD云硬盘<br><li>CLOUD_TSSD:极速型SSD云硬盘<br><br>默认取值:LOCAL_BASIC。
:type DiskType: str
"""
self.DiskSize = None
self.DiskType = None
def _deserialize(self, params):
self.DiskSize = params.get("DiskSize")
self.DiskType = params.get("DiskType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteClusterRequest(AbstractModel):
"""DeleteCluster请求参数结构体
"""
def __init__(self):
r"""
:param ClusterId: 集群ID。
:type ClusterId: str
"""
self.ClusterId = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteClusterResponse(AbstractModel):
"""DeleteCluster返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteNodesRequest(AbstractModel):
"""DeleteNodes请求参数结构体
"""
def __init__(self):
r"""
:param ClusterId: 集群ID。
:type ClusterId: str
:param NodeIds: 节点ID。
:type NodeIds: list of str
"""
self.ClusterId = None
self.NodeIds = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.NodeIds = params.get("NodeIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteNodesResponse(AbstractModel):
"""DeleteNodes返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeClustersRequest(AbstractModel):
"""DescribeClusters请求参数结构体
"""
def __init__(self):
r"""
:param ClusterIds: 集群ID列表。通过该参数可以指定需要查询信息的集群列表。<br>如果您不指定该参数,则返回Limit数量以内的集群信息。
:type ClusterIds: list of str
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。
:type Limit: int
"""
self.ClusterIds = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.ClusterIds = params.get("ClusterIds")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeClustersResponse(AbstractModel):
"""DescribeClusters返回参数结构体
"""
def __init__(self):
r"""
:param ClusterSet: 集群概览信息列表。
:type ClusterSet: list of ClusterOverview
:param TotalCount: 集群数量。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ClusterSet = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ClusterSet") is not None:
self.ClusterSet = []
for item in params.get("ClusterSet"):
obj = ClusterOverview()
obj._deserialize(item)
self.ClusterSet.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class ExpansionNodeConfig(AbstractModel):
"""弹性扩容节点配置信息。
"""
def __init__(self):
r"""
:param Placement: 扩容实例所在的位置。
:type Placement: :class:`tencentcloud.thpc.v20220401.models.Placement`
:param InstanceChargeType: 节点[计费类型](https://cloud.tencent.com/document/product/213/2180)。<br><li>PREPAID:预付费,即包年包月<br><li>POSTPAID_BY_HOUR:按小时后付费<br><li>SPOTPAID:竞价付费<br>默认值:POSTPAID_BY_HOUR。
:type InstanceChargeType: str
:param InstanceChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月节点的购买时长、是否设置自动续费等属性。若指定节点的付费模式为预付费则该参数必传。
:type InstanceChargePrepaid: :class:`tencentcloud.thpc.v20220401.models.InstanceChargePrepaid`
:param InstanceType: 节点机型。不同实例机型指定了不同的资源规格。
<br><li>具体取值可通过调用接口[DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749)来获得最新的规格表或参见[实例规格](https://cloud.tencent.com/document/product/213/11518)描述。
:type InstanceType: str
:param VirtualPrivateCloud: 私有网络相关信息配置。
:type VirtualPrivateCloud: :class:`tencentcloud.thpc.v20220401.models.VirtualPrivateCloud`
"""
self.Placement = None
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
self.InstanceType = None
self.VirtualPrivateCloud = None
def _deserialize(self, params):
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
self.InstanceType = params.get("InstanceType")
if params.get("VirtualPrivateCloud") is not None:
self.VirtualPrivateCloud = VirtualPrivateCloud()
self.VirtualPrivateCloud._deserialize(params.get("VirtualPrivateCloud"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GooseFSOption(AbstractModel):
"""描述GooseFS挂载信息
"""
def __init__(self):
r"""
:param LocalPath: 文件系统本地挂载路径
:type LocalPath: str
:param RemotePath: 文件系统远程挂载路径
:type RemotePath: str
:param Masters: 文件系统master的ip和端口
:type Masters: list of str
"""
self.LocalPath = None
self.RemotePath = None
self.Masters = None
def _deserialize(self, params):
self.LocalPath = params.get("LocalPath")
self.RemotePath = params.get("RemotePath")
self.Masters = params.get("Masters")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceChargePrepaid(AbstractModel):
"""描述了实例的计费模式
"""
def __init__(self):
r"""
:param Period: 购买实例的时长,单位:月。取值范围:1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36, 48, 60。
:type Period: int
:param RenewFlag: 自动续费标识。取值范围:
NOTIFY_AND_AUTO_RENEW:通知过期且自动续费
NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费
DISABLE_NOTIFY_AND_MANUAL_RENEW:不通知过期不自动续费
默认取值:NOTIFY_AND_MANUAL_RENEW。若该参数指定为NOTIFY_AND_AUTO_RENEW,在账户余额充足的情况下,实例到期后将按月自动续费。
:type RenewFlag: str
"""
self.Period = None
self.RenewFlag = None
def _deserialize(self, params):
self.Period = params.get("Period")
self.RenewFlag = params.get("RenewFlag")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InternetAccessible(AbstractModel):
"""描述了实例的公网可访问性,声明了实例的公网使用计费模式,最大带宽等
"""
def __init__(self):
r"""
:param InternetChargeType: 网络计费类型。取值范围:
BANDWIDTH_PREPAID:预付费按带宽结算
TRAFFIC_POSTPAID_BY_HOUR:流量按小时后付费
BANDWIDTH_POSTPAID_BY_HOUR:带宽按小时后付费
BANDWIDTH_PACKAGE:带宽包用户
默认取值:非带宽包用户默认与子机付费类型保持一致。
:type InternetChargeType: str
:param InternetMaxBandwidthOut: 公网出带宽上限,单位:Mbps。默认值:0Mbps。不同机型带宽上限范围不一致,具体限制详见购买网络带宽。
:type InternetMaxBandwidthOut: int
"""
self.InternetChargeType = None
self.InternetMaxBandwidthOut = None
def _deserialize(self, params):
self.InternetChargeType = params.get("InternetChargeType")
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class LoginNode(AbstractModel):
"""登录节点信息。
"""
def __init__(self):
r"""
:param InstanceChargeType: 节点[计费类型](https://cloud.tencent.com/document/product/213/2180)。<br><li>PREPAID:预付费,即包年包月<br><li>POSTPAID_BY_HOUR:按小时后付费<br>默认值:POSTPAID_BY_HOUR。
:type InstanceChargeType: str
:param InstanceChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月节点的购买时长、是否设置自动续费等属性。若指定节点的付费模式为预付费则该参数必传。
:type InstanceChargePrepaid: :class:`tencentcloud.thpc.v20220401.models.InstanceChargePrepaid`
:param InstanceType: 节点机型。不同实例机型指定了不同的资源规格。
<br><li>具体取值可通过调用接口[DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749)来获得最新的规格表或参见[实例规格](https://cloud.tencent.com/document/product/213/11518)描述。
:type InstanceType: str
:param SystemDisk: 节点系统盘配置信息。若不指定该参数,则按照系统默认值进行分配。
:type SystemDisk: list of SystemDisk
:param DataDisks: 节点数据盘配置信息。若不指定该参数,则默认不购买数据盘。支持购买的时候指定21块数据盘,其中最多包含1块LOCAL_BASIC数据盘或者LOCAL_SSD数据盘,最多包含20块CLOUD_BASIC数据盘、CLOUD_PREMIUM数据盘或者CLOUD_SSD数据盘。
:type DataDisks: list of DataDisk
:param InternetAccessible: 公网带宽相关信息设置。若不指定该参数,则默认公网带宽为0Mbps。
:type InternetAccessible: list of InternetAccessible
:param InstanceName: 节点显示名称。<br><li>
不指定节点显示名称则默认显示‘未命名’。
最多支持60个字符。
:type InstanceName: str
"""
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
self.InstanceType = None
self.SystemDisk = None
self.DataDisks = None
self.InternetAccessible = None
self.InstanceName = None
def _deserialize(self, params):
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
self.InstanceType = params.get("InstanceType")
if params.get("SystemDisk") is not None:
self.SystemDisk = []
for item in params.get("SystemDisk"):
obj = SystemDisk()
obj._deserialize(item)
self.SystemDisk.append(obj)
if params.get("DataDisks") is not None:
self.DataDisks = []
for item in params.get("DataDisks"):
obj = DataDisk()
obj._deserialize(item)
self.DataDisks.append(obj)
if params.get("InternetAccessible") is not None:
self.InternetAccessible = []
for item in params.get("InternetAccessible"):
obj = InternetAccessible()
obj._deserialize(item)
self.InternetAccessible.append(obj)
self.InstanceName = params.get("InstanceName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class LoginNodeOverview(AbstractModel):
"""登录节点概览。
"""
def __init__(self):
r"""
:param NodeId: 登录节点ID。
:type NodeId: str
"""
self.NodeId = None
def _deserialize(self, params):
self.NodeId = params.get("NodeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class LoginSettings(AbstractModel):
"""描述了实例登录相关配置与信息。
"""
def __init__(self):
r"""
:param Password: 实例登录密码。不同操作系统类型密码复杂度限制不一样,具体如下:<br><li>Linux实例密码必须8到30位,至少包括两项[a-z],[A-Z]、[0-9] 和 [( ) \` ~ ! @ # $ % ^ & * - + = | { } [ ] : ; ' , . ? / ]中的特殊符号。<br><li>Windows实例密码必须12到30位,至少包括三项[a-z],[A-Z],[0-9] 和 [( ) \` ~ ! @ # $ % ^ & * - + = | { } [ ] : ; ' , . ? /]中的特殊符号。<br><br>若不指定该参数,则由系统随机生成密码,并通过站内信方式通知到用户。
:type Password: str
"""
self.Password = None
def _deserialize(self, params):
self.Password = params.get("Password")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ManagerNode(AbstractModel):
"""管控节点信息
"""
def __init__(self):
r"""
:param InstanceChargeType: 节点[计费类型](https://cloud.tencent.com/document/product/213/2180)。<br><li>PREPAID:预付费,即包年包月<br><li>POSTPAID_BY_HOUR:按小时后付费<br>默认值:POSTPAID_BY_HOUR。
:type InstanceChargeType: str
:param InstanceChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月节点的购买时长、是否设置自动续费等属性。若指定节点的付费模式为预付费则该参数必传。
:type InstanceChargePrepaid: :class:`tencentcloud.thpc.v20220401.models.InstanceChargePrepaid`
:param InstanceType: 节点机型。不同实例机型指定了不同的资源规格。
<br><li>对于付费模式为PREPAID或POSTPAID\_BY\_HOUR的实例创建,具体取值可通过调用接口[DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749)来获得最新的规格表或参见[实例规格](https://cloud.tencent.com/document/product/213/11518)描述。
:type InstanceType: str
:param SystemDisk: 节点系统盘配置信息。若不指定该参数,则按照系统默认值进行分配。
:type SystemDisk: :class:`tencentcloud.thpc.v20220401.models.SystemDisk`
:param DataDisks: 节点数据盘配置信息。若不指定该参数,则默认不购买数据盘。支持购买的时候指定21块数据盘,其中最多包含1块LOCAL_BASIC数据盘或者LOCAL_SSD数据盘,最多包含20块CLOUD_BASIC数据盘、CLOUD_PREMIUM数据盘或者CLOUD_SSD数据盘。
:type DataDisks: list of DataDisk
:param InternetAccessible: 公网带宽相关信息设置。若不指定该参数,则默认公网带宽为0Mbps。
:type InternetAccessible: :class:`tencentcloud.thpc.v20220401.models.InternetAccessible`
:param InstanceName: 节点显示名称。<br><li>
不指定节点显示名称则默认显示‘未命名’。
</li><li>购买多个节点,如果指定模式串`{R:x}`,表示生成数字[`[x, x+n-1]`,其中`n`表示购买节点的数量,例如`server_{R:3}`,购买1个时,节点显示名称为`server_3`;购买2个时,节点显示名称分别为`server_3`,`server_4`。支持指定多个模式串`{R:x}`。
购买多个节点,如果不指定模式串,则在节点显示名称添加后缀`1、2...n`,其中`n`表示购买节点的数量,例如`server_`,购买2个时,节点显示名称分别为`server_1`,`server_2`。</li><li>
最多支持60个字符(包含模式串)。
:type InstanceName: str
"""
self.InstanceChargeType = None
self.InstanceChargePrepaid = None
self.InstanceType = None
self.SystemDisk = None
self.DataDisks = None
self.InternetAccessible = None
self.InstanceName = None
def _deserialize(self, params):
self.InstanceChargeType = params.get("InstanceChargeType")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
self.InstanceType = params.get("InstanceType")
if params.get("SystemDisk") is not None:
self.SystemDisk = SystemDisk()
self.SystemDisk._deserialize(params.get("SystemDisk"))
if params.get("DataDisks") is not None:
self.DataDisks = []
for item in params.get("DataDisks"):
obj = DataDisk()
obj._deserialize(item)
self.DataDisks.append(obj)
if params.get("InternetAccessible") is not None:
self.InternetAccessible = InternetAccessible()
self.InternetAccessible._deserialize(params.get("InternetAccessible"))
self.InstanceName = params.get("InstanceName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ManagerNodeOverview(AbstractModel):
"""管控节点概览。
"""
def __init__(self):
r"""
:param NodeId: 管控节点ID。
注意:此字段可能返回 null,表示取不到有效值。
:type NodeId: str
"""
self.NodeId = None
def _deserialize(self, params):
self.NodeId = params.get("NodeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Placement(AbstractModel):
"""描述了实例的抽象位置
"""
def __init__(self):
r"""
:param Zone: 实例所属的可用区名称。该参数可以通过调用 [DescribeZones](https://cloud.tencent.com/document/product/213/15707) 的返回值中的Zone字段来获取。
:type Zone: str
"""
self.Zone = None
def _deserialize(self, params):
self.Zone = params.get("Zone")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class QueueConfig(AbstractModel):
"""扩容队列配置。
"""
def __init__(self):
r"""
:param QueueName: 队列名称。
:type QueueName: str
:param MinSize: 队列中弹性节点数量最小值。取值范围0~200。
:type MinSize: int
:param MaxSize: 队列中弹性节点数量最大值。取值范围0~200。
:type MaxSize: int
:param EnableAutoExpansion: 是否开启自动扩容。
:type EnableAutoExpansion: bool
:param EnableAutoShrink: 是否开启自动缩容。
:type EnableAutoShrink: bool
:param ImageId: 指定有效的[镜像](https://cloud.tencent.com/document/product/213/4940)ID,格式形如`img-xxx`。目前仅支持公有镜和特定自定义镜像。
:type ImageId: str
:param SystemDisk: 节点系统盘配置信息。若不指定该参数,则按照系统默认值进行分配。
:type SystemDisk: :class:`tencentcloud.thpc.v20220401.models.SystemDisk`
:param DataDisks: 节点数据盘配置信息。若不指定该参数,则默认不购买数据盘。支持购买的时候指定21块数据盘,其中最多包含1块LOCAL_BASIC数据盘或者LOCAL_SSD数据盘,最多包含20块CLOUD_BASIC数据盘、CLOUD_PREMIUM数据盘或者CLOUD_SSD数据盘。
:type DataDisks: list of DataDisk
:param InternetAccessible: 公网带宽相关信息设置。若不指定该参数,则默认公网带宽为0Mbps。
:type InternetAccessible: :class:`tencentcloud.thpc.v20220401.models.InternetAccessible`
:param ExpansionNodeConfigs: 扩容节点配置信息。
:type ExpansionNodeConfigs: list of ExpansionNodeConfig
"""
self.QueueName = None
self.MinSize = None
self.MaxSize = None
self.EnableAutoExpansion = None
self.EnableAutoShrink = None
self.ImageId = None
self.SystemDisk = None
self.DataDisks = None
self.InternetAccessible = None
self.ExpansionNodeConfigs = None
def _deserialize(self, params):
self.QueueName = params.get("QueueName")
self.MinSize = params.get("MinSize")
self.MaxSize = params.get("MaxSize")
self.EnableAutoExpansion = params.get("EnableAutoExpansion")
self.EnableAutoShrink = params.get("EnableAutoShrink")
self.ImageId = params.get("ImageId")
if params.get("SystemDisk") is not None:
self.SystemDisk = SystemDisk()
self.SystemDisk._deserialize(params.get("SystemDisk"))
if params.get("DataDisks") is not None:
self.DataDisks = []
for item in params.get("DataDisks"):
obj = DataDisk()
obj._deserialize(item)
self.DataDisks.append(obj)
if params.get("InternetAccessible") is not None:
self.InternetAccessible = InternetAccessible()
self.InternetAccessible._deserialize(params.get("InternetAccessible"))
if params.get("ExpansionNodeConfigs") is not None:
self.ExpansionNodeConfigs = []
for item in params.get("ExpansionNodeConfigs"):
obj = ExpansionNodeConfig()
obj._deserialize(item)
self.ExpansionNodeConfigs.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SetAutoScalingConfigurationRequest(AbstractModel):
"""SetAutoScalingConfiguration请求参数结构体
"""
def __init__(self):
r"""
:param ClusterId: 集群ID。
:type ClusterId: str
:param ExpansionBusyTime: 任务连续等待时间,队列的任务处于连续等待的时间。单位秒。默认值120。
:type ExpansionBusyTime: int
:param ShrinkIdleTime: 节点连续空闲(未运行作业)时间,一个节点连续处于空闲状态时间。单位秒。默认值300。
:type ShrinkIdleTime: int
:param QueueConfigs: 扩容队列配置列表。
:type QueueConfigs: list of QueueConfig
:param DryRun: 是否只预检此次请求。
true:发送检查请求,不会绑定弹性伸缩组。检查项包括是否填写了必需参数,请求格式,业务限制。
如果检查不通过,则返回对应错误码;
如果检查通过,则返回RequestId。
false(默认):发送正常请求,通过检查后直接绑定弹性伸缩组。
:type DryRun: bool
"""
self.ClusterId = None
self.ExpansionBusyTime = None
self.ShrinkIdleTime = None
self.QueueConfigs = None
self.DryRun = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.ExpansionBusyTime = params.get("ExpansionBusyTime")
self.ShrinkIdleTime = params.get("ShrinkIdleTime")
if params.get("QueueConfigs") is not None:
self.QueueConfigs = []
for item in params.get("QueueConfigs"):
obj = QueueConfig()
obj._deserialize(item)
self.QueueConfigs.append(obj)
self.DryRun = params.get("DryRun")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SetAutoScalingConfigurationResponse(AbstractModel):
"""SetAutoScalingConfiguration返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class StorageOption(AbstractModel):
"""描述集群文件系统选项
"""
def __init__(self):
r"""
:param CFSOptions: 集群挂载CFS文件系统选项
:type CFSOptions: list of CFSOption
:param GooseFSOptions: 集群挂在GooseFS文件系统选项
:type GooseFSOptions: list of GooseFSOption
"""
self.CFSOptions = None
self.GooseFSOptions = None
def _deserialize(self, params):
if params.get("CFSOptions") is not None:
self.CFSOptions = []
for item in params.get("CFSOptions"):
obj = CFSOption()
obj._deserialize(item)
self.CFSOptions.append(obj)
if params.get("GooseFSOptions") is not None:
self.GooseFSOptions = []
for item in params.get("GooseFSOptions"):
obj = GooseFSOption()
obj._deserialize(item)
self.GooseFSOptions.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SystemDisk(AbstractModel):
"""描述了操作系统所在块设备即系统盘的信息
"""
def __init__(self):
r"""
:param DiskType: 系统盘类型。系统盘类型限制详见存储概述。取值范围:
LOCAL_BASIC:本地硬盘
LOCAL_SSD:本地SSD硬盘
CLOUD_BASIC:普通云硬盘
CLOUD_SSD:SSD云硬盘
CLOUD_PREMIUM:高性能云硬盘
默认取值:当前有库存的硬盘类型。
:type DiskType: str
:param DiskSize: 系统盘大小,单位:GB。默认值为 50
:type DiskSize: int
"""
self.DiskType = None
self.DiskSize = None
def _deserialize(self, params):
self.DiskType = params.get("DiskType")
self.DiskSize = params.get("DiskSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Tag(AbstractModel):
"""标签键值对。
"""
def __init__(self):
r"""
:param Key: 标签键
:type Key: str
:param Value: 标签值
:type Value: str
"""
self.Key = None
self.Value = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Value = params.get("Value")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class VirtualPrivateCloud(AbstractModel):
"""描述了VPC相关信息
"""
def __init__(self):
r"""
:param VpcId: 私有网络ID,形如`vpc-xxx`。有效的VpcId可通过登录[控制台](https://console.cloud.tencent.com/vpc/vpc?rid=1)查询;也可以调用接口 [DescribeVpcEx](/document/api/215/1372) ,从接口返回中的`unVpcId`字段获取。若在创建子机时VpcId与SubnetId同时传入`DEFAULT`,则强制使用默认vpc网络。
:type VpcId: str
:param SubnetId: 私有网络子网ID,形如`subnet-xxx`。有效的私有网络子网ID可通过登录[控制台](https://console.cloud.tencent.com/vpc/subnet?rid=1)查询;也可以调用接口 [DescribeSubnets](/document/api/215/15784) ,从接口返回中的`unSubnetId`字段获取。若在创建子机时SubnetId与VpcId同时传入`DEFAULT`,则强制使用默认vpc网络。
:type SubnetId: str
"""
self.VpcId = None
self.SubnetId = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
| {
"content_hash": "1d3809e67bc59aec8ef95969087b35a5",
"timestamp": "",
"source": "github",
"line_count": 1391,
"max_line_length": 410,
"avg_line_length": 36.835370237239395,
"alnum_prop": 0.6278933603965806,
"repo_name": "tzpBingo/github-trending",
"id": "8c5467709a71be68068a6ce638ab09243c620b5f",
"size": "61482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/thpc/v20220401/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
} |
"""
Test functions for models.GLM
"""
import os
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_less,
assert_equal,
assert_raises,
)
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from scipy import stats
import statsmodels.api as sm
from statsmodels.datasets import cpunish, longley
from statsmodels.discrete import discrete_model as discrete
from statsmodels.genmod.generalized_linear_model import GLM, SET_USE_BIC_LLF
from statsmodels.tools.numdiff import (
approx_fprime,
approx_fprime_cs,
approx_hess,
approx_hess_cs,
)
from statsmodels.tools.sm_exceptions import (
DomainWarning,
PerfectSeparationError,
ValueWarning,
)
from statsmodels.tools.tools import add_constant
# Test Precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_glm.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
def teardown_module():
if pdf_output:
pdf.close()
@pytest.fixture(scope="module")
def iris():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return np.genfromtxt(os.path.join(cur_dir, 'results', 'iris.csv'),
delimiter=",", skip_header=1)
class CheckModelResultsMixin:
'''
res2 should be either the results from RModelWrap
or the results as defined in model_results_data
'''
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_allclose(self.res1.bse, self.res2.bse,
atol=10**(-self.decimal_bse), rtol=1e-5)
decimal_resids = DECIMAL_4
def test_residuals(self):
# fix incorrect numbers in resid_working results
# residuals for Poisson are also tested in test_glm_weights.py
import copy
# new numpy would have copy method
resid2 = copy.copy(self.res2.resids)
resid2[:, 2] *= self.res1.family.link.deriv(self.res1.mu)**2
atol = 10**(-self.decimal_resids)
resid_a = self.res1.resid_anscombe_unscaled
resids = np.column_stack((self.res1.resid_pearson,
self.res1.resid_deviance, self.res1.resid_working,
resid_a, self.res1.resid_response))
assert_allclose(resids, resid2, rtol=1e-6, atol=atol)
decimal_aic_R = DECIMAL_4
def test_aic_R(self):
# R includes the estimation of the scale as a lost dof
        # It does not do so for Gamma, though
if self.res1.scale != 1:
dof = 2
else:
dof = 0
if isinstance(self.res1.model.family, (sm.families.NegativeBinomial)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu,
self.res1.model.var_weights,
self.res1.model.freq_weights,
scale=1)
aic = (-2*llf+2*(self.res1.df_model+1))
else:
aic = self.res1.aic
assert_almost_equal(aic+dof, self.res2.aic_R,
self.decimal_aic_R)
decimal_aic_Stata = DECIMAL_4
def test_aic_Stata(self):
# Stata uses the below llf for aic definition for these families
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian,
sm.families.NegativeBinomial)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu,
self.res1.model.var_weights,
self.res1.model.freq_weights,
scale=1)
aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
else:
aic = self.res1.aic/self.res1.nobs
assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)
decimal_deviance = DECIMAL_4
def test_deviance(self):
assert_almost_equal(self.res1.deviance, self.res2.deviance,
self.decimal_deviance)
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_loglike = DECIMAL_4
def test_loglike(self):
# Stata uses the below llf for these families
# We differ with R for them
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian,
sm.families.NegativeBinomial)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu,
self.res1.model.var_weights,
self.res1.model.freq_weights,
scale=1)
else:
llf = self.res1.llf
assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
assert_almost_equal(self.res1.null_deviance,
self.res2.null_deviance,
self.decimal_null_deviance)
decimal_bic = DECIMAL_4
def test_bic(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_almost_equal(self.res1.bic,
self.res2.bic_Stata,
self.decimal_bic)
def test_degrees(self):
assert_equal(self.res1.model.df_resid,self.res2.df_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
self.decimal_fittedvalues)
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
params = self.res1.params
tvalues = params / self.res1.bse
pvalues = stats.norm.sf(np.abs(tvalues)) * 2
half_width = stats.norm.isf(0.025) * self.res1.bse
conf_int = np.column_stack((params - half_width, params + half_width))
if isinstance(tvalues, pd.Series):
assert_series_equal(self.res1.tvalues, tvalues)
else:
assert_almost_equal(self.res1.tvalues, tvalues)
assert_almost_equal(self.res1.pvalues, pvalues)
assert_almost_equal(self.res1.conf_int(), conf_int)
def test_pearson_chi2(self):
if hasattr(self.res2, 'pearson_chi2'):
assert_allclose(self.res1.pearson_chi2, self.res2.pearson_chi2,
atol=1e-6, rtol=1e-6)
def test_prsquared(self):
if hasattr(self.res2, 'prsquared'):
assert_allclose(self.res1.pseudo_rsquared(kind="mcf"),
self.res2.prsquared, rtol=0.05)
if hasattr(self.res2, 'prsquared_cox_snell'):
assert_allclose(float(self.res1.pseudo_rsquared(kind="cs")),
self.res2.prsquared_cox_snell, rtol=0.05)
@pytest.mark.smoke
def test_summary(self):
self.res1.summary()
@pytest.mark.smoke
def test_summary2(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
self.res1.summary2()
def test_get_distribution(self):
res1 = self.res1
if not hasattr(res1.model.family, "get_distribution"):
            # only Tweedie does not have get_distribution
pytest.skip("get_distribution not available")
if isinstance(res1.model.family, sm.families.NegativeBinomial):
res_scale = 1 # QMLE scale can differ from 1
else:
res_scale = res1.scale
distr = res1.model.family.get_distribution(res1.fittedvalues,
res_scale)
var_endog = res1.model.family.variance(res1.fittedvalues) * res_scale
m, v = distr.stats()
assert_allclose(res1.fittedvalues, m, rtol=1e-13)
assert_allclose(var_endog, v, rtol=1e-13)
# check model method
distr2 = res1.model.get_distribution(res1.params, res_scale)
for k in distr2.kwds:
assert_allclose(distr.kwds[k], distr2.kwds[k], rtol=1e-13)
class CheckComparisonMixin:
def test_compare_discrete(self):
res1 = self.res1
resd = self.resd
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params * 0.98)
score_obsd = resd.model.score_obs(resd.params * 0.98)
assert_allclose(score_obs1, score_obsd, rtol=1e-10)
# score
score1 = res1.model.score(res1.params * 0.98)
assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
score0 = res1.model.score(res1.params)
assert_allclose(score0, np.zeros(score_obs1.shape[1]), atol=5e-7)
hessian1 = res1.model.hessian(res1.params * 0.98, observed=False)
hessiand = resd.model.hessian(resd.params * 0.98)
assert_allclose(hessian1, hessiand, rtol=1e-10)
hessian1 = res1.model.hessian(res1.params * 0.98, observed=True)
hessiand = resd.model.hessian(resd.params * 0.98)
assert_allclose(hessian1, hessiand, rtol=1e-9)
def test_score_test(self):
res1 = self.res1
# fake example, should be zero, k_constraint should be 0
st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
assert_allclose(st, 0, atol=1e-20)
assert_allclose(pv, 1, atol=1e-10)
assert_equal(df, 1)
st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
assert_allclose(st, 0, atol=1e-20)
assert_(np.isnan(pv), msg=repr(pv))
assert_equal(df, 0)
# TODO: no verified numbers largely SMOKE test
exog_extra = res1.model.exog[:,1]**2
st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
assert_array_less(0.1, st)
assert_array_less(0.1, pv)
assert_equal(df, 1)
def test_get_prediction(self):
pred1 = self.res1.get_prediction() # GLM
predd = self.resd.get_prediction() # discrete class
assert_allclose(predd.predicted, pred1.predicted_mean, rtol=1e-11)
assert_allclose(predd.se, pred1.se_mean, rtol=1e-6)
assert_allclose(predd.summary_frame().values,
pred1.summary_frame().values, rtol=1e-6)
class TestGlmGaussian(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog = add_constant(cls.data.exog, prepend=False)
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit()
from .results.results_glm import Longley
cls.res2 = Longley()
def test_compare_OLS(self):
res1 = self.res1
# OLS does not define score_obs
from statsmodels.regression.linear_model import OLS
resd = OLS(self.data.endog, self.data.exog).fit()
self.resd = resd # attach to access from the outside
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params, scale=None)
score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
# low precision because of badly scaled exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
score_obs1 = res1.model.score_obs(res1.params, scale=1)
score_obsd = resd.resid[:, None] * resd.model.exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
hess_obs1 = res1.model.hessian(res1.params, scale=None)
hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
# low precision because of badly scaled exog
assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)
# FIXME: enable or delete
# def setup_method(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# Gauss = r.gaussian
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
# self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
# self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGlmGaussianGradient(TestGlmGaussian):
@classmethod
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_2
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog = add_constant(cls.data.exog, prepend=False)
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit(method='newton')
from .results.results_glm import Longley
cls.res2 = Longley()
class TestGaussianLog(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precision
cls.decimal_aic_R = DECIMAL_0
cls.decimal_aic_Stata = DECIMAL_2
cls.decimal_loglike = DECIMAL_0
cls.decimal_null_deviance = DECIMAL_1
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
# y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
cls.X = np.c_[np.ones((nobs,1)),x,x**2]
cls.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
0.001 * np.random.randn(nobs)
GaussLog_Model = GLM(cls.lny, cls.X,
family=sm.families.Gaussian(sm.families.links.log()))
cls.res1 = GaussLog_Model.fit()
from .results.results_glm import GaussianLog
cls.res2 = GaussianLog()
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed"
# GaussLogLink = r.gaussian(link = "log")
# GaussLog_Res_R = RModel(cls.lny, cls.X, r.glm, family=GaussLogLink)
# cls.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_bic = DECIMAL_1
cls.decimal_aic_R = DECIMAL_1
cls.decimal_aic_Stata = DECIMAL_3
cls.decimal_loglike = DECIMAL_1
cls.decimal_resids = DECIMAL_3
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
cls.X = np.c_[np.ones((nobs,1)),x,x**2]
cls.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
InverseLink_Model = GLM(cls.y_inv, cls.X,
family=sm.families.Gaussian(sm.families.links.inverse_power()))
InverseLink_Res = InverseLink_Model.fit()
cls.res1 = InverseLink_Res
from .results.results_glm import GaussianInverse
cls.res2 = GaussianInverse()
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# InverseLink = r.gaussian(link = "inverse")
# InverseLink_Res_R = RModel(cls.y_inv, cls.X, r.glm, family=InverseLink)
# cls.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
cls.decimal_resids = DECIMAL_1
cls.decimal_bic = DECIMAL_2
from statsmodels.datasets.star98 import load
from .results.results_glm import Star98
data = load()
data.endog = np.asarray(data.endog)
data.exog = np.asarray(data.exog)
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = GLM(data.endog, data.exog,
family=sm.families.Binomial()).fit()
# NOTE: if you want to replicate with RModel
# res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
# family=r.binomial, weights=trials)
cls.res2 = Star98()
def test_endog_dtype(self):
from statsmodels.datasets.star98 import load
data = load()
data.exog = add_constant(data.exog, prepend=False)
endog = data.endog.astype(int)
res2 = GLM(endog, data.exog, family=sm.families.Binomial()).fit()
assert_allclose(res2.params, self.res1.params)
endog = data.endog.astype(np.double)
res3 = GLM(endog, data.exog, family=sm.families.Binomial()).fit()
assert_allclose(res3.params, self.res1.params)
def test_invalid_endog(self, reset_randomstate):
# GH2733 inspired check
endog = np.random.randint(0, 100, size=(1000, 3))
exog = np.random.standard_normal((1000, 2))
with pytest.raises(ValueError, match='endog has more than 2 columns'):
GLM(endog, exog, family=sm.families.Binomial())
def test_invalid_endog_formula(self, reset_randomstate):
# GH2733
n = 200
exog = np.random.normal(size=(n, 2))
endog = np.random.randint(0, 3, size=n).astype(str)
# formula interface
data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
with pytest.raises(ValueError, match='array with multiple columns'):
sm.GLM.from_formula("y ~ x1 + x2", data,
family=sm.families.Binomial())
def test_get_distribution_binom_count(self):
# test for binomial counts with n_trials > 1
res1 = self.res1
res_scale = 1 # QMLE scale can differ from 1
mu_prob = res1.fittedvalues
n = res1.model.n_trials
distr = res1.model.family.get_distribution(mu_prob, res_scale,
n_trials=n)
var_endog = res1.model.family.variance(mu_prob) * res_scale
m, v = distr.stats()
assert_allclose(mu_prob * n, m, rtol=1e-13)
assert_allclose(var_endog * n, v, rtol=1e-13)
# check model method
distr2 = res1.model.get_distribution(res1.params, res_scale,
n_trials=n)
for k in distr2.kwds:
assert_allclose(distr.kwds[k], distr2.kwds[k], rtol=1e-13)
# FIXME: enable/xfail/skip or delete
# TODO:
# Non-Canonical Links for the Binomial family require the algorithm to be
# slightly changed
# class TestGlmBinomialLog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLogit(CheckModelResultsMixin):
# pass
# class TestGlmBinomialProbit(CheckModelResultsMixin):
# pass
# class TestGlmBinomialCloglog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialPower(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLoglog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLogc(CheckModelResultsMixin):
# TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
@classmethod
def setup_class(cls):
from .results.results_glm import Lbw
cls.res2 = Lbw()
cls.res1 = GLM(cls.res2.endog, cls.res2.exog,
family=sm.families.Binomial()).fit()
modd = discrete.Logit(cls.res2.endog, cls.res2.exog)
cls.resd = modd.fit(start_params=cls.res1.params * 0.9, disp=False)
def test_score_r(self):
res1 = self.res1
res2 = self.res2
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 1]**2)
st_res = 0.2837680293459376 # (-0.5326988167303712)**2
assert_allclose(st, st_res, rtol=1e-4)
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 0]**2)
st_res = 0.6713492821514992 # (-0.8193590679009413)**2
assert_allclose(st, st_res, rtol=1e-4)
select = list(range(9))
select.pop(7)
res1b = GLM(res2.endog, res2.exog.iloc[:, select],
family=sm.families.Binomial()).fit()
tres = res1b.model.score_test(res1b.params,
exog_extra=res1.model.exog[:, -2])
tres = np.asarray(tres[:2]).ravel()
tres_r = (2.7864148487452, 0.0950667)
assert_allclose(tres, tres_r, rtol=1e-4)
cmd_r = """\
data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
data["race_black"] = data["race"] == "black"
data["race_other"] = data["race"] == "other"
mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
options(digits=16)
anova(mod, test="Rao")
library(statmod)
s = glm.scoretest(mod, data["age"]**2)
s**2
s = glm.scoretest(mod, data["lwt"]**2)
s**2
"""
# class TestGlmBernoulliIdentity(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliLog(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliProbit(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliCloglog(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliPower(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliLoglog(CheckModelResultsMixin):
# pass
# class test_glm_bernoulli_logc(CheckModelResultsMixin):
# pass
class TestGlmGamma(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with canonical inverse link (power -1)
'''
# Test Precisions
cls.decimal_aic_R = -1 #TODO: off by about 1, we are right with Stata
cls.decimal_resids = DECIMAL_2
from statsmodels.datasets.scotland import load
from .results.results_glm import Scotvote
data = load()
data.exog = add_constant(data.exog, prepend=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = GLM(data.endog, data.exog,
family=sm.families.Gamma()).fit()
cls.res1 = res1
# res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
res2 = Scotvote()
res2.aic_R += 2 # R does not count degree of freedom for scale with gamma
cls.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_aic_R = DECIMAL_0
cls.decimal_fittedvalues = DECIMAL_3
from .results.results_glm import CancerLog
res2 = CancerLog()
cls.res1 = GLM(res2.endog, res2.exog,
family=sm.families.Gamma(link=sm.families.links.log())).fit()
cls.res2 = res2
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.Gamma(link="log"))
# cls.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
# cls.res2.bic = -154.1582089453923 # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_resids = -100 #TODO Very off from Stata?
cls.decimal_params = DECIMAL_2
cls.decimal_aic_R = DECIMAL_0
cls.decimal_loglike = DECIMAL_1
from .results.results_glm import CancerIdentity
res2 = CancerIdentity()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fam = sm.families.Gamma(link=sm.families.links.identity())
cls.res1 = GLM(res2.endog, res2.exog, family=fam).fit()
cls.res2 = res2
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.Gamma(link="identity"))
# cls.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
Test results were obtained by R.
'''
from .results.results_glm import Cpunish
cls.data = cpunish.load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog[:, 3] = np.log(cls.data.exog[:, 3])
cls.data.exog = add_constant(cls.data.exog, prepend=False)
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Poisson()).fit()
cls.res2 = Cpunish()
# compare with discrete, start close to save time
modd = discrete.Poisson(cls.data.endog, cls.data.exog)
cls.resd = modd.fit(start_params=cls.res1.params * 0.9, disp=False)
#class TestGlmPoissonIdentity(CheckModelResultsMixin):
# pass
#class TestGlmPoissonPower(CheckModelResultsMixin):
# pass
class TestGlmInvgauss(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Tests the Inverse Gaussian family in GLM.
Notes
-----
Used the rndivgx.ado file provided by Hardin and Hilbe to
generate the data. Results are read from model_results, which
were obtained by running R_ig.s
'''
# Test Precisions
cls.decimal_aic_R = DECIMAL_0
cls.decimal_loglike = DECIMAL_0
from .results.results_glm import InvGauss
res2 = InvGauss()
res1 = GLM(res2.endog, res2.exog,
family=sm.families.InverseGaussian()).fit()
cls.res1 = res1
cls.res2 = res2
def test_get_distribution(self):
res1 = self.res1
distr = res1.model.family.get_distribution(res1.fittedvalues,
res1.scale)
var_endog = res1.model.family.variance(res1.fittedvalues) * res1.scale
m, v = distr.stats()
assert_allclose(res1.fittedvalues, m, rtol=1e-13)
assert_allclose(var_endog, v, rtol=1e-13)
class TestGlmInvgaussLog(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_aic_R = -10 # Big difference vs R.
cls.decimal_resids = DECIMAL_3
from .results.results_glm import InvGaussLog
res2 = InvGaussLog()
cls.res1 = GLM(res2.endog, res2.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.log())).fit()
cls.res2 = res2
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.inverse_gaussian(link="log"))
# cls.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# cls.res2.llf = -12162.72308 # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
# Test Precisions
cls.decimal_aic_R = -10 #TODO: Big difference vs R
cls.decimal_fittedvalues = DECIMAL_3
cls.decimal_params = DECIMAL_3
from .results.results_glm import Medpar1
data = Medpar1()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cls.res1 = GLM(data.endog, data.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.identity())).fit()
from .results.results_glm import InvGaussIdentity
cls.res2 = InvGaussIdentity()
# FIXME: enable or delete
# def setup(cls):
# if skipR:
# raise SkipTest, "Rpy not installed."
# cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
# family=r.inverse_gaussian(link="identity"))
# cls.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# cls.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Negative Binomial family with log link
'''
# Test Precision
cls.decimal_resid = DECIMAL_1
cls.decimal_params = DECIMAL_3
cls.decimal_resids = -1 # 1 % mismatch at 0
cls.decimal_fittedvalues = DECIMAL_1
from statsmodels.datasets.committee import load
cls.data = load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.data.exog[:,2] = np.log(cls.data.exog[:,2])
interaction = cls.data.exog[:,2]*cls.data.exog[:,1]
cls.data.exog = np.column_stack((cls.data.exog,interaction))
cls.data.exog = add_constant(cls.data.exog, prepend=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DomainWarning)
fam = sm.families.NegativeBinomial()
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=fam).fit(scale='x2')
from .results.results_glm import Committee
res2 = Committee()
res2.aic_R += 2 # They do not count a degree of freedom for the scale
cls.res2 = res2
# FIXME: enable or delete
# def setup_method(self):
# if skipR:
# raise SkipTest, "Rpy not installed"
# r.library('MASS') # this does not work when done in rmodelwrap?
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.negative_binomial(1))
# self.res2.null_deviance = 27.8110469364343
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_log(CheckModelResultsMixin):
# pass
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_power(CheckModelResultsMixin):
# pass
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
# pass
class TestGlmPoissonOffset(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
from .results.results_glm import Cpunish_offset
cls.decimal_params = DECIMAL_4
cls.decimal_bse = DECIMAL_4
cls.decimal_aic_R = 3
data = cpunish.load()
data.endog = np.asarray(data.endog)
data.exog = np.asarray(data.exog)
data.exog[:, 3] = np.log(data.exog[:, 3])
data.exog = add_constant(data.exog, prepend=True)
exposure = [100] * len(data.endog)
cls.data = data
cls.exposure = exposure
cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
exposure=exposure).fit()
cls.res2 = Cpunish_offset()
def test_missing(self):
# make sure offset is dropped correctly
endog = self.data.endog.copy()
endog[[2,4,6,8]] = np.nan
mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
exposure=self.exposure, missing='drop')
assert_equal(mod.exposure.shape[0], 13)
def test_offset_exposure(self):
# exposure=x and offset=log(x) should have the same effect
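        # With a log link, E[y] = exposure * exp(x'b + offset), i.e.
        # log E[y] = x'b + offset + log(exposure), so passing exposure=e is
        # equivalent to adding log(e) to the offset; the checks below rely on this.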
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
offset = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset, exposure=exposure).fit()
offset2 = offset + np.log(exposure)
mod2 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset2).fit()
assert_almost_equal(mod1.params, mod2.params)
assert_allclose(mod1.null, mod2.null, rtol=1e-10)
# test recreating model
mod1_ = mod1.model
kwds = mod1_._get_init_kwds()
assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)
# test fit_regularized exposure, see #4605
resr1 = mod1.model.fit_regularized()
resr2 = mod2.model.fit_regularized()
assert_allclose(resr1.params, resr2.params, rtol=1e-10)
def test_predict(self):
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
exposure=exposure).fit()
exog1 = np.random.normal(size=(10,3))
exposure1 = np.random.uniform(1, 2, 10)
# Doubling exposure time should double expected response
pred1 = mod1.predict(exog=exog1, exposure=exposure1)
pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
assert_almost_equal(pred2, 2*pred1)
# Check exposure defaults
pred3 = mod1.predict()
pred4 = mod1.predict(exposure=exposure)
pred5 = mod1.predict(exog=exog, exposure=exposure)
assert_almost_equal(pred3, pred4)
assert_almost_equal(pred4, pred5)
# Check offset defaults
offset = np.random.uniform(1, 2, 100)
mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
pred1 = mod2.predict()
pred2 = mod2.predict(offset=offset)
pred3 = mod2.predict(exog=exog, offset=offset)
assert_almost_equal(pred1, pred2)
assert_almost_equal(pred2, pred3)
# Check that offset shifts the linear predictor
mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
offset = np.random.uniform(1, 2, 10)
pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
assert_almost_equal(pred2, pred1+offset)
        # Passing exposure as a pandas series should not affect the output type
assert isinstance(
mod1.predict(exog=exog1, exposure=pd.Series(exposure1)),
np.ndarray
)
def test_perfect_pred(iris):
y = iris[:, -1]
X = iris[:, :-1]
X = X[y != 2]
y = y[y != 2]
X = add_constant(X, prepend=True)
glm = GLM(y, X, family=sm.families.Binomial())
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
assert_raises(PerfectSeparationError, glm.fit)
def test_score_test_ols():
# nicer example than Longley
from statsmodels.regression.linear_model import OLS
np.random.seed(5)
nobs = 100
sige = 0.5
x = np.random.uniform(0, 1, size=(nobs, 5))
x[:, 0] = 1
beta = 1. / np.arange(1., x.shape[1] + 1)
y = x.dot(beta) + sige * np.random.randn(nobs)
res_ols = OLS(y, x).fit()
res_olsc = OLS(y, x[:, :-2]).fit()
co = res_ols.compare_lm_test(res_olsc, demean=False)
res_glm = GLM(y, x[:, :-2], family=sm.families.Gaussian()).fit()
co2 = res_glm.model.score_test(res_glm.params, exog_extra=x[:, -2:])
# difference in df_resid versus nobs in scale see #1786
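    # (compare_lm_test estimates the scale with nobs=100, while the GLM score
    # test uses the restricted model's df_resid=97, hence the 97/100 factor)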
assert_allclose(co[0] * 97 / 100., co2[0], rtol=1e-13)
def test_attribute_writable_resettable():
# Regression test for mutables and class constructors.
data = sm.datasets.longley.load()
endog, exog = data.endog, data.exog
glm_model = sm.GLM(endog, exog)
assert_equal(glm_model.family.link.power, 1.0)
glm_model.family.link.power = 2.
assert_equal(glm_model.family.link.power, 2.0)
glm_model2 = sm.GLM(endog, exog)
assert_equal(glm_model2.family.link.power, 1.0)
class TestStartParams(CheckModelResultsMixin):
@classmethod
def setup_class(cls):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
cls.decimal_resids = DECIMAL_3
cls.decimal_params = DECIMAL_2
cls.decimal_bic = DECIMAL_0
cls.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
cls.data = load()
cls.data.exog = add_constant(cls.data.exog, prepend=False)
params = sm.OLS(cls.data.endog, cls.data.exog).fit().params
cls.res1 = GLM(cls.data.endog, cls.data.exog,
family=sm.families.Gaussian()).fit(start_params=params)
from .results.results_glm import Longley
cls.res2 = Longley()
def test_glm_start_params():
# see 1604
y2 = np.array('0 1 0 0 0 1'.split(), int)
wt = np.array([50,1,50,1,5,10])
y2 = np.repeat(y2, wt)
x2 = np.repeat([0,0,0.001,100,-1,-1], wt)
mod = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
res = mod.fit(start_params=[-4, -5])
np.testing.assert_almost_equal(res.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
# see 1728
y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
x = np.arange(10, dtype=np.float64)
def llf(params):
lin_pred = params[0] + params[1]*x
pr = 1 / (1 + np.exp(-lin_pred))
return np.sum(y*np.log(pr) + (1-y)*np.log(1-pr))
for params in [0,0], [0,1], [0.5,0.5]:
mod = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
res = mod.fit(start_params=params, maxiter=0)
like = llf(params)
assert_almost_equal(like, res.llf)
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure': np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
family = sm.families.Gaussian(link=sm.families.links.log())
mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure,
family=family)
assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')
exposure = pd.Series(np.random.uniform(size=5))
    df.loc[3, 'Bar'] = 4  # NaN not relevant; ValueError is for the shape mismatch
assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
exposure=exposure, family=family)
assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
exposure=exposure, family=family)
@pytest.mark.matplotlib
def test_plots(close_figures):
np.random.seed(378)
n = 200
exog = np.random.normal(size=(n, 2))
lin_pred = exog[:, 0] + exog[:, 1]**2
prob = 1 / (1 + np.exp(-lin_pred))
endog = 1 * (np.random.uniform(size=n) < prob)
model = sm.GLM(endog, exog, family=sm.families.Binomial())
result = model.fit()
import pandas as pd
from statsmodels.graphics.regressionplots import add_lowess
# array interface
for j in 0,1:
fig = result.plot_added_variable(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
# formula interface
data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
model = sm.GLM.from_formula("y ~ x1 + x2", data, family=sm.families.Binomial())
result = model.fit()
for j in 0,1:
xname = ["x1", "x2"][j]
fig = result.plot_added_variable(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
def gen_endog(lin_pred, family_class, link, binom_version=0):
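    # Simulate an endog sample from the given family and link at the supplied
    # linear predictor; binom_version=1 returns (successes, failures) counts.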
np.random.seed(872)
fam = sm.families
mu = link().inverse(lin_pred)
if family_class == fam.Binomial:
if binom_version == 0:
endog = 1*(np.random.uniform(size=len(lin_pred)) < mu)
else:
endog = np.empty((len(lin_pred), 2))
n = 10
endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n)) < mu[:, None]).sum(1)
endog[:, 1] = n - endog[:, 0]
elif family_class == fam.Poisson:
endog = np.random.poisson(mu)
elif family_class == fam.Gamma:
endog = np.random.gamma(2, mu)
elif family_class == fam.Gaussian:
endog = mu + 2 * np.random.normal(size=len(lin_pred))
elif family_class == fam.NegativeBinomial:
from scipy.stats.distributions import nbinom
endog = nbinom.rvs(mu, 0.5)
elif family_class == fam.InverseGaussian:
from scipy.stats.distributions import invgauss
endog = invgauss.rvs(mu, scale=20)
else:
raise ValueError
return endog
@pytest.mark.smoke
def test_summary():
np.random.seed(4323)
n = 100
exog = np.random.normal(size=(n, 2))
exog[:, 0] = 1
endog = np.random.normal(size=n)
for method in ["irls", "cg"]:
fa = sm.families.Gaussian()
model = sm.GLM(endog, exog, family=fa)
rslt = model.fit(method=method)
s = rslt.summary()
def check_score_hessian(results):
    # compare the model's score and hessian with numerical derivatives
params = results.params
# avoid checking score at MLE, score close to zero
sc = results.model.score(params * 0.98, scale=1)
# cs currently (0.9) does not work for all families
llfunc = lambda x: results.model.loglike(x, scale=1) # noqa
sc2 = approx_fprime(params * 0.98, llfunc)
assert_allclose(sc, sc2, rtol=1e-4, atol=1e-4)
hess = results.model.hessian(params, scale=1)
hess2 = approx_hess(params, llfunc)
assert_allclose(hess, hess2, rtol=1e-4)
scfunc = lambda x: results.model.score(x, scale=1) # noqa
hess3 = approx_fprime(params, scfunc)
assert_allclose(hess, hess3, rtol=1e-4)
def test_gradient_irls():
# Compare the results when using gradient optimization and IRLS.
# TODO: Find working examples for inverse_squared link
np.random.seed(87342)
fam = sm.families
lnk = sm.families.links
families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
(fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
(fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
(fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
(fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
(fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
skip_one = False
for family_class, family_links in families:
for link in family_links:
for binom_version in 0,1:
if family_class != fam.Binomial and binom_version == 1:
continue
if (family_class, link) == (fam.Poisson, lnk.identity):
lin_pred = 20 + exog.sum(1)
elif (family_class, link) == (fam.Binomial, lnk.log):
lin_pred = -1 + exog.sum(1) / 8
elif (family_class, link) == (fam.Poisson, lnk.sqrt):
lin_pred = 2 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.log):
#skip_zero = True
lin_pred = -1 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
lin_pred = 0.5 + exog.sum(1) / 5
continue # skip due to non-convergence
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
continue # skip due to non-convergence
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
# adding skip because of convergence failure
skip_one = True
# the following fails with identity link, because endog < 0
# elif family_class == fam.Gamma:
# lin_pred = 0.5 * exog.sum(1) + np.random.uniform(size=exog.shape[0])
else:
lin_pred = np.random.uniform(size=exog.shape[0])
endog = gen_endog(lin_pred, family_class, link, binom_version)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_irls = sm.GLM(endog, exog, family=family_class(link=link()))
rslt_irls = mod_irls.fit(method="IRLS")
if not (family_class, link) in [(fam.Poisson, lnk.sqrt),
(fam.Gamma, lnk.inverse_power),
(fam.InverseGaussian, lnk.identity)
]:
check_score_hessian(rslt_irls)
# Try with and without starting values.
for max_start_irls, start_params in (0, rslt_irls.params), (3, None):
# TODO: skip convergence failures for now
if max_start_irls > 0 and skip_one:
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_gradient = sm.GLM(endog, exog, family=family_class(link=link()))
rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
start_params=start_params,
method="newton", maxiter=300)
assert_allclose(rslt_gradient.params,
rslt_irls.params, rtol=1e-6, atol=5e-5)
assert_allclose(rslt_gradient.llf, rslt_irls.llf,
rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.scale, rslt_irls.scale,
rtol=1e-6, atol=1e-6)
# Get the standard errors using expected information.
gradient_bse = rslt_gradient.bse
ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=5e-5)
# rslt_irls.bse corresponds to observed=True
assert_allclose(rslt_gradient.bse, rslt_irls.bse, rtol=0.2, atol=5e-5)
rslt_gradient_eim = mod_gradient.fit(max_start_irls=0,
cov_type='eim',
start_params=rslt_gradient.params,
method="newton", maxiter=300)
assert_allclose(rslt_gradient_eim.bse, rslt_irls.bse, rtol=5e-5, atol=0)
def test_gradient_irls_eim():
    # Compare the results when using EIM-based gradient optimization and IRLS.
# TODO: Find working examples for inverse_squared link
np.random.seed(87342)
fam = sm.families
lnk = sm.families.links
families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log,
lnk.cauchy]),
(fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
(fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
(fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
(fam.InverseGaussian, [lnk.log, lnk.identity,
lnk.inverse_power,
lnk.inverse_squared]),
(fam.NegativeBinomial, [lnk.log, lnk.inverse_power,
lnk.inverse_squared, lnk.identity])]
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
skip_one = False
for family_class, family_links in families:
for link in family_links:
for binom_version in 0, 1:
if family_class != fam.Binomial and binom_version == 1:
continue
if (family_class, link) == (fam.Poisson, lnk.identity):
lin_pred = 20 + exog.sum(1)
elif (family_class, link) == (fam.Binomial, lnk.log):
lin_pred = -1 + exog.sum(1) / 8
elif (family_class, link) == (fam.Poisson, lnk.sqrt):
lin_pred = 2 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.log):
# skip_zero = True
lin_pred = -1 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian,
lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.InverseGaussian,
lnk.inverse_squared):
lin_pred = 0.5 + exog.sum(1) / 5
continue # skip due to non-convergence
elif (family_class, link) == (fam.InverseGaussian,
lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.NegativeBinomial,
lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.NegativeBinomial,
lnk.inverse_squared):
lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
continue # skip due to non-convergence
elif (family_class, link) == (fam.NegativeBinomial,
lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
# adding skip because of convergence failure
skip_one = True
else:
lin_pred = np.random.uniform(size=exog.shape[0])
endog = gen_endog(lin_pred, family_class, link, binom_version)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_irls = sm.GLM(endog, exog,
family=family_class(link=link()))
rslt_irls = mod_irls.fit(method="IRLS")
# Try with and without starting values.
for max_start_irls, start_params in ((0, rslt_irls.params),
(3, None)):
# TODO: skip convergence failures for now
if max_start_irls > 0 and skip_one:
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_gradient = sm.GLM(endog, exog,
family=family_class(link=link()))
rslt_gradient = mod_gradient.fit(
max_start_irls=max_start_irls,
start_params=start_params,
method="newton",
optim_hessian='eim'
)
assert_allclose(rslt_gradient.params, rslt_irls.params,
rtol=1e-6, atol=5e-5)
assert_allclose(rslt_gradient.llf, rslt_irls.llf,
rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.scale, rslt_irls.scale,
rtol=1e-6, atol=1e-6)
# Get the standard errors using expected information.
ehess = mod_gradient.hessian(rslt_gradient.params,
observed=False)
gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6,
atol=5e-5)
def test_glm_irls_method():
nobs, k_vars = 50, 4
np.random.seed(987126)
x = np.random.randn(nobs, k_vars - 1)
exog = add_constant(x, has_constant='add')
y = exog.sum(1) + np.random.randn(nobs)
mod = GLM(y, exog)
res1 = mod.fit()
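    # wls_method chooses the linear solver used for the WLS step inside IRLS
    # ('lstsq' by default); attach_wls keeps the final WLS results object.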
res2 = mod.fit(wls_method='pinv', attach_wls=True)
res3 = mod.fit(wls_method='qr', attach_wls=True)
# fit_gradient does not attach mle_settings
res_g1 = mod.fit(start_params=res1.params, method='bfgs')
for r in [res1, res2, res3]:
assert_equal(r.mle_settings['optimizer'], 'IRLS')
assert_equal(r.method, 'IRLS')
assert_equal(res1.mle_settings['wls_method'], 'lstsq')
assert_equal(res2.mle_settings['wls_method'], 'pinv')
assert_equal(res3.mle_settings['wls_method'], 'qr')
assert_(hasattr(res2.results_wls.model, 'pinv_wexog'))
assert_(hasattr(res3.results_wls.model, 'exog_Q'))
# fit_gradient currently does not attach mle_settings
assert_equal(res_g1.method, 'bfgs')
class CheckWtdDuplicationMixin:
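    # Compares a freq_weights fit (res1) against a fit on data where each row
    # is duplicated according to its weight (res2); results should match.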
decimal_params = DECIMAL_4
@classmethod
def setup_class(cls):
cls.data = cpunish.load()
cls.data.endog = np.asarray(cls.data.endog)
cls.data.exog = np.asarray(cls.data.exog)
cls.endog = cls.data.endog
cls.exog = cls.data.exog
np.random.seed(1234)
cls.weight = np.random.randint(5, 100, len(cls.endog))
cls.endog_big = np.repeat(cls.endog, cls.weight)
cls.exog_big = np.repeat(cls.exog, cls.weight, axis=0)
def test_params(self):
assert_allclose(self.res1.params, self.res2.params, atol=1e-6,
rtol=1e-6)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_allclose(self.res1.bse, self.res2.bse, rtol=1e-5, atol=1e-6)
decimal_resids = DECIMAL_4
# TODO: This does not work... Arrays are of different shape.
# Perhaps we use self.res1.model.family.resid_XXX()?
"""
def test_residuals(self):
resids1 = np.column_stack((self.res1.resid_pearson,
self.res1.resid_deviance,
self.res1.resid_working,
self.res1.resid_anscombe,
self.res1.resid_response))
        resids2 = np.column_stack((self.res2.resid_pearson,
self.res2.resid_deviance,
self.res2.resid_working,
self.res2.resid_anscombe,
self.res2.resid_response))
assert_allclose(resids1, resids2, self.decimal_resids)
"""
def test_aic(self):
# R includes the estimation of the scale as a lost dof
        # It does not do so for Gamma, though
assert_allclose(self.res1.aic, self.res2.aic, atol=1e-6, rtol=1e-6)
def test_deviance(self):
assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
rtol=1e-6)
def test_scale(self):
assert_allclose(self.res1.scale, self.res2.scale, atol=1e-6, rtol=1e-6)
def test_loglike(self):
# Stata uses the below llf for these families
# We differ with R for them
assert_allclose(self.res1.llf, self.res2.llf, 1e-6)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
assert_allclose(self.res1.null_deviance,
self.res2.null_deviance,
atol=1e-6,
rtol=1e-6)
decimal_bic = DECIMAL_4
def test_bic(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_allclose(self.res1.bic, self.res2.bic, atol=1e-6, rtol=1e-6)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
res2_fitted = self.res2.predict(self.res1.model.exog)
assert_allclose(self.res1.fittedvalues, res2_fitted, atol=1e-5,
rtol=1e-5)
decimal_tpvalues = DECIMAL_4
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
assert_allclose(self.res1.tvalues, self.res2.tvalues, atol=1e-6,
rtol=2e-4)
assert_allclose(self.res1.pvalues, self.res2.pvalues, atol=1e-6,
rtol=1e-6)
assert_allclose(self.res1.conf_int(), self.res2.conf_int(), atol=1e-6,
rtol=1e-6)
class TestWtdGlmPoisson(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoisson, cls).setup_class()
cls.endog = np.asarray(cls.endog)
cls.exog = np.asarray(cls.exog)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit()
class TestWtdGlmPoissonNewton(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonNewton, cls).setup_class()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
fit_kwds = dict(method='newton')
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
fit_kwds = dict(method='newton', start_params=start_params)
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit(**fit_kwds)
class TestWtdGlmPoissonHC0(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonHC0, cls).setup_class()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
fit_kwds = dict(cov_type='HC0')
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
fit_kwds = dict(cov_type='HC0', start_params=start_params)
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit(**fit_kwds)
class TestWtdGlmPoissonClu(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonClu, cls).setup_class()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
gid = np.arange(1, len(cls.endog) + 1) // 2
fit_kwds = dict(cov_type='cluster', cov_kwds={'groups': gid, 'use_correction':False})
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
gidr = np.repeat(gid, cls.weight)
fit_kwds = dict(cov_type='cluster', cov_kwds={'groups': gidr, 'use_correction':False})
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Poisson()).fit(start_params=start_params,
**fit_kwds)
class TestWtdGlmBinomial(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Binomial family with canonical logit link.
'''
super(TestWtdGlmBinomial, cls).setup_class()
cls.endog = cls.endog / 100
cls.endog_big = cls.endog_big / 100
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=sm.families.Binomial()).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=sm.families.Binomial()).fit()
class TestWtdGlmNegativeBinomial(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Negative Binomial family with canonical link
g(p) = log(p/(p + 1/alpha))
'''
super(TestWtdGlmNegativeBinomial, cls).setup_class()
alpha = 1.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DomainWarning)
family_link = sm.families.NegativeBinomial(
link=sm.families.links.nbinom(alpha=alpha),
alpha=alpha)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmGamma(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGamma, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmGaussian(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gaussian family with log link.
'''
super(TestWtdGlmGaussian, cls).setup_class()
family_link = sm.families.Gaussian(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmInverseGaussian(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests InverseGaussian family with log link.
'''
super(TestWtdGlmInverseGaussian, cls).setup_class()
family_link = sm.families.InverseGaussian(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdGlmGammaNewton(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaNewton, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link
).fit(method='newton')
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link
).fit(method='newton')
def test_init_kwargs(self):
family_link = sm.families.Gamma(sm.families.links.log())
with pytest.warns(ValueWarning, match="unknown kwargs"):
GLM(self.endog, self.exog, family=family_link,
weights=self.weight, # incorrect keyword
)
class TestWtdGlmGammaScale_X2(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaScale_X2, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link,
).fit(scale='X2')
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link,
).fit(scale='X2')
class TestWtdGlmGammaScale_dev(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaScale_dev, cls).setup_class()
family_link = sm.families.Gamma(sm.families.links.log())
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link,
).fit(scale='dev')
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link,
).fit(scale='dev')
def test_missing(self):
endog = self.data.endog.copy()
exog = self.data.exog.copy()
exog[0, 0] = np.nan
endog[[2, 4, 6, 8]] = np.nan
freq_weights = self.weight
        mod_missing = GLM(endog, exog, family=self.res1.model.family,
                          freq_weights=freq_weights, missing='drop')
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.endog.shape[0])
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.exog.shape[0])
        keep_idx = np.array([1, 3, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16])
        assert_equal(mod_missing.freq_weights, self.weight[keep_idx])
class TestWtdTweedieLog(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Tweedie family with log link and var_power=1.
'''
super(TestWtdTweedieLog, cls).setup_class()
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdTweediePower2(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Tweedie family with Power(1) link and var_power=2.
'''
cls.data = cpunish.load_pandas()
cls.endog = cls.data.endog
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
np.random.seed(1234)
cls.weight = np.random.randint(5, 100, len(cls.endog))
cls.endog_big = np.repeat(cls.endog.values, cls.weight)
cls.exog_big = np.repeat(cls.exog.values, cls.weight, axis=0)
link = sm.families.links.Power()
family_link = sm.families.Tweedie(link=link, var_power=2)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
class TestWtdTweediePower15(CheckWtdDuplicationMixin):
@classmethod
def setup_class(cls):
'''
Tests Tweedie family with Power(0.5) link and var_power=1.5.
'''
super(TestWtdTweediePower15, cls).setup_class()
family_link = sm.families.Tweedie(link=sm.families.links.Power(0.5),
var_power=1.5)
cls.res1 = GLM(cls.endog, cls.exog,
freq_weights=cls.weight,
family=family_link).fit()
cls.res2 = GLM(cls.endog_big, cls.exog_big,
family=family_link).fit()
def test_wtd_patsy_missing():
import pandas as pd
data = cpunish.load()
data.endog = np.asarray(data.endog)
data.exog = np.asarray(data.exog)
data.exog[0, 0] = np.nan
data.endog[[2, 4, 6, 8]] = np.nan
data.pandas = pd.DataFrame(data.exog, columns=data.exog_name)
data.pandas['EXECUTIONS'] = data.endog
weights = np.arange(1, len(data.endog)+1)
formula = """EXECUTIONS ~ INCOME + PERPOVERTY + PERBLACK + VC100k96 +
SOUTH + DEGREE"""
    mod_missing = GLM.from_formula(formula, data=data.pandas,
                                   freq_weights=weights)
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.endog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.exog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0], 12)
    keep_weights = np.array([2, 4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17])
    assert_equal(mod_missing.freq_weights, keep_weights)
class CheckTweedie:
def test_resid(self):
idx1 = len(self.res1.resid_response) - 1
idx2 = len(self.res2.resid_response) - 1
assert_allclose(np.concatenate((self.res1.resid_response[:17],
[self.res1.resid_response[idx1]])),
np.concatenate((self.res2.resid_response[:17],
[self.res2.resid_response[idx2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_pearson[:17],
[self.res1.resid_pearson[idx1]])),
np.concatenate((self.res2.resid_pearson[:17],
[self.res2.resid_pearson[idx2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_deviance[:17],
[self.res1.resid_deviance[idx1]])),
np.concatenate((self.res2.resid_deviance[:17],
[self.res2.resid_deviance[idx2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_working[:17],
[self.res1.resid_working[idx1]])),
np.concatenate((self.res2.resid_working[:17],
[self.res2.resid_working[idx2]])),
rtol=1e-5, atol=1e-5)
def test_bse(self):
assert_allclose(self.res1.bse, self.res2.bse, atol=1e-6, rtol=1e6)
def test_params(self):
assert_allclose(self.res1.params, self.res2.params, atol=1e-5,
rtol=1e-5)
def test_deviance(self):
assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
rtol=1e-6)
def test_df(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_fittedvalues(self):
idx1 = len(self.res1.fittedvalues) - 1
idx2 = len(self.res2.resid_response) - 1
assert_allclose(np.concatenate((self.res1.fittedvalues[:17],
[self.res1.fittedvalues[idx1]])),
np.concatenate((self.res2.fittedvalues[:17],
[self.res2.fittedvalues[idx2]])),
atol=1e-4, rtol=1e-4)
def test_summary(self):
self.res1.summary()
self.res1.summary2()
class TestTweediePower15(CheckTweedie):
@classmethod
def setup_class(cls):
from .results.results_glm import CpunishTweediePower15
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.Power(1),
var_power=1.5)
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
cls.res2 = CpunishTweediePower15()
class TestTweediePower2(CheckTweedie):
@classmethod
def setup_class(cls):
from .results.results_glm import CpunishTweediePower2
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.Power(1),
var_power=2.)
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
cls.res2 = CpunishTweediePower2()
class TestTweedieLog1(CheckTweedie):
@classmethod
def setup_class(cls):
from .results.results_glm import CpunishTweedieLog1
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.)
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
cls.res2 = CpunishTweedieLog1()
class TestTweedieLog15Fair(CheckTweedie):
@classmethod
def setup_class(cls):
from statsmodels.datasets.fair import load_pandas
from .results.results_glm import FairTweedieLog15
data = load_pandas()
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.5)
cls.res1 = sm.GLM(endog=data.endog,
exog=data.exog[['rate_marriage', 'age',
'yrs_married']],
family=family_link).fit()
cls.res2 = FairTweedieLog15()
class CheckTweedieSpecial:
def test_mu(self):
assert_allclose(self.res1.mu, self.res2.mu, rtol=1e-5, atol=1e-5)
def test_resid(self):
assert_allclose(self.res1.resid_response, self.res2.resid_response,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_pearson, self.res2.resid_pearson,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_deviance, self.res2.resid_deviance,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_working, self.res2.resid_working,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_anscombe_unscaled,
self.res2.resid_anscombe_unscaled,
rtol=1e-5, atol=1e-5)
class TestTweedieSpecialLog0(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.Gaussian(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=0)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog1(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.Poisson(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog2(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.Gamma(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=2)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog3(CheckTweedieSpecial):
@classmethod
def setup_class(cls):
cls.data = cpunish.load_pandas()
cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
cls.endog = cls.data.endog
family1 = sm.families.InverseGaussian(link=sm.families.links.log())
cls.res1 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=3)
cls.res2 = sm.GLM(endog=cls.data.endog,
exog=cls.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
def gen_tweedie(p):
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 4))
lpr = np.dot(x, np.r_[1, -1, 0, 0.5])
mu = np.exp(lpr)
lam = 10 * mu**(2 - p) / (2 - p)
alp = (2 - p) / (p - 1)
bet = 10 * mu**(1 - p) / (p - 1)
    # Generate Tweedie values using a compound Poisson distribution
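    # For 1 < p < 2, a Tweedie draw is a Poisson(lam) sum of Gamma(alp, scale=1/bet)
    # variates; with these lam/alp/bet the mean is mu and the dispersion is about 0.1.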
y = np.empty(n)
N = np.random.poisson(lam)
for i in range(n):
y[i] = np.random.gamma(alp, 1 / bet[i], N[i]).sum()
return y, x
@pytest.mark.filterwarnings("ignore:GLM ridge optimization")
def test_tweedie_EQL():
# All tests below are regression tests, but the results
# are very close to the population values.
p = 1.5
y, x = gen_tweedie(p)
# Un-regularized fit using gradients
fam = sm.families.Tweedie(var_power=p, eql=True)
model1 = sm.GLM(y, x, family=fam)
result1 = model1.fit(method="newton")
assert_allclose(result1.params,
np.array([1.00350497, -0.99656954, 0.00802702, 0.50713209]),
rtol=1e-5, atol=1e-5)
# Un-regularized fit using IRLS
model1x = sm.GLM(y, x, family=fam)
result1x = model1x.fit(method="irls")
assert_allclose(result1.params, result1x.params)
assert_allclose(result1.bse, result1x.bse, rtol=1e-2)
# Lasso fit using coordinate-wise descent
# TODO: The search gets trapped in an infinite oscillation, so use
# a slack convergence tolerance.
model2 = sm.GLM(y, x, family=fam)
result2 = model2.fit_regularized(L1_wt=1, alpha=0.07, maxiter=200,
cnvrg_tol=0.01)
rtol, atol = 1e-2, 1e-4
assert_allclose(result2.params,
np.array([0.976831, -0.952854, 0., 0.470171]),
rtol=rtol, atol=atol)
# Series of ridge fits using gradients
ev = (np.array([1.001778, -0.99388, 0.00797, 0.506183]),
np.array([0.98586638, -0.96953481, 0.00749983, 0.4975267]),
np.array([0.206429, -0.164547, 0.000235, 0.102489]))
for j, alpha in enumerate([0.05, 0.5, 0.7]):
model3 = sm.GLM(y, x, family=fam)
result3 = model3.fit_regularized(L1_wt=0, alpha=alpha)
assert_allclose(result3.params, ev[j], rtol=rtol, atol=atol)
result4 = model3.fit_regularized(L1_wt=0, alpha=alpha * np.ones(x.shape[1]))
assert_allclose(result4.params, result3.params, rtol=rtol, atol=atol)
alpha = alpha * np.ones(x.shape[1])
alpha[0] = 0
result5 = model3.fit_regularized(L1_wt=0, alpha=alpha)
assert not np.allclose(result5.params, result4.params)
def test_tweedie_elastic_net():
# Check that the coefficients vanish one-by-one
# when using the elastic net.
p = 1.5 # Tweedie variance exponent
y, x = gen_tweedie(p)
# Un-regularized fit using gradients
fam = sm.families.Tweedie(var_power=p, eql=True)
model1 = sm.GLM(y, x, family=fam)
nnz = []
for alpha in np.linspace(0, 10, 20):
result1 = model1.fit_regularized(L1_wt=0.5, alpha=alpha)
nnz.append((np.abs(result1.params) > 0).sum())
nnz = np.unique(nnz)
assert len(nnz) == 5
def test_tweedie_EQL_poisson_limit():
# Test the limiting Poisson case of the Nelder/Pregibon/Tweedie
# EQL.
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 3))
x[:, 0] = 1
lpr = 4 + x[:, 1:].sum(1)
mn = np.exp(lpr)
y = np.random.poisson(mn)
for scale in 1.0, 'x2', 'dev':
# Un-regularized fit using gradients not IRLS
fam = sm.families.Tweedie(var_power=1, eql=True)
model1 = sm.GLM(y, x, family=fam)
result1 = model1.fit(method="newton", scale=scale)
# Poisson GLM
model2 = sm.GLM(y, x, family=sm.families.Poisson())
result2 = model2.fit(method="newton", scale=scale)
assert_allclose(result1.params, result2.params, atol=1e-6, rtol=1e-6)
assert_allclose(result1.bse, result2.bse, 1e-6, 1e-6)
def test_tweedie_EQL_upper_limit():
# Test the limiting case of the Nelder/Pregibon/Tweedie
# EQL with var = mean^2. These are tests against population
# values so accuracy is not high.
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 3))
x[:, 0] = 1
lpr = 4 + x[:, 1:].sum(1)
mn = np.exp(lpr)
y = np.random.poisson(mn)
for scale in 'x2', 'dev', 1.0:
# Un-regularized fit using gradients not IRLS
fam = sm.families.Tweedie(var_power=2, eql=True)
model1 = sm.GLM(y, x, family=fam)
result1 = model1.fit(method="newton", scale=scale)
assert_allclose(result1.params, np.r_[4, 1, 1], atol=1e-3, rtol=1e-1)
def testTweediePowerEstimate():
# Test the Pearson estimate of the Tweedie variance and scale parameters.
#
# Ideally, this would match the following R code, but I cannot make it work...
#
# setwd('c:/workspace')
# data <- read.csv('cpunish.csv', sep=",")
#
# library(tweedie)
#
# y <- c(1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
# 1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
# 1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
# 7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
# 1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
# 0.00000000e+00, 0.00000000e+00)
#
# data$NewY <- y
#
# out <- tweedie.profile( NewY ~ INCOME + SOUTH - 1,
# p.vec=c(1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
# 1.9), link.power=0,
# data=data,do.plot = TRUE)
data = cpunish.load_pandas()
y = [1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00]
model1 = sm.GLM(y, data.exog[['INCOME', 'SOUTH']],
family=sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.5))
res1 = model1.fit()
model2 = sm.GLM((y - res1.mu) ** 2,
np.column_stack((np.ones(len(res1.mu)), np.log(res1.mu))),
family=sm.families.Gamma(sm.families.links.log()))
res2 = model2.fit()
# Sample may be too small for this...
# assert_allclose(res1.scale, np.exp(res2.params[0]), rtol=0.25)
p = model1.estimate_tweedie_power(res1.mu)
assert_allclose(p, res2.params[1], rtol=0.25)
def test_glm_lasso_6431():
# Based on issue #6431
# Fails with newton-cg as optimizer
np.random.seed(123)
from statsmodels.regression.linear_model import OLS
n = 50
x = np.ones((n, 2))
x[:, 1] = np.arange(0, n)
y = 1000 + x[:, 1] + np.random.normal(0, 1, n)
params = np.r_[999.82244338, 1.0077889]
for method in "bfgs", None:
for fun in [OLS, GLM]:
# Changing L1_wtValue from 0 to 1e-9 changes
# the algorithm from scipy gradient optimization
# to statsmodels coordinate descent
for L1_wtValue in [0, 1e-9]:
model = fun(y, x)
if fun == OLS:
fit = model.fit_regularized(alpha=0, L1_wt=L1_wtValue)
else:
fit = model._fit_ridge(alpha=0, start_params=None, method=method)
assert_allclose(params, fit.params, atol=1e-6, rtol=1e-6)
class TestRegularized:
def test_regularized(self):
import os
from .results import glmnet_r_results
for dtype in "binomial", "poisson":
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "enet_%s.csv" % dtype),
delimiter=",")
endog = data[:, 0]
exog = data[:, 1:]
fam = {"binomial" : sm.families.Binomial,
"poisson" : sm.families.Poisson}[dtype]
for j in range(9):
vn = "rslt_%s_%d" % (dtype, j)
r_result = getattr(glmnet_r_results, vn)
L1_wt = r_result[0]
alpha = r_result[1]
params = r_result[2:]
model = GLM(endog, exog, family=fam())
sm_result = model.fit_regularized(L1_wt=L1_wt, alpha=alpha)
# Agreement is OK, see below for further check
assert_allclose(params, sm_result.params, atol=1e-2, rtol=0.3)
# The penalized log-likelihood that we are maximizing.
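            # (mean log-likelihood minus the elastic net penalty
            #  alpha * ((1 - L1_wt) * ||params||**2 / 2 + L1_wt * ||params||_1))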
def plf(params):
llf = model.loglike(params) / len(endog)
llf = llf - alpha * ((1 - L1_wt)*np.sum(params**2) / 2 + L1_wt*np.sum(np.abs(params)))
return llf
# Confirm that we are doing better than glmnet.
llf_r = plf(params)
llf_sm = plf(sm_result.params)
assert_equal(np.sign(llf_sm - llf_r), 1)
class TestConvergence:
@classmethod
def setup_class(cls):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
from statsmodels.datasets.star98 import load
data = load()
data.exog = add_constant(data.exog, prepend=False)
cls.model = GLM(data.endog, data.exog,
family=sm.families.Binomial())
    def _when_converged(self, atol=1e-8, rtol=0, tol_criterion='deviance'):
        # Return the first iteration at which successive criterion values agree
        # within the given tolerances.
        history = self.res.fit_history[tol_criterion]
        for i in range(len(history) - 1):
            if np.allclose(history[i], history[i + 1], atol=atol, rtol=rtol):
                return i
        raise ValueError("CONVERGENCE CHECK: It seems this doesn't converge!")
def test_convergence_atol_only(self):
atol = 1e-8
rtol = 0
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_rtol_only(self):
atol = 0
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_rtol(self):
atol = 1e-8
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_only_params(self):
atol = 1e-8
rtol = 0
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_rtol_only_params(self):
atol = 0
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_rtol_params(self):
atol = 1e-8
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_poisson_deviance():
# see #3355 missing term in deviance if resid_response.sum() != 0
np.random.seed(123987)
nobs, k_vars = 50, 3-1
x = sm.add_constant(np.random.randn(nobs, k_vars))
mu_true = np.exp(x.sum(1))
y = np.random.poisson(mu_true, size=nobs)
mod = sm.GLM(y, x[:, :], family=sm.genmod.families.Poisson())
res = mod.fit()
d_i = res.resid_deviance
d = res.deviance
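    # The deviance should equal twice the log-likelihood gap between the
    # (nearly) saturated model, evaluated at mu = y + 1e-20, and the fitted model.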
lr = (mod.family.loglike(y, y+1e-20) -
mod.family.loglike(y, res.fittedvalues)) * 2
assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
assert_allclose(d, lr, rtol=1e-12)
# case without constant, resid_response.sum() != 0
mod_nc = sm.GLM(y, x[:, 1:], family=sm.genmod.families.Poisson())
res_nc = mod_nc.fit()
d_i = res_nc.resid_deviance
d = res_nc.deviance
lr = (mod.family.loglike(y, y+1e-20) -
mod.family.loglike(y, res_nc.fittedvalues)) * 2
assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
assert_allclose(d, lr, rtol=1e-12)
def test_non_invertible_hessian_fails_summary():
# Test when the hessian fails the summary is still available.
data = cpunish.load_pandas()
data.endog[:] = 1
with warnings.catch_warnings():
# we filter DomainWarning, the convergence problems
# and warnings in summary
warnings.simplefilter("ignore")
mod = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
res = mod.fit(maxiter=1, method='bfgs', max_start_irls=0)
res.summary()
def test_int_scale():
# GH-6627, make sure it works with int scale
data = longley.load()
mod = GLM(data.endog, data.exog, family=sm.families.Gaussian())
res = mod.fit(scale=1)
assert isinstance(res.params, pd.Series)
assert res.scale.dtype == np.float64
@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64])
def test_int_exog(dtype):
# GH-6627, make use of floats internally
count1, n1, count2, n2 = 60, 51477.5, 30, 54308.7
y = [count1, count2]
x = np.asarray([[1, 1], [1, 0]]).astype(dtype)
exposure = np.asarray([n1, n2])
mod = GLM(y, x, exposure=exposure, family=sm.families.Poisson())
res = mod.fit(method='bfgs', max_start_irls=0)
assert isinstance(res.params, np.ndarray)
def test_glm_bic(iris):
X = np.c_[np.ones(100), iris[50:, :4]]
y = np.array(iris)[50:, 4].astype(np.int32)
y -= 1
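    # SET_USE_BIC_LLF switches whether results.bic reports the log-likelihood
    # based value (bic_llf) or the deviance based value (bic_deviance).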
SET_USE_BIC_LLF(True)
model = GLM(y, X, family=sm.families.Binomial()).fit()
# 34.9244 is what glm() of R yields
assert_almost_equal(model.bic, 34.9244, decimal=3)
assert_almost_equal(model.bic_llf, 34.9244, decimal=3)
SET_USE_BIC_LLF(False)
assert_almost_equal(model.bic, model.bic_deviance, decimal=3)
SET_USE_BIC_LLF(None)
def test_glm_bic_warning(iris):
X = np.c_[np.ones(100), iris[50:, :4]]
y = np.array(iris)[50:, 4].astype(np.int32)
y -= 1
model = GLM(y, X, family=sm.families.Binomial()).fit()
with pytest.warns(FutureWarning, match="The bic"):
assert isinstance(model.bic, float)
def test_output_exposure_null(reset_randomstate):
# GH 6953
x0 = [np.sin(i / 20) + 2 for i in range(1000)]
rs = np.random.RandomState(0)
# Variable exposures for each observation
exposure = rs.randint(100, 200, size=1000)
y = [np.sum(rs.poisson(x, size=e)) for x, e in zip(x0, exposure)]
x = add_constant(x0)
model = GLM(
endog=y, exog=x, exposure=exposure, family=sm.families.Poisson()
).fit()
null_model = GLM(
endog=y, exog=x[:, 0], exposure=exposure, family=sm.families.Poisson()
).fit()
null_model_without_exposure = GLM(
endog=y, exog=x[:, 0], family=sm.families.Poisson()
).fit()
assert_allclose(model.llnull, null_model.llf)
# Check that they are different
assert np.abs(null_model_without_exposure.llf - model.llnull) > 1
def test_qaic():
# Example from documentation of R package MuMIn
import patsy
ldose = np.concatenate((np.arange(6), np.arange(6)))
sex = ["M"]*6 + ["F"]*6
numdead = [10, 4, 9, 12, 18, 20, 0, 2, 6, 10, 12, 16]
df = pd.DataFrame({"ldose": ldose, "sex": sex, "numdead": numdead})
df["numalive"] = 20 - df["numdead"]
df["SF"] = df["numdead"]
y = df[["numalive", "numdead"]].values
x = patsy.dmatrix("sex*ldose", data=df, return_type='dataframe')
m = GLM(y, x, family=sm.families.Binomial())
r = m.fit()
scale = 2.412699
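    # QAIC divides -2 * llf by the overdispersion estimate (scale) before adding
    # the 2 * k penalty; dk_params below adjusts the parameter count k.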
qaic = r.info_criteria(crit="qaic", scale=scale)
# R gives 31.13266 because it uses a df that is 1 greater,
# presumably because they count the scale parameter in df.
# This won't matter when comparing models by differencing
# QAICs.
# Binomial doesn't have a scale parameter, so adding +1 is not correct.
assert_allclose(qaic, 29.13266, rtol=1e-5, atol=1e-5)
qaic1 = r.info_criteria(crit="qaic", scale=scale, dk_params=1)
assert_allclose(qaic1, 31.13266, rtol=1e-5, atol=1e-5)
def test_tweedie_score():
np.random.seed(3242)
n = 500
x = np.random.normal(size=(n, 4))
lpr = np.dot(x, np.r_[1, -1, 0, 0.5])
mu = np.exp(lpr)
p0 = 1.5
lam = 10 * mu**(2 - p0) / (2 - p0)
alp = (2 - p0) / (p0 - 1)
bet = 10 * mu**(1 - p0) / (p0 - 1)
y = np.empty(n)
N = np.random.poisson(lam)
for i in range(n):
y[i] = np.random.gamma(alp, 1 / bet[i], N[i]).sum()
for p in [1, 1.5, 2]:
fam = sm.families.Tweedie(var_power=p, eql=True)
model = GLM(y, x, family=fam)
result = model.fit()
pa = result.params + 0.2*np.random.normal(size=result.params.size)
ngrad = approx_fprime_cs(pa, lambda x: model.loglike(x, scale=1))
agrad = model.score(pa, scale=1)
assert_allclose(ngrad, agrad, atol=1e-8, rtol=1e-8)
nhess = approx_hess_cs(pa, lambda x: model.loglike(x, scale=1))
ahess = model.hessian(pa, scale=1)
assert_allclose(nhess, ahess, atol=5e-8, rtol=5e-8)
| {
"content_hash": "33ec9d847340de46cc00427d6faa09ea",
"timestamp": "",
"source": "github",
"line_count": 2620,
"max_line_length": 112,
"avg_line_length": 38.48664122137404,
"alnum_prop": 0.5722021123617791,
"repo_name": "bashtage/statsmodels",
"id": "6750bbb386050e41365c6ec3415d2537a86fa438",
"size": "100835",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "statsmodels/genmod/tests/test_glm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14433387"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
from re import compile, escape
from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import indirectStarter, bounceStarter
from ..util import tagre
from .common import ComicControlScraper, WordPressScraper, WordPressNavi, WordPressWebcomic
class Namesake(ComicControlScraper):
url = 'http://namesakecomic.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'the-journey-begins'
class NatalieDee(_BasicScraper):
url = 'http://www.nataliedee.com/'
rurl = escape(url)
stripUrl = url + '%s'
firstStripUrl = stripUrl % '022806'
imageSearch = compile(tagre("img", "src", r'(%s\d+/[^"]+)' % rurl,
before="overflow"))
prevSearch = compile(tagre("a", "href", r'([^"]+)') + "<< Yesterday")
help = 'Index format: mmddyy'
def namer(self, image_url, page_url):
unused, date, filename = image_url.rsplit('/', 2)
return '%s-%s' % (date, filename)
class Nedroid(WordPressScraper):
url = 'http://nedroid.com/'
prevSearch = '//a[@rel="prev"]'
class NeoCTC(_ParserScraper):
url = 'http://www.hirezfox.com/neoctc/'
stripUrl = url + 'd/%s.html'
firstStripUrl = stripUrl % '20071205'
imageSearch = '//img[contains(@src, "neoctc/comics")]'
prevSearch = '//a[./img[@alt="Previous Day"]]'
multipleImagesPerStrip = True
class NeoEarth(_BasicScraper):
url = 'http://www.neo-earth.com/NE/'
stripUrl = url + 'index.php?date=%s'
firstStripUrl = stripUrl % '2007-03-23'
imageSearch = compile(r'<img src="(strips/.+?)"')
prevSearch = compile(r'<a href="(.+?)">Previous</a>')
help = 'Index format: yyyy-mm-dd'
class NerfNow(WordPressScraper):
url = 'https://www.nerfnow.com/'
prevSearch = '//li[@id="nav_previous"]/a'
class Newshounds(_ParserScraper):
stripUrl = 'http://www.newshounds.com/%s.html'
url = stripUrl % 'nh2/20140929'
firstStripUrl = stripUrl % 'nh1/19971101'
imageSearch = '//img[@class="ksc"]'
prevSearch = '//a[./img[@alt="Previous comic"]]'
endOfLife = True
def getPrevUrl(self, url, data):
# Add navigation link between comic and graphic novel
if url == self.stripUrl % 'nh2/20070201':
return self.stripUrl % 'nh1/20061208'
return super(Newshounds, self).getPrevUrl(url, data)
class NewWorld(WordPressScraper):
url = ('https://web.archive.org/web/20190718012133/'
'http://www.tfsnewworld.com/')
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2007/08/30/63'
prevSearch = '//a[@rel="prev"]'
endOfLife = True
help = 'Index format: yyyy/mm/dd/stripn'
class NeverSatisfied(ComicControlScraper):
url = 'https://www.neversatisfiedcomic.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'never-satisfied'
class NichtLustig(_BasicScraper):
url = 'https://joscha.com/'
starter = bounceStarter
stripUrl = url + 'nichtlustig/%s/'
firstStripUrl = stripUrl % '000501'
lang = 'de'
imageSearch = compile(tagre("img", "src", r'(https://joscha.com/data/media/cartoons/[0-9a-f-_]+.png)'))
prevSearch = compile(tagre("a", "href", r'(https://joscha.com/nichtlustig/\d+/)', after="next"))
nextSearch = compile(tagre("a", "href", r'(https://joscha.com/nichtlustig/\d+/)', after="prev"))
help = 'Index format: yymmdd'
def namer(self, image_url, page_url):
unused, filename, unused2 = page_url.rsplit('/', 2)
return '%s' % (filename)
class Nicky510(WordPressNavi):
url = ('https://web.archive.org/web/20160510215718/'
'http://www.nickyitis.com/')
endOfLife = True
class Nightshift(WordPressWebcomic):
url = 'https://poecatcomix.com/nightshift-static/'
stripUrl = 'https://poecatcomix.com/nightshift/%s/'
firstStripUrl = stripUrl % 'ns-cover'
imageSearch = '//div[contains(@class, "webcomic-media")]//img'
adult = True
def starter(self):
# Build list of chapters for naming
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[./img[contains(@class, "attachment-large")]]/@href')
latestPage = self.chapters[0]
self.chapters = self.chapters[1:]
self.currentChapter = len(self.chapters)
return latestPage
def namer(self, imageUrl, pageUrl):
page = pageUrl.rstrip('/').rsplit('/', 1)[-1]
page = page.replace('blood-brothers', 'bloodbrothers').replace('bb-2', 'bb2').replace('ns7-', 'page-')
filename = 'ns%d-%s.%s' % (self.currentChapter, page, imageUrl.rsplit('.', 1)[-1])
if pageUrl in self.chapters:
self.currentChapter = self.currentChapter - 1
return filename
class Nimona(_ParserScraper):
url = ('https://web.archive.org/web/20141008095502/'
'http://gingerhaze.com/nimona/')
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % "page-1"
imageSearch = '//div[d:class("field-name-field-comic-page")]//img'
prevSearch = '//a[img[contains(@src, "/comicdrop_prev_label")]]'
endOfLife = True
class NineToNine(_ParserScraper):
url = 'https://www.tigerknight.com/99'
stripUrl = url + '/%s'
firstStripUrl = stripUrl % '2014-01-01'
imageSearch = '//img[d:class("comic-image")]'
prevSearch = '//a[./span[contains(text(), "Previous")]]'
multipleImagesPerStrip = True
class NobodyScores(_BasicScraper):
url = 'http://nobodyscores.loosenutstudio.com/'
rurl = escape(url)
stripUrl = url + 'index.php?id=%s'
firstStripUrl = stripUrl % '4'
imageSearch = compile(tagre("img", "src", r'(%scomix/[^"]+)' % rurl))
multipleImagesPerStrip = True
prevSearch = compile(r'<a href="(%sindex.php.+?)">the one before </a>' % rurl)
help = 'Index format: nnn'
class NoNeedForBushido(_ParserScraper):
url = 'http://nn4b.com/'
stripUrl = url + 'comic/%s'
imageSearch = '//div[@id="comic-image"]//img'
prevSearch = '//a[@rel="prev"]'
help = 'Index format: nnn'
class NonPlayerCharacter(_ParserScraper):
url = 'https://www.lfg.co/'
stripUrl = url + 'npc/tale/%s/'
firstStripUrl = stripUrl % '1-1'
imageSearch = '//div[@id="comic-img"]//img'
prevSearch = '//a[@class="comic-nav-prev"]'
latestSearch = '//div[@id="feature-npc-footer"]/a[contains(@href, "npc/tale/")]'
starter = indirectStarter
def namer(self, imageUrl, pageUrl):
return pageUrl.rstrip('/').rsplit('/', 1)[-1]
class NotAVillain(WordPressWebcomic):
url = 'http://navcomic.com/'
stripUrl = url + 'not-a-villain/%s/'
firstStripUrl = stripUrl % 'v1-001'
def namer(self, imageUrl, pageUrl):
filename = imageUrl.rsplit('/', 1)[-1]
# Fix filenames missing "Page"
if filename[2].isdigit():
filename = filename[0] + '-Page' + filename[2:]
# Fix filenames of early comics
filename = filename.replace('Page-', '1-Page')
if filename.startswith('0-Page'):
filename = '1' + filename[1:]
return filename
class NotInventedHere(_ParserScraper):
url = 'http://notinventedhe.re/'
stripUrl = url + 'on/%s'
firstStripUrl = stripUrl % '2009-9-21'
imageSearch = '//div[@id="comic-content"]//img'
prevSearch = '//a[@id="nav-previous"]'
help = 'Index format: yyyy-m-d'
class Nukees(_BasicScraper):
url = 'http://www.nukees.com/'
stripUrl = url + 'd/%s'
firstStripUrl = stripUrl % '19970121'
imageSearch = compile(r'"comic".+?"(/comics/.+?)"')
prevSearch = compile(r'"(/d/.+?)".+?previous')
help = 'Index format: yyyymmdd.html'
| {
"content_hash": "6c32111cdc30a43b0863fa5a17179acb",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 110,
"avg_line_length": 34.56164383561644,
"alnum_prop": 0.6230677764565993,
"repo_name": "webcomics/dosage",
"id": "8387bccf2ede1485404a84a7eff4dca631594991",
"size": "7793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dosagelib/plugins/n.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "33"
},
{
"name": "Python",
"bytes": "622985"
},
{
"name": "Shell",
"bytes": "1363"
}
],
"symlink_target": ""
} |
"""
** OBS - this is not a normal command module! **
** You cannot import anything in this module as a command! **
This is part of the Evennia unittest framework, for testing the
stability and integrity of the codebase during updates. This module
test the default command set. It is instantiated by the
src/objects/tests.py module, which in turn is run by as part of the
main test suite started with
> python game/manage.py test.
"""
import re
from django.conf import settings
from django.utils.unittest import TestCase
from src.players.player import Player
from src.utils import create, utils, ansi
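# Matches prettytable border characters (+, -, |) so they can be stripped
# from returned command output before comparison.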
_RE = re.compile(r"^\+|-+\+|\+-+|--*|\|", re.MULTILINE)
#------------------------------------------------------------
# Command testing
# ------------------------------------------------------------
class TestPlayerClass(Player):
def msg(self, message, **kwargs):
"test message"
if not self.ndb.stored_msg:
self.ndb.stored_msg = []
self.ndb.stored_msg.append(message)
def _get_superuser(self):
"test with superuser flag"
return self.ndb.is_superuser
is_superuser = property(_get_superuser)
class CommandTest(TestCase):
"""
Tests a command
"""
    CID = 0  # we must set a different CID in every test to avoid unique-name collisions when creating the objects
def setUp(self):
"sets up testing environment"
self.room1 = create.create_object("src.objects.objects.Room", key="Room%i"%self.CID)
self.room1.db.desc = "room_desc"
self.room2 = create.create_object("src.objects.objects.Room", key="Room%ib" % self.CID)
self.obj1 = create.create_object("src.objects.objects.Object", key="Obj%i" % self.CID, location=self.room1, home=self.room1)
self.obj2 = create.create_object("src.objects.objects.Object", key="Obj%ib" % self.CID, location=self.room1, home=self.room1)
self.char1 = create.create_object("src.objects.objects.Character", key="Char%i" % self.CID, location=self.room1, home=self.room1)
self.char2 = create.create_object("src.objects.objects.Character", key="Char%ib" % self.CID, location=self.room1, home=self.room1)
self.script = create.create_script("src.scripts.scripts.Script", key="Script%i" % self.CID)
self.player = create.create_player("TestPlayer%i" % self.CID, "test@test.com", "testpassword", typeclass=TestPlayerClass)
self.player2 = create.create_player("TestPlayer%ib" % self.CID, "test@test.com", "testpassword", typeclass=TestPlayerClass)
self.player.permissions = "Immortals"
self.char1.player = self.player
self.char1.sessid = 1
def call(self, cmdobj, args, msg=None, cmdset=None, noansi=True):
"""
Test a command by assigning all the needed
properties to cmdobj and running
cmdobj.at_pre_cmd()
cmdobj.parse()
cmdobj.func()
cmdobj.at_post_cmd()
        The msg return value is compared to the eventual
        output sent to caller.msg in the game
"""
cmdobj.caller = self.char1
cmdobj.cmdstring = cmdobj.key
cmdobj.args = args
cmdobj.cmdset = cmdset
cmdobj.raw_string = cmdobj.key + " " + args
cmdobj.obj = self.char1
cmdobj.sessid = 1
# test
self.char1.player.ndb.stored_msg = []
cmdobj.at_pre_cmd()
cmdobj.parse()
cmdobj.func()
cmdobj.at_post_cmd()
# clean out prettytable sugar
returned_msg = "|".join(_RE.sub("", mess) for mess in self.char1.player.ndb.stored_msg)
#returned_msg = "|".join(self.char1.player.ndb.stored_msg)
returned_msg = ansi.parse_ansi(returned_msg, strip_ansi=noansi).strip()
if msg != None:
if msg == "" and returned_msg or not returned_msg.startswith(msg.strip()):
sep1 = "\n" + "="*30 + "Wanted message" + "="*34 + "\n"
sep2 = "\n" + "="*30 + "Returned message" + "="*32 + "\n"
sep3 = "\n" + "="*78
retval = sep1 + msg.strip() + sep2 + returned_msg + sep3
raise AssertionError(retval)
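# Editor's note (illustrative, not part of the original module): every test class
# below follows the same pattern -- subclass CommandTest, pick a unique CID and
# compare command output via self.call(). A hypothetical minimal case:
#
#     class TestExample(CommandTest):
#         CID = 99
#         def test_cmds(self):
#             self.call(general.CmdLook(), "here", "Room99")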
#------------------------------------------------------------
# Individual module Tests
#------------------------------------------------------------
from src.commands.default import general
class TestGeneral(CommandTest):
CID = 1
def test_cmds(self):
self.call(general.CmdLook(), "here", "Room1\n room_desc")
self.call(general.CmdHome(), "", "You are already home")
self.call(general.CmdInventory(), "", "You are not carrying anything.")
self.call(general.CmdPose(), "looks around", "Char1 looks around")
self.call(general.CmdHome(), "", "You are already home")
self.call(general.CmdNick(), "testalias = testaliasedstring1", "Nick set:")
self.call(general.CmdNick(), "/player testalias = testaliasedstring2", "Nick set:")
self.call(general.CmdNick(), "/object testalias = testaliasedstring3", "Nick set:")
self.assertEqual(u"testaliasedstring1", self.char1.nicks.get("testalias"))
self.assertEqual(u"testaliasedstring2", self.char1.nicks.get("testalias", nick_type="player"))
self.assertEqual(u"testaliasedstring3", self.char1.nicks.get("testalias", nick_type="object"))
self.call(general.CmdGet(), "Obj1", "You pick up Obj1.")
self.call(general.CmdDrop(), "Obj1", "You drop Obj1.")
self.call(general.CmdSay(), "Testing", "You say, \"Testing\"")
self.call(general.CmdAccess(), "", "Permission Hierarchy (climbing):")
from src.commands.default import help
from src.commands.default.cmdset_character import CharacterCmdSet
class TestHelp(CommandTest):
CID = 2
def test_cmds(self):
self.call(help.CmdHelp(), "", "Command help entries", cmdset=CharacterCmdSet())
self.call(help.CmdSetHelp(), "testhelp, General = This is a test", "Topic 'testhelp' was successfully created.")
self.call(help.CmdHelp(), "testhelp", "Help topic for testhelp", cmdset=CharacterCmdSet())
from src.commands.default import system
class TestSystem(CommandTest):
CID = 3
def test_cmds(self):
# we are not testing CmdReload, CmdReset and CmdShutdown, CmdService or CmdTime
# since the server is not running during these tests.
self.call(system.CmdPy(), "1+2", ">>> 1+2|<<< 3")
self.call(system.CmdScripts(), "", "id ")
self.call(system.CmdObjects(), "", "Object subtype totals")
self.call(system.CmdAbout(), "", None)
self.call(system.CmdServerLoad(), "", "Server CPU and Memory load:")
from src.commands.default import admin
class TestAdmin(CommandTest):
CID = 4
def test_cmds(self):
# not testing CmdBoot, CmdDelPlayer, CmdNewPassword
self.call(admin.CmdEmit(), "Char4b = Test", "Emitted to Char4b.")
self.call(admin.CmdPerm(), "Obj4 = Builders", "Permission 'Builders' given to Obj4.")
self.call(admin.CmdWall(), "Test", "Announcing to all connected players ...")
self.call(admin.CmdPerm(), "Char4b = Builders","Permission 'Builders' given to Char4b.")
self.call(admin.CmdBan(), "Char4", "NameBan char4 was added.")
from src.commands.default import player
class TestPlayer(CommandTest):
CID = 5
def test_cmds(self):
self.call(player.CmdOOCLook(), "", "Account TestPlayer5 (you are OutofCharacter)")
self.call(player.CmdIC(), "Char5","Char5 is now acted from another")
self.call(player.CmdOOC(), "", "You are already")
self.call(player.CmdPassword(), "testpassword = testpassword", "Password changed.")
self.call(player.CmdEncoding(), "", "Default encoding:")
self.call(player.CmdWho(), "", "Players:")
self.call(player.CmdQuit(), "", "Quitting. Hope to see you soon again.")
self.call(player.CmdSessions(), "", "Your current session(s):")
self.call(player.CmdColorTest(), "ansi", "ANSI colors:")
self.call(player.CmdCharCreate(), "Test1=Test char","Created new character Test1. Use @ic Test1 to enter the game")
self.call(player.CmdQuell(), "", "Quelling Player permissions (Immortals). Use @unquell to get them back.")
from src.commands.default import building
class TestBuilding(CommandTest):
CID = 6
def test_cmds(self):
self.call(building.CmdCreate(), "/drop TestObj1", "You create a new Object: TestObj1.")
self.call(building.CmdSetObjAlias(), "TestObj1 = TestObj1b","Aliases for 'TestObj1' are now set to testobj1b.")
self.call(building.CmdCopy(), "TestObj1 = TestObj2;TestObj2b, TestObj3;TestObj3b", "Copied TestObj1 to 'TestObj3' (aliases: ['TestObj3b']")
self.call(building.CmdSetAttribute(), "Obj6/test1=\"value1\"", "Created attribute Obj6/test1 = \"value1\"")
self.call(building.CmdSetAttribute(), "Obj6b/test2=\"value2\"", "Created attribute Obj6b/test2 = \"value2\"")
self.call(building.CmdMvAttr(), "Obj6b/test2 = Obj6/test3", "Moving Obj6b/test2 (with value value2) ...\nMoved Obj6b.test2")
self.call(building.CmdCpAttr(), "Obj6/test1 = Obj6b/test3", "Copying Obj6/test1 (with value value1) ...\nCopied Obj6.test1")
self.call(building.CmdName(), "Obj6b=Obj6c", "Object's name changed to 'Obj6c'.")
self.call(building.CmdDesc(), "Obj6c=TestDesc", "The description was set on Obj6c.")
self.call(building.CmdWipe(), "Obj6c/test2/test3", "Wiped attributes test2,test3 on Obj6c.")
self.call(building.CmdDestroy(), "TestObj1","TestObj1 was destroyed.")
self.call(building.CmdDig(), "TestRoom1=testroom;tr,back;b", "Created room TestRoom1")
self.call(building.CmdTunnel(), "n = TestRoom2;test2", "Created room TestRoom2")
self.call(building.CmdOpen(), "TestExit1=Room6b", "Created new Exit 'TestExit1' from Room6 to Room6b")
self.call(building.CmdLink(),"TestExit1 = TestRoom1","Link created TestExit1 > TestRoom1 (one way).")
self.call(building.CmdUnLink(), "TestExit1", "Former exit TestExit1 no longer links anywhere.")
self.call(building.CmdSetHome(), "Obj6 = Room6b", "Obj6's home location was changed from Room6")
self.call(building.CmdListCmdSets(), "", "<DefaultCharacter (Union, prio 0, perm)>:")
self.call(building.CmdTypeclass(), "Obj6 = src.objects.objects.Character", "Obj6's changed typeclass from src.objects.objects.Object to")
self.call(building.CmdLock(), "Obj6 = test:perm(Immortals)", "Added lock 'test:perm(Immortals)' to Obj6.")
self.call(building.CmdExamine(), "Obj6", "Name/key: Obj6")
self.call(building.CmdFind(), "TestRoom1", "One Match")
self.call(building.CmdScript(), "Obj6 = src.scripts.scripts.Script", "Script src.scripts.scripts.Script successfully added")
self.call(building.CmdTeleport(), "TestRoom1", "TestRoom1\nExits: back|Teleported to TestRoom1.")
from src.commands.default import comms
class TestComms(CommandTest):
CID = 7
def test_cmds(self):
# not testing the irc/imc2/rss commands here since testing happens offline
self.call(comms.CmdChannelCreate(), "testchan;test=Test Channel", "Created channel testchan and connected to it.")
self.call(comms.CmdAddCom(), "tc = testchan", "You are already connected to channel testchan. You can now")
self.call(comms.CmdDelCom(), "tc", "Your alias 'tc' for channel testchan was cleared.")
self.call(comms.CmdChannels(), "" ,"Available channels (use comlist,addcom and delcom to manage")
self.call(comms.CmdAllCom(), "", "Available channels (use comlist,addcom and delcom to manage")
self.call(comms.CmdCset(), "testchan=send:all()", "Lock(s) applied. Current locks on testchan:")
self.call(comms.CmdCdesc(), "testchan = Test Channel", "Description of channel 'testchan' set to 'Test Channel'.")
self.call(comms.CmdCemit(), "testchan = Test Message", "Sent to channel testchan: [testchan] Test Message")
self.call(comms.CmdCWho(), "testchan", "Channel subscriptions\ntestchan:\n TestPlayer7")
self.call(comms.CmdPage(), "TestPlayer7b = Test", "You paged TestPlayer7b with: 'Test'.")
        self.call(comms.CmdCBoot(), "", "Usage: @cboot[/quiet] <channel> = <player> [:reason]") # no one else connected to boot
self.call(comms.CmdCdestroy(), "testchan" ,"Channel 'testchan' (Test Channel) was destroyed.")
from src.commands.default import batchprocess
class TestBatchProcess(CommandTest):
CID = 8
def test_cmds(self):
# cannot test batchcode here, it must run inside the server process
self.call(batchprocess.CmdBatchCommands(), "examples.batch_cmds", "Running Batchcommand processor Automatic mode for examples.batch_cmds")
#self.call(batchprocess.CmdBatchCode(), "examples.batch_code", "")
| {
"content_hash": "c242eee03a883a36a7e5f23ce29b8e76",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 147,
"avg_line_length": 58.07239819004525,
"alnum_prop": 0.6505376344086021,
"repo_name": "TaliesinSkye/evennia",
"id": "ccad26bf56d499c91ea687b54830a060cab4981c",
"size": "12858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/commands/default/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59698"
},
{
"name": "D",
"bytes": "9343933"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "91190"
},
{
"name": "Python",
"bytes": "2840755"
},
{
"name": "Shell",
"bytes": "4577"
}
],
"symlink_target": ""
} |
"""Base Driver for DataCore SANsymphony storage array."""
import math
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from cinder import context as cinder_context
from cinder import exception as cinder_exception
from cinder.i18n import _
from cinder import interface
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.datacore import api
from cinder.volume.drivers.datacore import exception as datacore_exception
from cinder.volume.drivers.datacore import utils as datacore_utils
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
datacore_opts = [
cfg.StrOpt('datacore_disk_type',
default='single',
choices=['single', 'mirrored'],
help='DataCore virtual disk type (single/mirrored). '
'Mirrored virtual disks require two storage servers in '
'the server group.'),
cfg.StrOpt('datacore_storage_profile',
default=None,
help='DataCore virtual disk storage profile.'),
cfg.ListOpt('datacore_disk_pools',
default=[],
help='List of DataCore disk pools that can be used '
'by volume driver.'),
cfg.IntOpt('datacore_api_timeout',
default=300,
min=1,
help='Seconds to wait for a response from a '
'DataCore API call.'),
cfg.IntOpt('datacore_disk_failed_delay',
default=300,
min=0,
help='Seconds to wait for DataCore virtual '
'disk to come out of the "Failed" state.'),
]
CONF = cfg.CONF
CONF.register_opts(datacore_opts, group=configuration.SHARED_CONF_GROUP)
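# Editor's note -- an illustrative sketch, not part of the original driver. With the
# options registered above, a backend section in cinder.conf might look like the
# following (all values are hypothetical; the san_* options come from the shared
# cinder.volume.drivers.san option group that do_setup() below requires):
#
#   [datacore-backend]
#   volume_driver = <path to the protocol-specific subclass of this driver>
#   san_ip = 192.0.2.10
#   san_login = admin
#   san_password = secret
#   datacore_disk_type = mirrored
#   datacore_disk_pools = Disk pool 1, Disk pool 2
#   datacore_api_timeout = 300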
@interface.volumedriver
class DataCoreVolumeDriver(driver.VolumeDriver):
"""DataCore SANsymphony base volume driver."""
STORAGE_PROTOCOL = 'N/A'
AWAIT_DISK_ONLINE_INTERVAL = 10
AWAIT_SNAPSHOT_ONLINE_INTERVAL = 10
AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 5
DATACORE_SINGLE_DISK = 'single'
DATACORE_MIRRORED_DISK = 'mirrored'
VOLUME_TYPE_STRIPED = 'Striped'
VOLUME_TYPE_SPANNED = 'Spanned'
DATACORE_DISK_TYPE_KEY = 'datacore:disk_type'
DATACORE_STORAGE_PROFILE_KEY = 'datacore:storage_profile'
DATACORE_DISK_POOLS_KEY = 'datacore:disk_pools'
VALID_VOLUME_TYPE_KEYS = (DATACORE_DISK_TYPE_KEY,
DATACORE_STORAGE_PROFILE_KEY,
DATACORE_DISK_POOLS_KEY,)
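    # Editor's note (illustrative, not part of the original driver): the keys above
    # are read from a volume type's extra specs, e.g. (hypothetical values)
    #   datacore:disk_type = mirrored
    #   datacore:disk_pools = Disk pool 1, Disk pool 2
    # and override the defaults built from cinder.conf in do_setup(); see
    # _get_volume_options_from_type() below for the merge logic.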
def __init__(self, *args, **kwargs):
super(DataCoreVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(datacore_opts)
self._api = None
self._default_volume_options = None
def do_setup(self, context):
"""Perform validations and establish connection to server.
:param context: Context information
"""
required_params = [
'san_ip',
'san_login',
'san_password',
]
for param in required_params:
if not getattr(self.configuration, param, None):
raise cinder_exception.InvalidInput(_("%s not set.") % param)
self._api = api.DataCoreClient(
self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.datacore_api_timeout)
disk_type = self.configuration.datacore_disk_type
if disk_type:
disk_type = disk_type.lower()
storage_profile = self.configuration.datacore_storage_profile
if storage_profile:
storage_profile = storage_profile.lower()
disk_pools = self.configuration.datacore_disk_pools
if disk_pools:
disk_pools = [pool.lower() for pool in disk_pools]
self._default_volume_options = {
self.DATACORE_DISK_TYPE_KEY: disk_type,
self.DATACORE_STORAGE_PROFILE_KEY: storage_profile,
self.DATACORE_DISK_POOLS_KEY: disk_pools,
}
def check_for_setup_error(self):
pass
def get_volume_backend_name(self):
"""Get volume backend name of the volume service.
:return: Volume backend name
"""
backend_name = self.configuration.safe_get('volume_backend_name')
return (backend_name or 'DataCore' + self.__class__.__name__)
def create_volume(self, volume):
"""Creates a volume.
:param volume: Volume object
:return: Dictionary of changes to the volume object to be persisted
"""
volume_options = self._get_volume_options(volume)
disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
if disk_type == self.DATACORE_MIRRORED_DISK:
logical_disk_count = 2
virtual_disk_type = 'MultiPathMirrored'
elif disk_type == self.DATACORE_SINGLE_DISK:
logical_disk_count = 1
virtual_disk_type = 'NonMirrored'
else:
msg = _("Virtual disk type '%s' is not valid.") % disk_type
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
profile_id = self._get_storage_profile_id(
volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
pools = datacore_utils.get_distinct_by(
lambda pool: pool.ServerId,
self._get_available_disk_pools(
volume_options[self.DATACORE_DISK_POOLS_KEY]))
if len(pools) < logical_disk_count:
msg = _("Suitable disk pools were not found for "
"creating virtual disk.")
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
disk_size = self._get_size_in_bytes(volume.size)
logical_disks = []
virtual_disk = None
try:
for logical_disk_pool in pools[:logical_disk_count]:
logical_disks.append(
self._api.create_pool_logical_disk(
logical_disk_pool.Id,
self.VOLUME_TYPE_STRIPED, disk_size))
virtual_disk_data = self._api.build_virtual_disk_data(
volume.id,
virtual_disk_type,
disk_size,
volume.display_name,
profile_id)
virtual_disk = self._api.create_virtual_disk_ex2(
virtual_disk_data,
logical_disks[0].Id,
logical_disks[1].Id if logical_disk_count == 2 else None,
True)
virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Creation of volume %(volume)s failed.",
{'volume': volume.id})
try:
if virtual_disk:
self._api.delete_virtual_disk(virtual_disk.Id, True)
else:
for logical_disk in logical_disks:
self._api.delete_logical_disk(logical_disk.Id)
except datacore_exception.DataCoreException as e:
LOG.warning("An error occurred on a cleanup after failed "
"creation of volume %(volume)s: %(error)s.",
{'volume': volume.id, 'error': e})
return {'provider_location': virtual_disk.Id}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
:param volume: Volume object
:param snapshot: Snapshot object
:return: Dictionary of changes to the volume object to be persisted
"""
return self._create_volume_from(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates volume clone.
:param volume: New Volume object
:param src_vref: Volume object that must be cloned
:return: Dictionary of changes to the volume object to be persisted
"""
return self._create_volume_from(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size.
:param volume: Volume object
:param new_size: new size in GB to extend this volume to
"""
virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)
self._set_virtual_disk_size(virtual_disk,
self._get_size_in_bytes(new_size))
virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)
def delete_volume(self, volume):
"""Deletes a volume.
:param volume: Volume object
"""
virtual_disk = self._get_virtual_disk_for(volume)
if virtual_disk:
if virtual_disk.IsServed:
logical_disks = self._api.get_logical_disks()
logical_units = self._api.get_logical_units()
target_devices = self._api.get_target_devices()
logical_disks = [disk.Id for disk in logical_disks
if disk.VirtualDiskId == virtual_disk.Id]
logical_unit_devices = [unit.VirtualTargetDeviceId
for unit in logical_units
if unit.LogicalDiskId in logical_disks]
initiator_ports = set(device.InitiatorPortId
for device in target_devices
if device.Id in logical_unit_devices)
for port in initiator_ports:
self._api.unserve_virtual_disks_from_port(
port, [virtual_disk.Id])
self._api.delete_virtual_disk(virtual_disk.Id, True)
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: Snapshot object
:return: Dictionary of changes to the snapshot object to be persisted
"""
src_virtual_disk = self._get_virtual_disk_for(snapshot.volume,
raise_not_found=True)
volume_options = self._get_volume_options(snapshot.volume)
profile_name = volume_options[self.DATACORE_STORAGE_PROFILE_KEY]
profile_id = self._get_storage_profile_id(profile_name)
pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
if src_virtual_disk.DiskStatus != 'Online':
LOG.warning("Attempting to make a snapshot from virtual disk "
"%(disk)s that is in %(state)s state.",
{'disk': src_virtual_disk.Id,
'state': src_virtual_disk.DiskStatus})
snapshot_virtual_disk = self._create_virtual_disk_copy(
src_virtual_disk,
snapshot.id,
snapshot.display_name,
profile_id=profile_id,
pool_names=pool_names)
snapshot_virtual_disk = self._await_virtual_disk_online(
snapshot_virtual_disk.Id)
return {'provider_location': snapshot_virtual_disk.Id}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: Snapshot object
"""
snapshot_virtual_disk = self._get_virtual_disk_for(snapshot)
if snapshot_virtual_disk:
self._api.delete_virtual_disk(snapshot_virtual_disk.Id, True)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
def unserve_virtual_disks_from_host(self, volume, connector):
virtual_disk = self._get_virtual_disk_for(volume)
if virtual_disk:
if connector is None:
clients = self._api.get_clients()
else:
clients = [self._get_client(connector['host'],
create_new=False)]
server_group = self._get_our_server_group()
@cinder_utils.synchronized(
'datacore-backend-%s' % server_group.Id, external=True)
def unserve_virtual_disk(client_id):
self._api.unserve_virtual_disks_from_host(
client_id, [virtual_disk.Id])
for client in clients:
unserve_virtual_disk(client.Id)
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector.
:param volume: Volume object
:param connector: Connector information
"""
self.unserve_virtual_disks_from_host(volume, connector)
def manage_existing(self, volume, existing_ref):
return self.manage_existing_object(volume, existing_ref, "volume")
def manage_existing_get_size(self, volume, existing_ref):
return self.manage_existing_object_get_size(volume, existing_ref,
"volume")
def manage_existing_snapshot(self, snapshot, existing_ref):
return self.manage_existing_object(snapshot, existing_ref, "snapshot")
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
return self.manage_existing_object_get_size(snapshot, existing_ref,
"snapshot")
def manage_existing_object(self, existing_object, existing_ref,
object_type):
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise cinder_exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
vd_alias = existing_ref['source-name']
virtual_disk = datacore_utils.get_first_or_default(
lambda disk: disk.Alias == vd_alias,
self._api.get_virtual_disks(),
None)
if not virtual_disk:
kwargs = {'existing_ref': vd_alias,
'reason': 'Specified Virtual disk does not exist.'}
raise cinder_exception.ManageExistingInvalidReference(**kwargs)
return {'provider_location': virtual_disk.Id}
def manage_existing_object_get_size(self, existing_object, existing_ref,
object_type):
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise cinder_exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
vd_alias = existing_ref['source-name']
virtual_disk = datacore_utils.get_first_or_default(
lambda disk: disk.Alias == vd_alias,
self._api.get_virtual_disks(),
None)
if not virtual_disk:
kwargs = {'existing_ref': vd_alias,
'reason': 'Specified Virtual disk does not exist.'}
raise cinder_exception.ManageExistingInvalidReference(**kwargs)
        return self._get_size_in_gigabytes(virtual_disk.Size.Value)
def _update_volume_stats(self):
performance_data = self._api.get_performance_by_type(
['DiskPoolPerformance'])
total = 0
available = 0
reserved = 0
for performance in performance_data:
missing_perf_data = []
if hasattr(performance.PerformanceData, 'BytesTotal'):
total += performance.PerformanceData.BytesTotal
else:
missing_perf_data.append('BytesTotal')
if hasattr(performance.PerformanceData, 'BytesAvailable'):
available += performance.PerformanceData.BytesAvailable
else:
missing_perf_data.append('BytesAvailable')
if hasattr(performance.PerformanceData, 'BytesReserved'):
reserved += performance.PerformanceData.BytesReserved
else:
missing_perf_data.append('BytesReserved')
if missing_perf_data:
LOG.warning("Performance data %(data)s is missing for "
"disk pool %(pool)s",
{'data': missing_perf_data,
'pool': performance.ObjectId})
provisioned = 0
logical_disks = self._api.get_logical_disks()
for disk in logical_disks:
if getattr(disk, 'PoolId', None):
provisioned += disk.Size.Value
total_capacity_gb = self._get_size_in_gigabytes(total)
free = available + reserved
free_capacity_gb = self._get_size_in_gigabytes(free)
provisioned_capacity_gb = self._get_size_in_gigabytes(provisioned)
reserved_percentage = 100.0 * reserved / total if total else 0.0
reserved_percentage = math.ceil(reserved_percentage)
ratio = self.configuration.max_over_subscription_ratio
stats_data = {
'vendor_name': 'DataCore',
'QoS_support': False,
'volume_backend_name': self.get_volume_backend_name(),
'driver_version': self.get_version(),
'storage_protocol': self.STORAGE_PROTOCOL,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'reserved_percentage': reserved_percentage,
'max_over_subscription_ratio': ratio,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'online_extend_support': False,
}
self._stats = stats_data
def _get_our_server_group(self):
server_group = datacore_utils.get_first(lambda group: group.OurGroup,
self._api.get_server_groups())
return server_group
def _get_volume_options_from_type(self, type_id, default_options):
options = dict(default_options.items())
if type_id:
admin_context = cinder_context.get_admin_context()
volume_type = volume_types.get_volume_type(admin_context, type_id)
specs = dict(volume_type).get('extra_specs')
for key, value in specs.items():
if key in self.VALID_VOLUME_TYPE_KEYS:
if key == self.DATACORE_DISK_POOLS_KEY:
options[key] = [v.strip().lower()
for v in value.split(',')]
else:
options[key] = value.lower()
return options
def _get_volume_options(self, volume):
type_id = volume.volume_type_id
volume_options = self._get_volume_options_from_type(
type_id, self._default_volume_options)
return volume_options
def _get_online_servers(self):
servers = self._api.get_servers()
online_servers = [server for server in servers
if server.State == 'Online']
return online_servers
def _get_available_disk_pools(self, disk_pool_names=None):
online_servers = [server.Id for server in self._get_online_servers()]
pool_performance = {
performance.ObjectId: performance.PerformanceData for performance
in self._api.get_performance_by_type(['DiskPoolPerformance'])}
disk_pools = self._api.get_disk_pools()
lower_disk_pool_names = ([name.lower() for name in disk_pool_names]
if disk_pool_names else [])
available_disk_pools = [
pool for pool in disk_pools
if (self._is_pool_healthy(pool, pool_performance,
online_servers) and
(not lower_disk_pool_names or
pool.Caption.lower() in lower_disk_pool_names))]
available_disk_pools.sort(
key=lambda p: pool_performance[p.Id].BytesAvailable, reverse=True)
return available_disk_pools
def _get_virtual_disk_for(self, obj, raise_not_found=False):
disk_id = obj.get('provider_location')
virtual_disk = datacore_utils.get_first_or_default(
lambda disk: disk.Id == disk_id,
self._api.get_virtual_disks(),
None)
if not virtual_disk:
msg = (_("Virtual disk not found for %(object)s %(object_id)s.")
% {'object': obj.__class__.__name__.lower(),
'object_id': obj['id']})
if raise_not_found:
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
else:
LOG.warning(msg)
return virtual_disk
def _set_virtual_disk_size(self, virtual_disk, new_size):
return self._api.set_virtual_disk_size(virtual_disk.Id, new_size)
def _get_storage_profile(self, profile_name, raise_not_found=False):
profiles = self._api.get_storage_profiles()
profile = datacore_utils.get_first_or_default(
lambda p: p.Caption.lower() == profile_name.lower(),
profiles,
None)
if not profile and raise_not_found:
msg = (_("Specified storage profile %s not found.")
% profile_name)
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
return profile
def _get_storage_profile_id(self, profile_name):
profile_id = None
if profile_name:
profile = self._get_storage_profile(profile_name,
raise_not_found=True)
profile_id = profile.Id
return profile_id
def _await_virtual_disk_online(self, virtual_disk_id):
def inner(start_time):
disk_failed_delay = self.configuration.datacore_disk_failed_delay
virtual_disk = datacore_utils.get_first(
lambda disk: disk.Id == virtual_disk_id,
self._api.get_virtual_disks())
if virtual_disk.DiskStatus == 'Online':
raise loopingcall.LoopingCallDone(virtual_disk)
elif (
virtual_disk.DiskStatus != 'FailedRedundancy' and
time.time() - start_time >= disk_failed_delay):
msg = (_("Virtual disk %(disk)s did not come out of the "
"%(state)s state after %(timeout)s seconds.")
% {'disk': virtual_disk.Id,
'state': virtual_disk.DiskStatus,
'timeout': disk_failed_delay})
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
def _create_volume_from(self, volume, src_obj):
src_virtual_disk = self._get_virtual_disk_for(src_obj,
raise_not_found=True)
if src_virtual_disk.DiskStatus != 'Online':
LOG.warning("Attempting to create a volume from virtual disk "
"%(disk)s that is in %(state)s state.",
{'disk': src_virtual_disk.Id,
'state': src_virtual_disk.DiskStatus})
volume_options = self._get_volume_options(volume)
profile_id = self._get_storage_profile_id(
volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
volume_virtual_disk = self._create_virtual_disk_copy(
src_virtual_disk,
volume.id,
volume.display_name,
profile_id=profile_id,
pool_names=pool_names)
volume_logical_disk = datacore_utils.get_first(
lambda disk: disk.VirtualDiskId == volume_virtual_disk.Id,
self._api.get_logical_disks())
try:
disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
if disk_type == self.DATACORE_MIRRORED_DISK:
pools = self._get_available_disk_pools(pool_names)
selected_pool = datacore_utils.get_first_or_default(
lambda pool: (pool.ServerId !=
volume_logical_disk.ServerHostId and
pool.Id != volume_logical_disk.PoolId),
pools, None)
if selected_pool:
logical_disk = self._api.create_pool_logical_disk(
selected_pool.Id,
self.VOLUME_TYPE_STRIPED,
volume_virtual_disk.Size.Value)
self._api.bind_logical_disk(volume_virtual_disk.Id,
logical_disk.Id,
'Second',
True,
False,
True)
else:
msg = _("Can not create mirrored virtual disk. "
"Suitable disk pools not found.")
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
volume_virtual_disk = self._await_virtual_disk_online(
volume_virtual_disk.Id)
try:
source_size = src_obj.size
except AttributeError:
source_size = src_obj.volume_size
if volume.size > source_size:
self._set_virtual_disk_size(volume_virtual_disk,
self._get_size_in_bytes(
volume.size))
volume_virtual_disk = datacore_utils.get_first(
lambda disk: disk.Id == volume_virtual_disk.Id,
self._api.get_virtual_disks())
volume_virtual_disk = self._await_virtual_disk_size_change(
volume_virtual_disk.Id,
self._get_size_in_bytes(source_size))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Creation of volume %(volume)s failed.",
{'volume': volume.id})
try:
self._api.delete_virtual_disk(volume_virtual_disk.Id, True)
except datacore_exception.DataCoreException as e:
LOG.warning("An error occurred on a cleanup after failed "
"creation of volume %(volume)s: %(error)s.",
{'volume': volume.id, 'error': e})
return {'provider_location': volume_virtual_disk.Id}
def _create_full_snapshot(self, description, name, pool_names, profile_id,
src_virtual_disk):
pools = self._get_available_disk_pools(pool_names)
destination_pool = datacore_utils.get_first_or_default(
lambda pool: (pool.ServerId == src_virtual_disk.FirstHostId or
pool.ServerId == src_virtual_disk.SecondHostId),
pools, None)
if not destination_pool:
msg = _("Suitable snapshot destination disk pool not found for "
"virtual disk %s.") % src_virtual_disk.Id
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
server = datacore_utils.get_first(
lambda srv: srv.Id == destination_pool.ServerId,
self._api.get_servers())
if not server.SnapshotMapStorePoolId:
self._api.designate_map_store(destination_pool.Id)
snapshot = self._api.create_snapshot(src_virtual_disk.Id,
name,
description,
destination_pool.Id,
'Full',
False,
profile_id)
return snapshot
def _await_snapshot_migrated(self, snapshot_id):
def inner():
snapshot_data = datacore_utils.get_first(
lambda snapshot: snapshot.Id == snapshot_id,
self._api.get_snapshots())
if snapshot_data.State == 'Migrated':
raise loopingcall.LoopingCallDone(snapshot_data)
elif (snapshot_data.State != 'Healthy' and
snapshot_data.Failure != 'NoFailure'):
msg = (_("Full migration of snapshot %(snapshot)s failed. "
"Snapshot is in %(state)s state.")
% {'snapshot': snapshot_data.Id,
'state': snapshot_data.State})
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
loop = loopingcall.FixedIntervalLoopingCall(inner)
time.sleep(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL)
return loop.start(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL,
self.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY).wait()
def _create_virtual_disk_copy(self, src_virtual_disk, name, description,
profile_id=None, pool_names=None):
snapshot = self._create_full_snapshot(
description, name, pool_names, profile_id, src_virtual_disk)
try:
snapshot = self._await_snapshot_migrated(snapshot.Id)
self._api.delete_snapshot(snapshot.Id)
self._await_snapshot_split_state_change(snapshot)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Split operation failed for snapshot "
"%(snapshot)s.", {'snapshot': snapshot.Id})
try:
logical_disk_copy = datacore_utils.get_first(
lambda disk: (disk.Id ==
snapshot.DestinationLogicalDiskId),
self._api.get_logical_disks())
virtual_disk_copy = datacore_utils.get_first(
lambda disk: (disk.Id ==
logical_disk_copy.VirtualDiskId),
self._api.get_virtual_disks())
self._api.delete_virtual_disk(virtual_disk_copy.Id, True)
except datacore_exception.DataCoreException as e:
LOG.warning("An error occurred on a cleanup after failed "
"split of snapshot %(snapshot)s: %(error)s.",
{'snapshot': snapshot.Id, 'error': e})
logical_disk_copy = datacore_utils.get_first(
lambda disk: disk.Id == snapshot.DestinationLogicalDiskId,
self._api.get_logical_disks())
virtual_disk_copy = datacore_utils.get_first(
lambda disk: disk.Id == logical_disk_copy.VirtualDiskId,
self._api.get_virtual_disks())
return virtual_disk_copy
def _get_client(self, name, create_new=False):
client_hosts = self._api.get_clients()
client = datacore_utils.get_first_or_default(
lambda host: host.HostName.split('.')[0] == name.split('.')[0],
client_hosts, None)
if create_new:
if not client:
client = self._api.register_client(
name, None, 'Other', 'PreferredServer', None)
self._api.set_client_capabilities(client.Id, True, True)
return client
@staticmethod
def _is_pool_healthy(pool, pool_performance, online_servers):
if (pool.PoolStatus == 'Running' and
hasattr(pool_performance[pool.Id], 'BytesAvailable') and
pool.ServerId in online_servers):
return True
return False
@staticmethod
def _get_size_in_bytes(size_in_gigabytes):
return size_in_gigabytes * units.Gi
@staticmethod
def _get_size_in_gigabytes(size_in_bytes):
return size_in_bytes / float(units.Gi)
def _await_virtual_disk_size_change(self, virtual_disk_id, old_size):
def inner(start_time):
disk_failed_delay = self.configuration.datacore_disk_failed_delay
virtual_disk = datacore_utils.get_first(
lambda disk: disk.Id == virtual_disk_id,
self._api.get_virtual_disks())
if virtual_disk.DiskStatus == 'Online' \
and virtual_disk.Size.Value > old_size:
raise loopingcall.LoopingCallDone(virtual_disk)
elif (virtual_disk.DiskStatus != 'FailedRedundancy' and
time.time() - start_time >= disk_failed_delay):
msg = (_("Virtual disk %(disk)s did not come out of the "
"%(state)s state after %(timeout)s seconds.")
% {'disk': virtual_disk.Id,
'state': virtual_disk.DiskStatus,
'timeout': disk_failed_delay})
LOG.error(msg)
raise cinder_exception.VolumeDriverException(message=msg)
inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
time.sleep(self.AWAIT_DISK_ONLINE_INTERVAL)
return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
def _await_snapshot_split_state_change(self, split_snapshot):
def inner(start_time):
disk_failed_delay = self.configuration.datacore_disk_failed_delay
snapshot_found = False
snapshot_list = self._api.get_snapshots()
if not snapshot_list:
raise loopingcall.LoopingCallDone()
for entry in snapshot_list:
if entry.Caption == split_snapshot.Caption:
snapshot_found = True
break
if not snapshot_found:
raise loopingcall.LoopingCallDone()
if (time.time() - start_time >= disk_failed_delay):
msg = (_("Split Snapshot disk %(disk)s did not happened "
"after %(timeout)s seconds.")
% {'disk': split_snapshot.Caption,
'timeout': disk_failed_delay})
LOG.error(msg)
raise loopingcall.LoopingCallDone()
inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
| {
"content_hash": "bb1f684cd2020715b075105ef50d7d1d",
"timestamp": "",
"source": "github",
"line_count": 831,
"max_line_length": 79,
"avg_line_length": 42.253910950661854,
"alnum_prop": 0.5603053000313274,
"repo_name": "mahak/cinder",
"id": "0cb38b56a425b3b4233131100409b0f15e1cd24c",
"size": "35754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/datacore/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078356"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
} |
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottle.paws.de/
Licence (MIT)
-------------
Copyright (c) 2009, Marcel Hellkamp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Example
-------
This is an example::
from bottle import route, run, request, response, static_file, abort
@route('/')
def hello_world():
return 'Hello World!'
@route('/hello/:name')
def hello_name(name):
return 'Hello %s!' % name
@route('/hello', method='POST')
def hello_post():
name = request.POST['name']
return 'Hello %s!' % name
@route('/static/:filename#.*#')
def static(filename):
return static_file(filename, root='/path/to/static/files/')
run(host='localhost', port=8080)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.8.5'
__license__ = 'MIT'
import base64
import cgi
import email.utils
import functools
import hmac
import inspect
import itertools
import mimetypes
import os
import re
import subprocess
import sys
import thread
import threading
import time
import tokenize
import tempfile
from Cookie import SimpleCookie
from tempfile import TemporaryFile
from traceback import format_exc
from urllib import quote as urlquote
from urlparse import urlunsplit, urljoin
try:
from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try:
try:
from json import dumps as json_dumps
except ImportError: # pragma: no cover
from simplejson import dumps as json_dumps
except ImportError: # pragma: no cover
json_dumps = None
if sys.version_info >= (3,0,0): # pragma: no cover
# See Request.POST
from io import BytesIO
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
''' Garbage collecting an io.TextIOWrapper(buffer) instance closes the
wrapped buffer. This subclass keeps it open. '''
def close(self): pass
StringType = bytes
def touni(x, enc='utf8'): # Convert anything to unicode (py3)
return str(x, encoding=enc) if isinstance(x, bytes) else str(x)
else:
from StringIO import StringIO as BytesIO
from types import StringType
NCTextIOWrapper = None
def touni(x, enc='utf8'): # Convert anything to unicode (py2)
return x if isinstance(x, unicode) else unicode(str(x), encoding=enc)
def tob(data, enc='utf8'): # Convert strings to bytes (py2 and py3)
return data.encode(enc) if isinstance(data, unicode) else data
# Backward compatibility
import warnings
def depr(message, critical=False):
if critical: raise DeprecationWarning(message)
warnings.warn(message, DeprecationWarning, stacklevel=3)
# Exceptions and Events
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
class HTTPResponse(BottleException):
""" Used to break execution and immediately finish the response """
def __init__(self, output='', status=200, header=None):
super(BottleException, self).__init__("HTTP Response %d" % status)
self.status = int(status)
self.output = output
self.headers = HeaderDict(header) if header else None
def apply(self, response):
if self.headers:
for key, value in self.headers.iterallitems():
response.headers[key] = value
response.status = self.status
class HTTPError(HTTPResponse):
""" Used to generate an error page """
def __init__(self, code=500, output='Unknown Error', exception=None, traceback=None, header=None):
super(HTTPError, self).__init__(output, code, header)
self.exception = exception
self.traceback = traceback
def __repr__(self):
return ''.join(ERROR_PAGE_TEMPLATE.render(e=self))
# Routing
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router """
class RouteBuildError(RouteError):
""" The route could not been build """
class Route(object):
''' Represents a single route and can parse the dynamic route syntax '''
syntax = re.compile(r'(.*?)(?<!\\):([a-zA-Z_]+)?(?:#(.*?)#)?')
default = '[^/]+'
def __init__(self, route, target=None, name=None, static=False):
""" Create a Route. The route string may contain `:key`,
`:key#regexp#` or `:#regexp#` tokens for each dynamic part of the
            route. These can be escaped with a backslash in front of the `:`
            and are completely ignored if static is true. A name may be used
to refer to this route later (depends on Router)
"""
self.route = route
self.target = target
self.name = name
if static:
self.route = self.route.replace(':','\\:')
self._tokens = None
def tokens(self):
""" Return a list of (type, value) tokens. """
if not self._tokens:
self._tokens = list(self.tokenise(self.route))
return self._tokens
@classmethod
def tokenise(cls, route):
''' Split a string into an iterator of (type, value) tokens. '''
match = None
for match in cls.syntax.finditer(route):
pre, name, rex = match.groups()
if pre: yield ('TXT', pre.replace('\\:',':'))
if rex and name: yield ('VAR', (rex, name))
elif name: yield ('VAR', (cls.default, name))
elif rex: yield ('ANON', rex)
if not match:
yield ('TXT', route.replace('\\:',':'))
elif match.end() < len(route):
yield ('TXT', route[match.end():].replace('\\:',':'))
def group_re(self):
''' Return a regexp pattern with named groups '''
out = ''
for token, data in self.tokens():
if token == 'TXT': out += re.escape(data)
elif token == 'VAR': out += '(?P<%s>%s)' % (data[1], data[0])
elif token == 'ANON': out += '(?:%s)' % data
return out
def flat_re(self):
''' Return a regexp pattern with non-grouping parentheses '''
rf = lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', rf, self.group_re())
def format_str(self):
''' Return a format string with named fields. '''
out, i = '', 0
for token, value in self.tokens():
if token == 'TXT': out += value.replace('%','%%')
elif token == 'ANON': out += '%%(anon%d)s' % i; i+=1
elif token == 'VAR': out += '%%(%s)s' % value[1]
return out
@property
def static(self):
return not self.is_dynamic()
def is_dynamic(self):
''' Return true if the route contains dynamic parts '''
for token, value in self.tokens():
if token != 'TXT':
return True
return False
def __repr__(self):
return "<Route(%s) />" % repr(self.route)
def __eq__(self, other):
return self.route == other.route
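# Editor's note -- a minimal illustration added for clarity; it is not part of the
# original bottle source. The route string and values are hypothetical, and the
# helper is defined but never called, so module import is unaffected.
def _route_syntax_example():
    route = Route('hello/:name')
    tokens = route.tokens()          # [('TXT', 'hello/'), ('VAR', ('[^/]+', 'name'))]
    fmt = route.format_str()         # 'hello/%(name)s', as used by Router.build()
    return fmt % {'name': 'world'}   # 'hello/world'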
class Router(object):
''' A route associates a string (e.g. URL) with an object (e.g. function)
Some dynamic routes may extract parts of the string and provide them as
a dictionary. This router matches a string against multiple routes and
returns the associated object along with the extracted data.
'''
def __init__(self):
self.routes = [] # List of all installed routes
self.named = {} # Cache for named routes and their format strings
self.static = {} # Cache for static routes
self.dynamic = [] # Search structure for dynamic routes
def add(self, route, target=None, **ka):
""" Add a route->target pair or a :class:`Route` object to the Router.
Return the Route object. See :class:`Route` for details.
"""
if not isinstance(route, Route):
route = Route(route, target, **ka)
if self.get_route(route):
            return RouteError('Route %s is not unique.' % route)
self.routes.append(route)
return route
def get_route(self, route, target=None, **ka):
''' Get a route from the router by specifying either the same
parameters as in :meth:`add` or comparing to an instance of
:class:`Route`. Note that not all parameters are considered by the
compare function. '''
if not isinstance(route, Route):
route = Route(route, **ka)
for known in self.routes:
if route == known:
return known
return None
def match(self, uri):
        ''' Match a URI and return a (target, urlargs) tuple '''
if uri in self.static:
return self.static[uri], {}
for combined, subroutes in self.dynamic:
match = combined.match(uri)
if not match: continue
target, args_re = subroutes[match.lastindex - 1]
args = args_re.match(uri).groupdict() if args_re else {}
return target, args
return None, {}
def build(self, _name, **args):
        ''' Build a URI out of a named route and values for the wildcards. '''
try:
return self.named[_name] % args
except KeyError:
raise RouteBuildError("No route found with name '%s'." % _name)
def compile(self):
''' Build the search structures. Call this before actually using the
router.'''
self.named = {}
self.static = {}
self.dynamic = []
for route in self.routes:
if route.name:
self.named[route.name] = route.format_str()
if route.static:
self.static[route.route] = route.target
continue
gpatt = route.group_re()
fpatt = route.flat_re()
try:
gregexp = re.compile('^(%s)$' % gpatt) if '(?P' in gpatt else None
combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, fpatt)
self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
self.dynamic[-1][1].append((route.target, gregexp))
except (AssertionError, IndexError), e: # AssertionError: Too many groups
self.dynamic.append((re.compile('(^%s$)'%fpatt),[(route.target, gregexp)]))
except re.error, e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (route, e))
def __eq__(self, other):
return self.routes == other.routes
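# Editor's note -- another illustrative sketch, not part of the original source. It
# shows the add()/compile()/match() cycle described in the Router docstring; the
# route and target here are hypothetical, and the helper is never called.
def _router_usage_example():
    router = Router()
    router.add('hello/:name', target='hello-target')  # target may be any object
    router.compile()                                   # build the search structures
    target, args = router.match('hello/world')         # ('hello-target', {'name': 'world'})
    return target, args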
# WSGI abstraction: Application, Request and Response objects
class Bottle(object):
""" WSGI application """
def __init__(self, catchall=True, autojson=True, config=None):
""" Create a new bottle instance.
You usually don't do that. Use `bottle.app.push()` instead.
"""
self.routes = Router()
self.mounts = {}
self.error_handler = {}
self.catchall = catchall
self.config = config or {}
self.serve = True
self.castfilter = []
if autojson and json_dumps:
self.add_filter(dict, dict2json)
def optimize(self, *a, **ka):
depr("Bottle.optimize() is obsolete.")
def mount(self, app, script_path):
''' Mount a Bottle application to a specific URL prefix '''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
script_path = '/'.join(filter(None, script_path.split('/')))
path_depth = script_path.count('/') + 1
if not script_path:
raise TypeError('Empty script_path. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(script_path):
raise TypeError('Conflict with existing mount: %s' % other)
@self.route('/%s/:#.*#' % script_path, method="ANY")
def mountpoint():
request.path_shift(path_depth)
return app.handle(request.path, request.method)
self.mounts[script_path] = app
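    # Editor's note (illustrative, not in the original source): typical use of
    # mount() is
    #     root, api = Bottle(), Bottle()
    #     root.mount(api, '/api')
    # after which requests under /api/... are dispatched to the mounted app.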
def add_filter(self, ftype, func):
''' Register a new output filter. Whenever bottle hits a handler output
matching `ftype`, `func` is applied to it. '''
if not isinstance(ftype, type):
raise TypeError("Expected type object, got %s" % type(ftype))
self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype]
self.castfilter.append((ftype, func))
self.castfilter.sort()
def match_url(self, path, method='GET'):
""" Find a callback bound to a path and a specific HTTP method.
Return (callback, param) tuple or raise HTTPError.
method: HEAD falls back to GET. All methods fall back to ANY.
"""
path, method = path.strip().lstrip('/'), method.upper()
callbacks, args = self.routes.match(path)
if not callbacks:
raise HTTPError(404, "Not found: " + path)
if method in callbacks:
return callbacks[method], args
if method == 'HEAD' and 'GET' in callbacks:
return callbacks['GET'], args
if 'ANY' in callbacks:
return callbacks['ANY'], args
allow = [m for m in callbacks if m != 'ANY']
if 'GET' in allow and 'HEAD' not in allow:
allow.append('HEAD')
raise HTTPError(405, "Method not allowed.",
header=[('Allow',",".join(allow))])
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.routes.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def route(self, path=None, method='GET', **kargs):
""" Decorator: bind a function to a GET request path.
If the path parameter is None, the signature of the decorated
function is used to generate the paths. See yieldroutes()
for details.
The method parameter (default: GET) specifies the HTTP request
method to listen to. You can specify a list of methods too.
"""
def wrapper(callback):
routes = [path] if path else yieldroutes(callback)
methods = method.split(';') if isinstance(method, str) else method
for r in routes:
for m in methods:
r, m = r.strip().lstrip('/'), m.strip().upper()
old = self.routes.get_route(r, **kargs)
if old:
old.target[m] = callback
else:
self.routes.add(r, {m: callback}, **kargs)
self.routes.compile()
return callback
return wrapper
def get(self, path=None, method='GET', **kargs):
""" Decorator: Bind a function to a GET request path.
See :meth:'route' for details. """
return self.route(path, method, **kargs)
def post(self, path=None, method='POST', **kargs):
""" Decorator: Bind a function to a POST request path.
See :meth:'route' for details. """
return self.route(path, method, **kargs)
def put(self, path=None, method='PUT', **kargs):
""" Decorator: Bind a function to a PUT request path.
See :meth:'route' for details. """
return self.route(path, method, **kargs)
def delete(self, path=None, method='DELETE', **kargs):
""" Decorator: Bind a function to a DELETE request path.
See :meth:'route' for details. """
return self.route(path, method, **kargs)
def error(self, code=500):
""" Decorator: Registrer an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def handle(self, url, method):
""" Execute the handler bound to the specified url and method and return
        its output. If catchall is true, exceptions are caught and returned as
HTTPError(500) objects. """
if not self.serve:
return HTTPError(503, "Server stopped")
try:
handler, args = self.match_url(url, method)
return handler(**args)
except HTTPResponse, e:
return e
except Exception, e:
if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
or not self.catchall:
raise
return HTTPError(500, 'Unhandled exception', e, format_exc(10))
def _cast(self, out, request, response, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Filtered types (recursive, because they may return anything)
for testtype, filterfunc in self.castfilter:
if isinstance(out, testtype):
return self._cast(filterfunc(out), request, response)
# Empty output is done here
if not out:
response.headers['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (StringType, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, StringType):
response.headers['Content-Length'] = str(len(out))
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
if isinstance(out, HTTPError):
out.apply(response)
return self._cast(self.error_handler.get(out.status, repr)(out), request, response)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output, request, response)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = out.next()
while not first:
first = out.next()
except StopIteration:
return self._cast('', request, response)
except HTTPResponse, e:
first = e
except Exception, e:
first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
or not self.catchall:
raise
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first, request, response)
if isinstance(first, StringType):
return itertools.chain([first], out)
if isinstance(first, unicode):
return itertools.imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)), request, response)
def __call__(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind(self)
out = self.handle(request.path, request.method)
out = self._cast(out, request, response)
# rfc2616 section 4.3
if response.status in (100, 101, 204, 304) or request.method == 'HEAD':
out = []
status = '%d %s' % (response.status, HTTP_CODES[response.status])
start_response(status, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall:
raise
err = '<h1>Critical error while processing request: %s</h1>' \
% environ.get('PATH_INFO', '/')
if DEBUG:
err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e)
err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10)
environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
return [tob(err)]
class Request(threading.local, DictMixin):
""" Represents a single HTTP request using thread-local attributes.
The Request object wraps a WSGI environment and can be used as such.
"""
def __init__(self, environ=None, config=None):
""" Create a new Request instance.
You usually don't do this but use the global `bottle.request`
instance instead.
"""
self.bind(environ or {}, config)
def bind(self, environ, config=None):
""" Bind a new WSGI enviroment.
This is done automatically for the global `bottle.request`
instance on every request.
"""
self.environ = environ
self.config = config or {}
# These attributes are used anyway, so it is ok to compute them here
self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
self.method = environ.get('REQUEST_METHOD', 'GET').upper()
@property
def _environ(self):
depr("Request._environ renamed to Request.environ")
return self.environ
def copy(self):
''' Returns a copy of self '''
return Request(self.environ.copy(), self.config)
def path_shift(self, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
script_name = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift)
self['PATH_INFO'] = self.path
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Shortcut for Request.environ.__setitem__ """
self.environ[key] = value
todelete = []
if key in ('PATH_INFO','REQUEST_METHOD'):
self.bind(self.environ, self.config)
elif key == 'wsgi.input': todelete = ('body','forms','files','params')
elif key == 'QUERY_STRING': todelete = ('get','params')
elif key.startswith('HTTP_'): todelete = ('headers', 'cookies')
for key in todelete:
if 'bottle.' + key in self.environ:
del self.environ['bottle.' + key]
@property
def query_string(self):
""" The content of the QUERY_STRING environment variable. """
return self.environ.get('QUERY_STRING', '')
@property
def fullpath(self):
""" Request path including SCRIPT_NAME (if present) """
return self.environ.get('SCRIPT_NAME', '').rstrip('/') + self.path
@property
def url(self):
""" Full URL as requested by the client (computed).
This value is constructed out of different environment variables
and includes scheme, host, port, scriptname, path and query string.
"""
scheme = self.environ.get('wsgi.url_scheme', 'http')
host = self.environ.get('HTTP_X_FORWARDED_HOST', self.environ.get('HTTP_HOST', None))
if not host:
host = self.environ.get('SERVER_NAME')
port = self.environ.get('SERVER_PORT', '80')
if scheme + port not in ('https443', 'http80'):
host += ':' + port
parts = (scheme, host, urlquote(self.fullpath), self.query_string, '')
return urlunsplit(parts)
@property
def content_length(self):
""" Content-Length header as an integer, -1 if not specified """
return int(self.environ.get('CONTENT_LENGTH','') or -1)
@property
def header(self):
''' :class:`HeaderDict` filled with request headers.
            HeaderDict keys are case-insensitive and normalized with str.title().
'''
if 'bottle.headers' not in self.environ:
header = self.environ['bottle.headers'] = HeaderDict()
for key, value in self.environ.iteritems():
if key.startswith('HTTP_'):
key = key[5:].replace('_','-').title()
header[key] = value
return self.environ['bottle.headers']
@property
def GET(self):
""" The QUERY_STRING parsed into a MultiDict.
Keys and values are strings. Multiple values per key are possible.
See MultiDict for details.
"""
if 'bottle.get' not in self.environ:
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = MultiDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return self.environ['bottle.get']
@property
def POST(self):
""" Property: The HTTP POST body parsed into a MultiDict.
This supports urlencoded and multipart POST requests. Multipart
is commonly used for file uploads and may result in some of the
values being cgi.FieldStorage objects instead of strings.
Multiple values per key are possible. See MultiDict for details.
"""
if 'bottle.post' not in self.environ:
self.environ['bottle.post'] = MultiDict()
self.environ['bottle.forms'] = MultiDict()
self.environ['bottle.files'] = MultiDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
# TODO: Content-Length may be wrong now. Does cgi.FieldStorage
# use it at all? I think not, because all tests pass.
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
if item.filename:
self.environ['bottle.post'][item.name] = item
self.environ['bottle.files'][item.name] = item
else:
self.environ['bottle.post'][item.name] = item.value
self.environ['bottle.forms'][item.name] = item.value
return self.environ['bottle.post']
@property
def forms(self):
""" Property: HTTP POST form data parsed into a MultiDict. """
if 'bottle.forms' not in self.environ: self.POST
return self.environ['bottle.forms']
@property
def files(self):
""" Property: HTTP POST file uploads parsed into a MultiDict. """
if 'bottle.files' not in self.environ: self.POST
return self.environ['bottle.files']
@property
def params(self):
""" A combined MultiDict with POST and GET parameters. """
if 'bottle.params' not in self.environ:
self.environ['bottle.params'] = MultiDict(self.GET)
self.environ['bottle.params'].update(dict(self.forms))
return self.environ['bottle.params']
@property
def body(self):
""" The HTTP request body as a seekable buffer object.
This property returns a copy of the `wsgi.input` stream and should
be used instead of `environ['wsgi.input']`.
"""
if 'bottle.body' not in self.environ:
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, MEMFILE_MAX))
if not part: #TODO: Wrong content_length. Error? Do nothing?
break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
self.environ['bottle.body'] = body
self.environ['bottle.body'].seek(0)
return self.environ['bottle.body']
@property
def auth(self): #TODO: Tests and docs. Add support for digest. namedtuple?
""" HTTP authorisation data as a (user, passwd) tuple. (experimental)
This implementation currently only supports basic auth and returns
None on errors.
"""
return parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
@property
def COOKIES(self):
""" Cookie information parsed into a dictionary.
Secure cookies are NOT decoded automatically. See
Request.get_cookie() for details.
"""
if 'bottle.cookies' not in self.environ:
raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
self.environ['bottle.cookies'] = {}
for cookie in raw_dict.itervalues():
self.environ['bottle.cookies'][cookie.key] = cookie.value
return self.environ['bottle.cookies']
def get_cookie(self, name, secret=None):
""" Return the (decoded) value of a cookie. """
value = self.COOKIES.get(name)
dec = cookie_decode(value, secret) if secret else None
return dec or value
@property
def is_ajax(self):
''' True if the request was generated using XMLHttpRequest '''
#TODO: write tests
return self.header.get('X-Requested-With') == 'XMLHttpRequest'
class Response(threading.local):
""" Represents a single HTTP response using thread-local attributes.
"""
def __init__(self, config=None):
self.bind(config)
def bind(self, config=None):
""" Resets the Response object to its factory defaults. """
self._COOKIES = None
self.status = 200
self.headers = HeaderDict()
self.content_type = 'text/html; charset=UTF-8'
self.config = config or {}
@property
def header(self):
depr("Response.header renamed to Response.headers")
return self.headers
def copy(self):
''' Returns a copy of self '''
copy = Response(self.config)
copy.status = self.status
copy.headers = self.headers.copy()
copy.content_type = self.content_type
return copy
def wsgiheader(self):
''' Returns a wsgi conform list of header/value pairs. '''
for c in self.COOKIES.values():
if c.OutputString() not in self.headers.getall('Set-Cookie'):
self.headers.append('Set-Cookie', c.OutputString())
# rfc2616 section 10.2.3, 10.3.5
if self.status in (204, 304) and 'content-type' in self.headers:
del self.headers['content-type']
if self.status == 304:
for h in ('allow', 'content-encoding', 'content-language',
'content-length', 'content-md5', 'content-range',
'content-type', 'last-modified'): # + c-location, expires?
if h in self.headers:
del self.headers[h]
return list(self.headers.iterallitems())
headerlist = property(wsgiheader)
@property
def charset(self):
""" Return the charset specified in the content-type header.
This defaults to `UTF-8`.
"""
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return 'UTF-8'
@property
def COOKIES(self):
""" A dict-like SimpleCookie instance. Use Response.set_cookie() instead. """
if not self._COOKIES:
self._COOKIES = SimpleCookie()
return self._COOKIES
def set_cookie(self, key, value, secret=None, **kargs):
""" Add a new cookie with various options.
If the cookie value is not a string, a secure cookie is created.
Possible options are:
expires, path, comment, domain, max_age, secure, version, httponly
See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
"""
if not isinstance(value, basestring):
if not secret:
raise TypeError('Cookies must be strings when secret is not set')
value = cookie_encode(value, secret).decode('ascii') #2to3 hack
self.COOKIES[key] = value
for k, v in kargs.iteritems():
self.COOKIES[key][k.replace('_', '-')] = v
def get_content_type(self):
""" Current 'Content-Type' header. """
return self.headers['Content-Type']
def set_content_type(self, value):
self.headers['Content-Type'] = value
content_type = property(get_content_type, set_content_type, None,
get_content_type.__doc__)
# Data Structures
class MultiDict(DictMixin):
""" A dict that remembers old values for each key """
# collections.MutableMapping would be better for Python >= 2.6
def __init__(self, *a, **k):
self.dict = dict()
for k, v in dict(*a, **k).iteritems():
self[k] = v
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def keys(self): return self.dict.keys()
def __getitem__(self, key): return self.get(key, KeyError, -1)
def __setitem__(self, key, value): self.append(key, value)
def append(self, key, value): self.dict.setdefault(key, []).append(value)
def replace(self, key, value): self.dict[key] = [value]
def getall(self, key): return self.dict.get(key) or []
def get(self, key, default=None, index=-1):
if key not in self.dict and default != KeyError:
return [default][index]
return self.dict[key][index]
def iterallitems(self):
for key, values in self.dict.iteritems():
for value in values:
yield key, value
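# Illustrative sketch (added; not part of the original module): how MultiDict
# keeps every value ever assigned to a key while plain item access returns
# only the most recent one. The helper name below is invented for this demo.
def _multidict_example():
    md = MultiDict()
    md['tag'] = 'a'
    md['tag'] = 'b'                         # appends instead of overwriting
    assert md['tag'] == 'b'                 # latest value wins on item access
    assert md.getall('tag') == ['a', 'b']   # full history is still available
    return md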
class HeaderDict(MultiDict):
""" Same as :class:`MultiDict`, but title()s the keys and overwrites by default. """
def __contains__(self, key): return MultiDict.__contains__(self, self.httpkey(key))
def __getitem__(self, key): return MultiDict.__getitem__(self, self.httpkey(key))
def __delitem__(self, key): return MultiDict.__delitem__(self, self.httpkey(key))
def __setitem__(self, key, value): self.replace(key, value)
def get(self, key, default=None, index=-1): return MultiDict.get(self, self.httpkey(key), default, index)
def append(self, key, value): return MultiDict.append(self, self.httpkey(key), str(value))
def replace(self, key, value): return MultiDict.replace(self, self.httpkey(key), str(value))
def getall(self, key): return MultiDict.getall(self, self.httpkey(key))
def httpkey(self, key): return str(key).replace('_','-').title()
class AppStack(list):
""" A stack implementation. """
def __call__(self):
""" Return the current default app. """
return self[-1]
def push(self, value=None):
""" Add a new Bottle instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
read, buff = self.fp.read, self.buffer_size
while True:
part = read(buff)
if not part: break
yield part
# Module level functions
# Output filter
def dict2json(d):
response.content_type = 'application/json'
return json_dumps(d)
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=303):
""" Aborts execution and causes a 303 redirect """
scriptname = request.environ.get('SCRIPT_NAME', '').rstrip('/') + '/'
location = urljoin(request.url, urljoin(scriptname, url))
raise HTTPResponse("", status=code, header=dict(Location=location))
def send_file(*a, **k): #BC 0.6.4
""" Raises the output of static_file(). (deprecated) """
raise static_file(*a, **k)
def static_file(filename, root, guessmime=True, mimetype=None, download=False):
""" Opens a file in a safe way and returns a HTTPError object with status
code 200, 305, 401 or 404. Sets Content-Type, Content-Length and
Last-Modified header. Obeys If-Modified-Since header and HEAD requests.
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
header = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if not mimetype and guessmime:
header['Content-Type'] = mimetypes.guess_type(filename)[0]
else:
header['Content-Type'] = mimetype if mimetype else 'text/plain'
if download == True:
download = os.path.basename(filename)
if download:
header['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
header['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = ims.split(";")[0].strip() # IE sends "<date>; length=146"
ims = parse_date(ims)
if ims is not None and ims >= int(stats.st_mtime):
header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, header=header)
header['Content-Length'] = stats.st_size
if request.method == 'HEAD':
return HTTPResponse('', header=header)
else:
return HTTPResponse(open(filename, 'rb'), header=header)
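# Hedged usage sketch (added for illustration; the '/static' prefix and the
# './public' directory are assumed values, not defaults of this module):
# a typical route that hands file delivery over to static_file().
def _static_file_example():
    @route('/static/:filename')
    def serve(filename):
        return static_file(filename, root='./public')
    return serve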
# Utilities
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
DEBUG = bool(mode)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
name, pwd = base64.b64decode(data).split(':', 1)
return name, pwd
except (KeyError, ValueError, TypeError):
return None
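# Minimal sketch (added for illustration): parse_auth() on a well-formed Basic
# authorization header. The credentials below are made up for the example.
def _parse_auth_example():
    header = 'Basic ' + base64.b64encode('alice:secret')
    assert parse_auth(header) == ('alice', 'secret')
    assert parse_auth('Bearer some-token') is None  # only Basic auth is handled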
def _lscmp(a, b):
    ''' Compares two strings in a cryptographically safe way:
        Runtime is not affected by a common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(key, msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
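# Illustrative round trip (added; not original code): cookie_encode() pickles
# and signs a value, cookie_decode() only returns it if the signature matches
# the same secret. The secret string here is an arbitrary example value.
def _signed_cookie_example():
    secret = 'example-secret'
    token = cookie_encode({'user': 42}, secret)
    assert cookie_is_encoded(token)
    assert cookie_decode(token, secret) == {'user': 42}
    assert cookie_decode(token, 'wrong-secret') is None  # signature mismatch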
def tonativefunc(enc='utf-8'):
''' Returns a function that turns everything into 'native' strings using enc '''
if sys.version_info >= (3,0,0):
return lambda x: x.decode(enc) if isinstance(x, bytes) else str(x)
return lambda x: x.encode(enc) if isinstance(x, unicode) else str(x)
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example:
a() -> '/a'
b(x, y) -> '/b/:x/:y'
c(x, y=5) -> '/c/:x' and '/c/:x/:y'
d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
"""
path = func.__name__.replace('__','/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path
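# Small demonstration (added for illustration) of the signature-to-route
# expansion documented above; the handler below is a made-up example.
def _yieldroutes_example():
    def hello(name, count=5):
        return name * count
    routes = [r.lstrip('/') for r in yieldroutes(hello)]
    # One extra route per optional argument, after the mandatory ones.
    assert routes == ['hello/:name', 'hello/:name/:count']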
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
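# Worked example (added for illustration) of moving one fragment from
# PATH_INFO onto SCRIPT_NAME, as a middleware mounting a sub-application might.
def _path_shift_example():
    assert path_shift('/app', '/blog/2010/entry', shift=1) == \
        ('/app/blog', '/2010/entry')
    # A negative shift moves fragments back from SCRIPT_NAME into PATH_INFO.
    assert path_shift('/app/blog', '/2010/entry', shift=-1) == \
        ('/app', '/blog/2010/entry')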
# Decorators
#TODO: Replace default_app() with app()
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in vkargs.iteritems():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
return decorator
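# Hedged usage sketch (added): combining @route with @validate so the ':id'
# fragment is coerced to int before the handler runs. The route path and
# handler name are invented for this example.
def _validate_example():
    @route('/item/:id')
    @validate(id=int)
    def show_item(id):
        return 'item %d' % id
    return show_item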
route = functools.wraps(Bottle.route)(lambda *a, **ka: app().route(*a, **ka))
get = functools.wraps(Bottle.get)(lambda *a, **ka: app().get(*a, **ka))
post = functools.wraps(Bottle.post)(lambda *a, **ka: app().post(*a, **ka))
put = functools.wraps(Bottle.put)(lambda *a, **ka: app().put(*a, **ka))
delete = functools.wraps(Bottle.delete)(lambda *a, **ka: app().delete(*a, **ka))
error = functools.wraps(Bottle.error)(lambda *a, **ka: app().error(*a, **ka))
url = functools.wraps(Bottle.get_url)(lambda *a, **ka: app().get_url(*a, **ka))
mount = functools.wraps(Bottle.mount)(lambda *a, **ka: app().mount(*a, **ka))
def default():
depr("The default() decorator is deprecated. Use @error(404) instead.")
return error(404)
# Server adapter
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **kargs):
self.options = kargs
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
CGIHandler().run(handler) # Just ignore host and port here
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
flup.server.fcgi.WSGIServer(handler, bindAddress=(self.host, self.port)).run()
class WSGIRefServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
server.start()
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
app = TransLogger(handler)
httpserver.serve(app, host=self.host, port=str(self.port), **self.options)
class FapwsServer(ServerAdapter):
"""
    Extremely fast webserver using libev.
See http://william-os4y.livejournal.com/
"""
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base
evwsgi.start(self.host, self.port)
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('',app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" Untested. As described here:
http://github.com/facebook/tornado/blob/master/tornado/wsgi.py#L187 """
def run(self, handler): # pragma: no cover
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Untested. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GunicornServer(ServerAdapter):
""" Untested. """
def run(self, handler):
import gunicorn.arbiter
gunicorn.arbiter.Arbiter((self.host, self.port), 4, handler).run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. As requested in issue 63
http://github.com/defnull/bottle/issues/#issue/63 """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [CherryPyServer, PasteServer, TwistedServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, **kargs):
""" Runs bottle as a web server. """
app = app if app else default_app()
# Instantiate server, if it is a class instead of an instance
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of WSGIAdapter")
server.quiet = server.quiet or quiet
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print "Bottle server starting up (using %s)..." % repr(server)
print "Listening on http://%s:%d/" % (server.host, server.port)
print "Use Ctrl-C to quit."
print
try:
if reloader:
interval = min(interval, 1)
if os.environ.get('BOTTLE_CHILD'):
_reloader_child(server, app, interval)
else:
_reloader_observer(server, app, interval)
else:
server.run(app)
except KeyboardInterrupt: pass
if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
print "Shutting down..."
class FileCheckerThread(threading.Thread):
''' Thread that periodically checks for changed module files. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
        #1: lockfile too old; 2: lockfile missing
#3: module file changed; 5: external exit
self.status = 0
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in sys.modules.values():
try:
path = inspect.getsourcefile(module)
if path and exists(path): files[path] = mtime(path)
except TypeError: pass
while not self.status:
for path, lmtime in files.iteritems():
if not exists(path) or mtime(path) > lmtime:
self.status = 3
if not exists(self.lockfile):
self.status = 2
elif mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 1
if not self.status:
time.sleep(self.interval)
if self.status != 5:
thread.interrupt_main()
def _reloader_child(server, app, interval):
''' Start the server and check for modified files in a background thread.
As soon as an update is detected, KeyboardInterrupt is thrown in
        the main thread to exit the server loop. The process exits with status
        code 3 to request a reload by the observer process. If the lockfile
        is not modified for 2*interval seconds or goes missing, we assume that the
observer process died and exit with status code 1 or 2.
'''
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
try:
bgcheck.start()
server.run(app)
except KeyboardInterrupt, e: pass
bgcheck.status, status = 5, bgcheck.status
bgcheck.join() # bgcheck.status == 5 --> silent exit
if status: sys.exit(status)
def _reloader_observer(server, app, interval):
''' Start a child process with identical commandline arguments and restart
        it as long as it exits with status code 3. Also create a lockfile and
touch it (update mtime) every interval seconds.
'''
fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
try:
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
elif not server.quiet:
print "Reloading server..."
except KeyboardInterrupt: pass
if os.path.exists(lockfile): os.unlink(lockfile)
# Templates
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extentions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = map(os.path.abspath, lookup)
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extentions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, **args):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
#TODO: This is a hack... http://github.com/defnull/bottle/issues#issue/8
mylookup = TemplateLookup(directories=['.']+self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=mylookup)
        else: #mako cannot guess extensions. We can, but only at top level...
name = self.name
if not os.path.splitext(name)[1]:
name += os.path.splitext(self.filename)[1]
self.tpl = mylookup.get_template(name)
def render(self, **args):
_defaults = self.defaults.copy()
_defaults.update(args)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, **args):
self.context.vars.update(self.defaults)
self.context.vars.update(args)
out = str(self.tpl)
self.context.vars.clear()
return [out]
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, **args):
_defaults = self.defaults.copy()
_defaults.update(args)
return self.tpl.render(**_defaults).encode("utf-8")
def loader(self, name):
fname = self.search(name, self.lookup)
if fname:
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
blocks = ('if','elif','else','try','except','finally','for','while','with','def','class')
dedent_blocks = ('elif', 'else', 'except', 'finally')
def prepare(self, escape_func=cgi.escape, noescape=False):
self.cache = {}
if self.source:
self.code = self.translate(self.source)
self.co = compile(self.code, '<string>', 'exec')
else:
self.code = self.translate(open(self.filename).read())
self.co = compile(self.code, self.filename, 'exec')
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
if noescape:
self._str, self._escape = self._escape, self._str
def translate(self, template):
stack = [] # Current Code indentation
lineno = 0 # Current line of code
ptrbuffer = [] # Buffer for printable strings and token tuple instances
codebuffer = [] # Buffer for generated python code
touni = functools.partial(unicode, encoding=self.encoding)
multiline = dedent = False
def yield_tokens(line):
for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
if i % 2:
if part.startswith('!'): yield 'RAW', part[1:]
else: yield 'CMD', part
else: yield 'TXT', part
def split_comment(codeline):
""" Removes comments from a line of code. """
line = codeline.splitlines()[0]
try:
tokens = list(tokenize.generate_tokens(iter(line).next))
except tokenize.TokenError:
return line.rsplit('#',1) if '#' in line else (line, '')
for token in tokens:
if token[0] == tokenize.COMMENT:
start, end = token[2][1], token[3][1]
return codeline[:start] + codeline[end:], codeline[start:end]
return line, ''
def flush(): # Flush the ptrbuffer
if not ptrbuffer: return
cline = ''
for line in ptrbuffer:
for token, value in line:
if token == 'TXT': cline += repr(value)
elif token == 'RAW': cline += '_str(%s)' % value
elif token == 'CMD': cline += '_escape(%s)' % value
cline += ', '
cline = cline[:-2] + '\\\n'
cline = cline[:-2]
if cline[:-1].endswith('\\\\\\\\\\n'):
cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
cline = '_printlist([' + cline + '])'
del ptrbuffer[:] # Do this before calling code() again
code(cline)
def code(stmt):
for line in stmt.splitlines():
codebuffer.append(' ' * len(stack) + line.strip())
for line in template.splitlines(True):
lineno += 1
line = line if isinstance(line, unicode)\
else unicode(line, encoding=self.encoding)
if lineno <= 2:
m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if line.strip()[:2].count('%') == 1:
line = line.split('%',1)[1].lstrip() # Full line following the %
cline = split_comment(line)[0].strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
            flush() ##encoding (TODO: why?)
if cmd in self.blocks or multiline:
cmd = multiline or cmd
dedent = cmd in self.dedent_blocks # "else:"
if dedent and not oneline and not multiline:
cmd = stack.pop()
code(line)
oneline = not cline.endswith(':') # "if 1: pass"
multiline = cmd if cline.endswith('\\') else False
if not oneline and not multiline:
stack.append(cmd)
elif cmd == 'end' and stack:
code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
elif cmd == 'include':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
elif p:
code("_=_include(%s, _stdout)" % repr(p[0]))
else: # Empty %include -> reverse of %rebase
code("_printlist(_base)")
elif cmd == 'rebase':
p = cline.split(None, 2)[1:]
if len(p) == 2:
code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
elif p:
code("globals()['_rebase']=(%s, {})" % repr(p[0]))
else:
code(line)
else: # Line starting with text (not '%') or '%%' (escaped)
if line.strip().startswith('%%'):
line = line.replace('%%', '%', 1)
ptrbuffer.append(yield_tokens(line))
flush()
return '\n'.join(codebuffer) + '\n'
def subtemplate(self, _name, _stdout, **args):
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(_stdout, **args)
def execute(self, _stdout, **args):
env = self.defaults.copy()
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'_include': self.subtemplate, '_str': self._str,
'_escape': self._escape})
env.update(args)
eval(self.co, env)
if '_rebase' in env:
subtpl, rargs = env['_rebase']
subtpl = self.__class__(name=subtpl, lookup=self.lookup)
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
return subtpl.execute(_stdout, **rargs)
return env
def render(self, **args):
""" Render the template using keyword arguments as local variables. """
stdout = []
self.execute(stdout, **args)
return ''.join(stdout)
def template(tpl, template_adapter=SimpleTemplate, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
'''
if tpl not in TEMPLATES or DEBUG:
settings = kwargs.get('template_settings',{})
lookup = kwargs.get('template_lookup', TEMPLATE_PATH)
if isinstance(tpl, template_adapter):
TEMPLATES[tpl] = tpl
if settings: TEMPLATES[tpl].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tpl].render(**kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
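# Hedged example (added): rendering an inline SimpleTemplate string through
# template(). The template source and variable are invented for illustration.
def _template_example():
    return template('Hello {{name}}!', name='World')  # -> u'Hello World!'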
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
        The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
          This includes returning an HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator
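# Illustration (added): a handler returning a dict, which @view feeds into a
# template. 'hello_template' is a hypothetical template name for this sketch.
def _view_example():
    @route('/greet/:name')
    @view('hello_template')
    def greet(name):
        return dict(name=name)
    return greet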
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
# Module initialization and configuration
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
MEMFILE_MAX = 1024*100
HTTP_CODES = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
}
""" A dict of known HTTP error and status codes """
ERROR_PAGE_TEMPLATE = SimpleTemplate("""
%try:
%from bottle import DEBUG, HTTP_CODES, request
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{request.url}}</tt> caused an error:</p>
<pre>{{str(e.output)}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to sys.path
%end
""")
""" The HTML template used for error messages """
request = Request()
""" Whenever a page is requested, the :class:`Bottle` WSGI handler stores
metadata about the current request into this instance of :class:`Request`.
It is thread-safe and can be accessed from within handler functions. """
response = Response()
""" The :class:`Bottle` WSGI handler uses metadata assigned to this instance
of :class:`Response` to generate the WSGI response. """
local = threading.local()
""" Thread-local namespace. Not used by Bottle, but could get handy """
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
| {
"content_hash": "bc9d32792960044c0fd66ec909639ec6",
"timestamp": "",
"source": "github",
"line_count": 1933,
"max_line_length": 109,
"avg_line_length": 38.02224521469219,
"alnum_prop": 0.5920241642516021,
"repo_name": "svetlyak40wt/cony",
"id": "8f2be9e815fe407ed035d52d50a69f154b89515c",
"size": "73521",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bottle.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "91629"
},
{
"name": "VimL",
"bytes": "33"
}
],
"symlink_target": ""
} |
class UtmContent(object):
"""The utm content of an Ganalytics object."""
def __init__(self, utm_content=None):
"""Create a UtmContent object
:param utm_content: Used to differentiate your campaign from advertisements.
:type utm_content: string, optional
"""
self._utm_content = None
if utm_content is not None:
self.utm_content = utm_content
@property
def utm_content(self):
"""Used to differentiate your campaign from advertisements.
:rtype: string
"""
return self._utm_content
@utm_content.setter
def utm_content(self, value):
"""Used to differentiate your campaign from advertisements.
:param value: Used to differentiate your campaign from advertisements.
:type value: string
"""
self._utm_content = value
def get(self):
"""
Get a JSON-ready representation of this UtmContent.
:returns: This UtmContent, ready for use in a request body.
:rtype: string
"""
return self.utm_content
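# Short usage sketch (added for illustration; not part of the original file):
# construct a UtmContent and read it back for use in a request body.
def _utm_content_example():
    content = UtmContent('spring-sale-banner')    # example content tag
    assert content.get() == 'spring-sale-banner'
    content.utm_content = 'summer-sale-banner'    # the value can be replaced
    return content.get()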
| {
"content_hash": "6f0209d8ca1e0d2bf4482974f281e74d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 27.55,
"alnum_prop": 0.6098003629764065,
"repo_name": "sendgrid/sendgrid-python",
"id": "e2a8ccff6377f97614362190e48da0c83d1c64e9",
"size": "1102",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sendgrid/helpers/mail/utm_content.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "356"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Procfile",
"bytes": "43"
},
{
"name": "Python",
"bytes": "388101"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import unittest
from unittest import mock
from airflow.providers.grpc.operators.grpc import GrpcOperator
class StubClass:
def __init__(self, channel):
pass
def stream_call(self, data):
pass
class TestGrpcOperator(unittest.TestCase):
def custom_conn_func(self, connection):
pass
@mock.patch('airflow.providers.grpc.operators.grpc.GrpcHook')
def test_with_interceptors(self, mock_hook):
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
interceptors=[],
task_id="test_grpc",
)
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=[], custom_connection_func=None)
@mock.patch('airflow.providers.grpc.operators.grpc.GrpcHook')
def test_with_custom_connection_func(self, mock_hook):
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
custom_connection_func=self.custom_conn_func,
task_id="test_grpc",
)
operator.execute({})
mock_hook.assert_called_once_with(
"grpc_default", interceptors=None, custom_connection_func=self.custom_conn_func
)
@mock.patch('airflow.providers.grpc.operators.grpc.GrpcHook')
def test_execute_with_log(self, mock_hook):
mocked_hook = mock.Mock()
mock_hook.return_value = mocked_hook
mocked_hook.configure_mock(**{'run.return_value': ["value1", "value2"]})
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
log_response=True,
task_id="test_grpc",
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=None, custom_connection_func=None)
mocked_hook.run.assert_called_once_with(StubClass, "stream_call", data={}, streaming=False)
mock_info.assert_any_call("Calling gRPC service")
mock_info.assert_any_call("'value1'")
mock_info.assert_any_call("'value2'")
@mock.patch('airflow.providers.grpc.operators.grpc.GrpcHook')
def test_execute_with_callback(self, mock_hook):
mocked_hook = mock.Mock()
callback = mock.Mock()
mock_hook.return_value = mocked_hook
mocked_hook.configure_mock(**{'run.return_value': ["value1", "value2"]})
operator = GrpcOperator(
stub_class=StubClass, call_func="stream_call", task_id="test_grpc", response_callback=callback
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=None, custom_connection_func=None)
mocked_hook.run.assert_called_once_with(StubClass, "stream_call", data={}, streaming=False)
assert ("'value1'", "'value2'") not in mock_info.call_args_list
mock_info.assert_any_call("Calling gRPC service")
callback.assert_any_call("value1", {})
callback.assert_any_call("value2", {})
| {
"content_hash": "ee31c41ab066911cf30efb9e5fc9d033",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 109,
"avg_line_length": 38.05882352941177,
"alnum_prop": 0.624420401854714,
"repo_name": "cfei18/incubator-airflow",
"id": "794d42ad1556ef3099f83c054279a9c7fa07f213",
"size": "4020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/grpc/operators/test_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
import os
import sys
import datetime
from djpcms import test
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import management
try:
from djpcms.contrib.jdep.fabtools import *
fabric_available = True
except ImportError:
fabric_available = False
from StringIO import StringIO
from django.core.management.base import CommandError
class CommandDeploy(test.TestCase):
def setUp(self):
User.objects.create_superuser('pinco', 'pinco@pinco.com', 'pallino')
Site(domain='mysite.com', name='mysite.com').save()
def test_command(self):
out = StringIO()
management.call_command('deploy', stdout=out)
self.assertEquals(out.getvalue(),'no user. nothing done.')
def test_no_site(self):
out = StringIO()
management.call_command('deploy', username='pinco', password='pallino', stdout=out)
self.assertEquals(out.getvalue(),"no site. nothing done.")
def test_done(self):
out = StringIO()
management.call_command('deploy', username='pinco', password='pallino', domain='mysite.com', stdout=out)
self.assertTrue("ok. pinco deployed mysite.com." in out.getvalue())
if fabric_available:
split = os.path.split
class Deployment(test.TestCase):
def setUp(self):
self.curdir = os.getcwd()
self.clear()
path = os.path.split(os.path.abspath(__file__))[0]
os.chdir(path)
if path not in sys.path:
sys.path.insert(0,path)
env.host_string = 'localhost'
utils.project('testjdep','testjdep.com', redirect_port = 103)
self.settings.INSTALLED_APPS.append('djpcms.contrib.flowrepo')
def testPath(self):
upload(False)
self.assertEqual(env.project,'testjdep')
self.assertEqual(env.domain_name,'testjdep.com')
self.assertTrue(env.release)
self.assertTrue(env.release_path)
def testApps(self):
result = deploy(False)
nginx = result['nginx']
apps = result['apps']
self.assertTrue(apps)
media_inconf = 0
self.assertTrue(env.project_path in nginx)
for app in apps:
if app.exists:
media_inconf += 1
self.assertTrue('location %s {' % app.url() in nginx)
self.assertTrue(app.base in nginx)
def testServer(self):
result = deploy(False)
self.assertTrue(env.logdir)
self.assertTrue(env.confdir)
self.assertEqual(split(env.logdir)[0],split(env.confdir)[0])
nginx = result['nginx']
self.assertTrue('server_name %s' % env.domain_name in nginx)
self.assertTrue('access_log %s' % env.logdir in nginx)
self.assertTrue('listen %s' % env.server_port in nginx)
self.assertTrue('proxy_pass http://127.0.0.1:103/;' in nginx)
def testApache(self):
result = deploy(False)
apache = result['apache']
self.assertTrue('ServerName %s' % env.domain_name in apache)
def tearDown(self):
os.chdir(self.curdir)
self.settings.INSTALLED_APPS.pop()
| {
"content_hash": "f27aab5c37ef6e46bbaabdf4a3477d9b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 112,
"avg_line_length": 33.95049504950495,
"alnum_prop": 0.5823855351414406,
"repo_name": "strogo/djpcms",
"id": "0ac2ad26396ade1e93f45336aa8ffa7ecccf4a1d",
"size": "3429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djpcms/contrib/jdep/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import fnmatch
import numpy
import pytest
import pickle
from lasio import las, read, exceptions
test_dir = os.path.dirname(__file__)
egfn = lambda fn: os.path.join(os.path.dirname(__file__), "examples", fn)
stegfn = lambda vers, fn: os.path.join(os.path.dirname(__file__), "examples", vers, fn)
def test_pickle_default_wb():
las = read(egfn("sample.las"))
with open("binary_serialization", "wb") as fw:
pickle.dump(las, fw)
with open("binary_serialization", "rb") as fr:
las = pickle.load(fr)
os.remove("binary_serialization")
| {
"content_hash": "916e58cbe9a351a60ce23a13e26cced8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.6687306501547987,
"repo_name": "kinverarity1/las-reader",
"id": "286916289c723200a1aeff0ce1b27684673a25b1",
"size": "646",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_serialization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "666403"
},
{
"name": "Lasso",
"bytes": "4852765"
},
{
"name": "Python",
"bytes": "82570"
}
],
"symlink_target": ""
} |
from rest_framework import test
from . import fixtures
class TenantQuotasTest(test.APITransactionTestCase):
def setUp(self):
super(TenantQuotasTest, self).setUp()
self.fixture = fixtures.OpenStackFixture()
self.tenant = self.fixture.tenant
self.project = self.fixture.project
self.customer = self.fixture.customer
def test_quotas_for_tenant_are_created_for_project_and_customer(self):
self.assertEqual(self.project.quotas.get(name='vpc_cpu_count').usage, 0)
self.assertEqual(self.project.quotas.get(name='vpc_ram_size').usage, 0)
self.assertEqual(self.project.quotas.get(name='vpc_storage_size').usage, 0)
self.assertEqual(self.project.quotas.get(name='vpc_floating_ip_count').usage, 0)
self.assertEqual(self.customer.quotas.get(name='vpc_cpu_count').usage, 0)
self.assertEqual(self.customer.quotas.get(name='vpc_ram_size').usage, 0)
self.assertEqual(self.customer.quotas.get(name='vpc_storage_size').usage, 0)
self.assertEqual(self.customer.quotas.get(name='vpc_floating_ip_count').usage, 0)
def test_quotas_for_tenant_are_increased_for_project_and_customer(self):
self.tenant.set_quota_usage('vcpu', 1)
self.tenant.set_quota_usage('ram', 1024)
self.tenant.set_quota_usage('storage', 102400)
self.tenant.set_quota_usage('floating_ip_count', 2)
self.assertEqual(self.project.quotas.get(name='vpc_cpu_count').usage, 1)
self.assertEqual(self.project.quotas.get(name='vpc_ram_size').usage, 1024)
self.assertEqual(self.project.quotas.get(name='vpc_storage_size').usage, 102400)
self.assertEqual(self.project.quotas.get(name='vpc_floating_ip_count').usage, 2)
self.assertEqual(self.customer.quotas.get(name='vpc_cpu_count').usage, 1)
self.assertEqual(self.customer.quotas.get(name='vpc_ram_size').usage, 1024)
self.assertEqual(self.customer.quotas.get(name='vpc_storage_size').usage, 102400)
self.assertEqual(self.customer.quotas.get(name='vpc_floating_ip_count').usage, 2)
| {
"content_hash": "dde4ce968a19f1ba49ee90c6c9edbbc6",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 89,
"avg_line_length": 52.25,
"alnum_prop": 0.6990430622009569,
"repo_name": "opennode/nodeconductor-openstack",
"id": "3ec8b1887ad4a9941a29b6f603c43395a86477e0",
"size": "2090",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/waldur_openstack/openstack/tests/test_quotas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "792426"
}
],
"symlink_target": ""
} |
class superclass(object):
def test1(self):
print "superclass"
class childclass(superclass):
def test1(self):
print "child"
def test2(self):
super(childclass, self).test1()
def test3(self):
self.test1()
super(childclass, self).test1()
x = childclass()
x.test1()
x.test2()
x.test3()
| {
"content_hash": "ae1dc338501b86616b30a1e6cb6040e8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 39,
"avg_line_length": 13.384615384615385,
"alnum_prop": 0.5890804597701149,
"repo_name": "chrivers/pyjaco",
"id": "506e4dce8bd919c837cef288933da087709a0730",
"size": "348",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/class/oo_simple_super.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "118838"
},
{
"name": "Python",
"bytes": "182689"
},
{
"name": "Shell",
"bytes": "864"
}
],
"symlink_target": ""
} |
from pmg.models import Bill, BillVersion, File, db, BillType
import json
import re
bills = json.load(open("data/bills-with-files.json"))
bill_pages = json.load(open("data/bill-pages.json"))
nids = json.load(open("data/nid_url.json"))
pages_by_nid = {p["nid"]: p for p in bill_pages}
nids_by_url = {n["url"]: n for n in nids}
nids = {n["nid"]: n for n in nids}
DESC_RE = re.compile('s:\d+:"description";s:\d+:"([^"]*)";')
def get_description(s):
# u'a:11:{s:11:"description";s:31:"Division of Revenue Act 10-2014";s:3:"fid";s:5:"51280";s:5:"width";i:0;s:6:"height";i:0;s:8:"duration";i:0;s:12:"audio_format";s:0:"";s:17:"audio_sample_rate";i:0;s:18:"audio_channel_mode";s:0:"";s:13:"audio_bitrate";i:0;s:18:"audio_bitrate_mode";s:0:"";s:4:"tags";a:0:{}}'
match = DESC_RE.search(s)
if match:
return match.group(1)
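# Added demonstration (not in the original script): get_description() applied
# to a shortened version of the serialized Drupal field quoted in the comment
# above.
def _get_description_example():
    raw = 'a:11:{s:11:"description";s:31:"Division of Revenue Act 10-2014";s:3:"fid";s:5:"51280";}'
    assert get_description(raw) == "Division of Revenue Act 10-2014"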
def get_file_info(url):
url = url.replace("http://www.pmg.org.za/", "")
if url.startswith("node/"):
nid = nids[url[5:]]["nid"]
else:
nid = nids_by_url.get(url)
if nid:
nid = nid["nid"]
else:
nid = {
u"bill/20060425-south-african-institute-for-drug-free-sport-amendment-act-25-2006": "44724",
}[url]
page = pages_by_nid[nid]
files = page["files"]
f = files[0]
f["description"] = get_description(f["field_file_bill_data"])
if f["filepath"].startswith("files/"):
f["filepath"] = f["filepath"][6:]
return f
added = 0
missing = 0
already_enacted = 0
already_exists = 0
def commit():
# db.session.commit()
print("added %d" % added)
print("missing %d" % missing)
print("already_enacted %d" % already_enacted)
print("already_exists %d" % already_exists)
# now load files into db
not_found = []
for bill in bills:
year = bill["year"]
number = bill["number"]
title = bill["name"]
bill_type = bill["bill_type"]
bill_obj = (
Bill.query.filter(Bill.year == year, Bill.number == number)
.join(BillType)
.filter(BillType.name == bill_type)
.first()
)
if not bill_obj:
print("Missing: %s %s -- %s" % (year, number, title))
missing += 1
continue
print("%s %s %s -- %s" % (bill_obj.id, year, number, title))
# already have enacted?
if any(v.enacted for v in bill_obj.versions):
already_enacted += 1
print("Already have enacted, skipping")
continue
for version in (e for e in bill["entries"] if e["type"] == "act"):
# find the file details
info = get_file_info(version["url"])
print("Version info: %s" % version)
print("File info: %s" % info)
# is there already a matching version?
existing = [
bv for bv in bill_obj.versions if bv.file.file_path == info["filepath"]
]
if existing:
existing[0].enacted = True
already_exists += 1
print("Already have matching file, skipping")
continue
# does the file exist?
file = File.query.filter(File.file_path == info["filepath"]).first()
if not file:
raise ValueError("File %s doesn't exist" % info["filepath"])
if not file.title:
file.title = info["description"]
date = version["date"]
# if no date, use the date it was signed
if not date or date == "None":
events = [e for e in bill_obj.events if e.type == "bill-signed"]
events.sort(key=lambda e: e.date, reverse=True)
date = events[0].date
bill_version = BillVersion()
bill_version.date = date
bill_version.title = version["title"]
bill_version.file = file
bill_version.enacted = True
bill_version.bill = bill_obj
added += 1
db.session.add(bill_version)
db.session.flush()
if added % 10 == 0:
commit()
commit()
| {
"content_hash": "7eb187f8cf2ffb4dd3fea174fa289388",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 312,
"avg_line_length": 29.68421052631579,
"alnum_prop": 0.565096251266464,
"repo_name": "Code4SA/pmg-cms-2",
"id": "f764b0013221edfda983d5fe585e4856417043f2",
"size": "3967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/import-enacted-bills.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "204619"
},
{
"name": "HTML",
"bytes": "361071"
},
{
"name": "JavaScript",
"bytes": "109536"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "469838"
}
],
"symlink_target": ""
} |
from unittest import TestCase, mock
from nose.plugins.attrib import attr
from shiftschema.schema import Schema
from shiftschema.result import Result
from shiftschema.property import SimpleProperty
from shiftschema.property import EntityProperty
from shiftschema.property import CollectionProperty
from shiftschema.exceptions import PropertyExists, InvalidValidator
from shiftschema.translator import Translator
from shiftschema import validators
from shiftschema import filters
from tests import helpers
from pprint import pprint as pp
@attr('schema')
class SchemaTest(TestCase):
def test_create_schema(self):
""" Creating a schema """
schema = Schema()
self.assertIsInstance(schema, Schema)
def test_can_check_property_existence(self):
""" Checking property existence on schema """
schema = Schema()
schema.properties['simple_property'] = 'property processor'
schema.entities['entity_property'] = 'entity processor'
schema.collections['collection_property'] = 'collection processor'
self.assertTrue(schema.has_property('simple_property'))
self.assertTrue(schema.has_property('entity_property'))
self.assertTrue(schema.has_property('collection_property'))
def test_access_properties_through_overloading(self):
""" Overload access to schema properties """
schema = Schema()
schema.add_property('first_name')
schema.add_entity('spouse')
schema.add_collection('addresses')
self.assertIsInstance(schema.first_name, SimpleProperty)
self.assertIsInstance(schema.spouse, EntityProperty)
self.assertIsInstance(schema.addresses, CollectionProperty)
with self.assertRaises(AttributeError):
self.assertIsInstance(schema.nothinghere, EntityProperty)
def test_add_state_validator(self):
""" Adding entity state validator to schema """
validator = helpers.ValidatorValid()
schema = Schema()
schema.add_state_validator(validator)
self.assertTrue(validator in schema.state)
def test_raise_on_adding_bad_state_validator(self):
""" Raise when adding state validator of bad type to schema """
schema = Schema()
with self.assertRaises(InvalidValidator):
schema.add_state_validator(dict())
def test_add_simple_property(self):
""" Adding simple property to schema """
schema = Schema()
schema.add_property('simple')
self.assertIn('simple', schema.properties)
def test_raise_on_adding_existing_simple_property(self):
""" Raise on adding simple property with existing name to schema"""
schema = Schema()
schema.add_property('simple')
with self.assertRaises(PropertyExists):
schema.add_property('simple')
def test_add_entity_property(self):
""" Adding linked entity property to schema """
schema = Schema()
schema.add_entity('entity')
self.assertIn('entity', schema.entities)
def test_raise_on_adding_existing_entity_property(self):
""" Raise on adding entity property with existing name to schema """
schema = Schema()
schema.add_entity('entity')
with self.assertRaises(PropertyExists):
schema.add_entity('entity')
def test_add_collection_property(self):
""" Adding collection property to schema"""
schema = Schema()
schema.add_collection('collection_prop')
self.assertIn('collection_prop', schema.collections)
def test_raise_on_adding_existing_collection_property(self):
""" Raise on adding collection property with existing name to schema """
schema = Schema()
schema.add_collection('collection_prop')
with self.assertRaises(PropertyExists):
schema.add_collection('collection_prop')
def test_model_getter_on_dict(self):
""" Using model-getter for dictionary-models """
model = dict(someproperty='some value')
schema = Schema()
self.assertEqual('some value', schema.get(model, 'someproperty'))
def test_model_getter_on_dict_returns_none_for_missing_keys(self):
""" BUGFIX: do not explode on fetching value for missing dict key """
model = dict(someproperty='some value')
schema = Schema()
self.assertIsNone(schema.get(model, 'me-is-missing'))
def test_model_getter_method(self):
""" Model getter calls getter on model if present """
class Model:
def get_someproperty(self):
return 'SOME VALUE'
model = Model()
schema = Schema()
self.assertEqual('SOME VALUE', schema.get(model, 'someproperty'))
def test_model_getter_attribute(self):
""" Model getter falls back to attribute fetch if no getter on model"""
class Model:
someproperty = 'some value'
model = Model()
schema = Schema()
self.assertEqual('some value', schema.get(model, 'someproperty'))
def test_model_setter_on_dict(self):
""" Using model setter for dictionary-models"""
model = dict()
schema = Schema()
schema.set(model, 'someproperty', 'SOME VALUE')
self.assertEqual('SOME VALUE', model['someproperty'])
def test_model_setter_method(self):
""" Model setter uses setter on model if present """
class Model:
def set_somevalue(self, value):
self.somevalue = value
model = Model()
schema = Schema()
schema.set(model, 'someproperty', 'some value')
self.assertEqual('some value', model.__dict__['someproperty'])
def test_model_setter_attribute(self):
""" Model setter fallback to attribute set if no setter on model """
class Model:
pass
model = Model()
schema = Schema()
schema.set(model, 'someproperty', 'SOME VALUE')
self.assertEqual('SOME VALUE', model.someproperty)
def test_create_by_subclassing(self):
""" Creating schema in subclass """
class MySchema(Schema):
def schema(self):
self.add_property('property')
self.add_entity('entity')
schema = MySchema()
self.assertTrue(schema.has_property('property'))
self.assertTrue(schema.has_property('entity'))
def test_filter(self):
""" Filtering with schema """
schema = helpers.PersonSpec()
person = helpers.Person(
first_name=' Willy ',
last_name=' Wonka ',
salutation=' mr ',
birth_year='I was born in 1964'
)
schema.filter(person)
self.assertEqual('Willy', person.first_name)
self.assertEqual('Wonka', person.last_name)
self.assertEqual('mr', person.salutation)
self.assertEqual(1964, person.birth_year)
def test_skip_all_filters_if_value_is_none(self):
""" Skip filtering if value is none """
schema = helpers.PersonSpec()
person = helpers.Person()
schema.filter(person)
self.assertIsNone(person.first_name)
self.assertIsNone(person.last_name)
def test_filtering_simple_properties_with_context(self):
""" Filtering simple properties with context (default)"""
custom_context = 'I AM CUSTOM CONTEXT'
class TestFilter(filters.AbstractFilter):
def filter(self, value, model=None, context=None):
if context == custom_context:
return 'CUSTOM CONTEXT'
else:
return 'NO CUSTOM CONTEXT'
class TestSchema(Schema):
def schema(self):
self.add_property('prop')
self.prop.add_filter(TestFilter())
model = dict(prop='some value')
schema = TestSchema()
schema.filter(model, context=custom_context)
        self.assertEqual('CUSTOM CONTEXT', model['prop'])
def test_validate_state(self):
""" Validating entity state """
model = helpers.Person()
schema = Schema()
schema.add_state_validator(helpers.ValidatorInvalid())
result = schema.validate(model)
self.assertIsInstance(result, Result)
self.assertFalse(result)
def test_validate_simple_properties(self):
""" Validating simple properties """
schema = helpers.PersonSpec()
person = helpers.Person(
first_name='Some really really long name',
last_name='And a really really long last name',
salutation='BAD!',
)
result = schema.validate(person)
self.assertFalse(result)
self.assertTrue('first_name' in result.errors)
self.assertTrue('last_name' in result.errors)
self.assertTrue('salutation' in result.errors)
def test_require_simple_properties_via_required_validator(self):
""" Validate simple properties required via validator"""
from shiftschema.validators import Required
schema = Schema()
schema.add_property('property')
schema.property.add_validator(Required())
result = schema.validate(dict())
self.assertFalse(result)
def test_validate_entity_property(self):
""" Validated linked entity properties with nested schemas """
model = helpers.Person()
model.spouse = helpers.Person(first_name='W', last_name='X')
schema = Schema()
schema.add_entity('spouse')
schema.spouse.schema = helpers.PersonSpec()
result = schema.validate(model)
self.assertFalse(result)
self.assertTrue('first_name' in result.errors['spouse']['schema'])
self.assertTrue('last_name' in result.errors['spouse']['schema'])
def test_entity_props_can_have_both_direct_and_schema_errors(self):
"""REGRESSION: Both direct and schema errors at the same time """
person = helpers.Person()
person.spouse = helpers.Person()
schema = helpers.PersonSpec()
schema.add_entity('spouse')
schema.spouse.add_validator(helpers.ValidatorInvalid())
schema.spouse.schema = helpers.PersonSpec()
schema.spouse.schema.salutation.add_validator(validators.Required())
result = schema.validate(person)
self.assertTrue(len(result.errors['spouse']['direct']))
self.assertIn('salutation', result.errors['spouse']['schema'])
def test_require_linked_entities_with_validator_attached_directly(self):
""" Require linked entities with validator attached directly """
class Person:
def __init__(self):
self.spouse = None
schema = Schema()
schema.add_entity('spouse').add_validator(validators.Required())
result = schema.validate(Person())
self.assertFalse(result)
self.assertIn('spouse', result.get_messages())
def test_can_filter_out_collections_directly(self):
""" Filter out collection properties with filters attached directly """
address1 = helpers.Address(
address=' 2 Hollin Croft ',
city=' Barnsley ',
country=' UK ',
postcode=' S75 3TF ',
)
address2 = helpers.Address(
address='Newspaper House, 40 Churchgate',
city=' Bolton ',
country=' UK ',
)
address3 = helpers.Address(
address=' 446 Meadow Drive ',
city=' Billings, MT ',
country='US',
postcode=' 59101 ',
)
person = helpers.Person(
first_name='Matthew',
last_name='Rankin',
salutation='mr',
email='matrankin@gmail.com',
birth_year='1964',
)
person.addresses.append(address1)
person.addresses.append(address2)
person.addresses.append(address3)
schema = helpers.PersonSpecAggregate()
schema.filter(person)
        self.assertEqual(2, len(person.addresses))
for address in person.addresses:
if address.country == 'US':
self.fail('US address was not filtered out')
def test_filter_collection_items_with_schemas(self):
""" Filtering collection items with schema """
address1 = helpers.Address(
address=' 2 Hollin Croft ',
city=' Barnsley ',
country=' UK ',
postcode=' S75 3TF ',
)
person = helpers.Person(
first_name='Matthew',
last_name='Rankin',
salutation='mr',
email='matrankin@gmail.com',
birth_year='1964',
)
person.addresses.append(address1)
schema = helpers.PersonSpecCollectionAggregate()
schema.filter(person)
        self.assertEqual('2 Hollin Croft', person.addresses[0].address)
        self.assertEqual('Barnsley', person.addresses[0].city)
        self.assertEqual('UK', person.addresses[0].country)
        self.assertEqual('S75 3TF', person.addresses[0].postcode)
def test_can_validate_collections_directly(self):
""" Validating collection with validators attached directly """
person = helpers.Person(
first_name='Matthew',
last_name='Rankin',
salutation='mr',
email='matrankin@gmail.com',
birth_year='1964',
)
schema = helpers.PersonSpecCollectionAggregate()
result = schema.validate(person)
self.assertFalse(result)
self.assertIn('addresses', result.errors)
def test_validate_collection_items_with_schemas(self):
""" Validating collection items with schema """
# valid
address1 = helpers.Address(
address=' 2 Hollin Croft ',
city=' Barnsley ',
country=' UK ',
postcode=' S75 3TF ',
)
# postcode required
address2 = helpers.Address(
address='Newspaper House, 40 Churchgate',
city=' Bolton ',
country=' UK ',
)
# filtered out
address3 = helpers.Address(
address=' 446 Meadow Drive ',
city=' Billings, MT ',
country='US',
postcode=' 59101 ',
)
# address required
address4 = helpers.Address(
city=' Barnsley ',
country=' UK ',
postcode=' S75 3TF ',
)
person = helpers.Person(
first_name='Matthew',
last_name='Rankin',
salutation='mr',
email='matrankin@gmail.com',
birth_year='1964',
)
#
person.addresses.append(address1)
person.addresses.append(address2)
person.addresses.append(address3)
person.addresses.append(address4)
schema = helpers.PersonSpecCollectionAggregate()
result = schema.validate(person)
self.assertFalse(result)
collection = result.errors['addresses']['collection']
self.assertIsInstance(collection[1], Result)
self.assertFalse(collection[1])
self.assertIn('postcode', collection[1].errors)
self.assertIsInstance(collection[3], Result)
self.assertFalse(collection[3])
self.assertIn('address', collection[3].errors)
def test_skip_validating_collection_with_schema_if_collection_empty(self):
""" Skip validating collection with schema if it's empty or None """
person = helpers.Person(
first_name='Matthew',
last_name='Rankin',
salutation='mr',
email='matrankin@gmail.com',
birth_year='1964',
)
person.addresses = None # override default
schema = helpers.PersonSpecCollectionAggregate()
schema.addresses.validators = [] # skip required validator
result = schema.validate(person)
self.assertTrue(result)
def test_validate_and_filter(self):
""" Process: validation and filtering as single operation"""
person = helpers.Person(first_name=' W ')
person.spouse = helpers.Person(first_name=' X ')
schema = helpers.PersonSpecAggregate()
result = schema.process(person)
self.assertEqual('W', person.first_name)
self.assertEqual('X', person.spouse.first_name)
self.assertTrue('first_name' in result.errors) # too short
self.assertTrue('first_name' in result.errors['spouse']['schema'])
def test_results_injected_with_translations(self):
""" Schema-generated results are injected with translation settings """
schema = Schema()
result = schema.validate(mock.Mock())
self.assertEqual('en', result.locale)
self.assertIsInstance(result.translator, Translator)
Schema.locale = 'ru'
Schema.translator.add_location('/tmp')
schema = Schema()
result = schema.validate(mock.Mock())
self.assertEqual('ru', result.locale)
self.assertIsInstance(result.translator, Translator)
self.assertTrue('/tmp' in result.translator.dirs)
| {
"content_hash": "302599f37304f03be8b665203bb9423a",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 80,
"avg_line_length": 35.99583333333333,
"alnum_prop": 0.6130339159624957,
"repo_name": "projectshift/shift-schema",
"id": "0ac3822af1625151c5abca20738a83b89b47c883",
"size": "17278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/schema_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177832"
},
{
"name": "Shell",
"bytes": "393"
}
],
"symlink_target": ""
} |
"""Distributed supervision suite.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import cli
def init():
"""Return top level command handler."""
@click.group(cls=cli.make_commands(__name__))
def run():
"""Cross-cell supervision tools."""
cli.init_logger('daemon.json')
return run
| {
"content_hash": "6e1a1103586390dcd662defccca3c925",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 20.545454545454547,
"alnum_prop": 0.672566371681416,
"repo_name": "Morgan-Stanley/treadmill",
"id": "91a76b32d7ad0d92981c8c8178fee44bcce62e51",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/cli/supervise/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3372983"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
} |
import requests
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import ShareFileProvider
class ShareFileOAuth2Adapter(OAuth2Adapter):
provider_id = ShareFileProvider.id
settings = app_settings.PROVIDERS.get(provider_id, {})
subdomain = settings.get('SUBDOMAIN', 'secure')
apicp = settings.get('APICP', 'sharefile.com')
provider_default_url = settings.get('DEFAULT_URL',
'https://secure.sharefile.com')
provider_default_api_url = 'https://{}.sf-api.com'.format(subdomain)
provider_api_version = 'v3'
access_token_url = 'https://{}.{}/oauth/token'.format(subdomain, apicp)
refresh_token_url = 'https://{}.{}/oauth/token'.format(subdomain, apicp)
authorize_url = '{}/oauth/authorize'.format(provider_default_url)
profile_url = '{}/sf/{}/Users'.format(provider_default_api_url,
provider_api_version)
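    # Fetch the authenticated user's profile from the ShareFile API and build
    # a social login from the response.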
def complete_login(self, request, app, token, response):
headers = {"Authorization": "Bearer {}".format(token.token)}
extra_data = requests.get(self.profile_url, headers=headers).json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(ShareFileOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(ShareFileOAuth2Adapter)
| {
"content_hash": "b01f2ac8883f5d7a84fc9abd5031d963",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 41.078947368421055,
"alnum_prop": 0.657911595131326,
"repo_name": "lukeburden/django-allauth",
"id": "fb2e8af727133716bcd4d980c3ae2583b129d6a7",
"size": "1561",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/sharefile/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "42255"
},
{
"name": "JavaScript",
"bytes": "3360"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "776219"
}
],
"symlink_target": ""
} |
"""
Borrowed then modified from: http://tuohuang.info/unity-automate-post-process/#.U6i3_I1dVaR
"""
import os
import plistlib
from sys import argv
from mod_pbxproj import XcodeProject
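# argv[1]: path to the Xcode project folder generated by Unity
# argv[2]: folder whose contents should be added to the Xcode project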
path = argv[1]
fileToAddPath = argv[2]
BASE_PATH = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/'
project = XcodeProject.Load(path + '/Unity-iPhone.xcodeproj/project.pbxproj')
frameworks_path = BASE_PATH + 'System/Library/Frameworks/'
lib_path = BASE_PATH + 'usr/lib/'
# Add required libraries
############################
project.add_file(frameworks_path + 'Security.framework', tree='SDKROOT')
project.add_file(frameworks_path + 'PassKit.framework', tree='SDKROOT', weak=True)
project.add_file(lib_path + 'libicucore.dylib', tree='SDKROOT')
# Add all files in /Assets/Editor/Arbiter/
files_in_dir = os.listdir(fileToAddPath)
for f in files_in_dir:
    if not f.startswith('.'): # exclude .DS_Store on mac
pathname = os.path.join(fileToAddPath, f)
fileName, fileExtension = os.path.splitext(pathname)
if not fileExtension == '.meta': # skip .meta file
if os.path.isfile(pathname):
project.add_file(pathname)
if os.path.isdir(pathname):
                project.add_folder(pathname, excludes=[r"^.*\.meta$"])
# Change build settings
############################
project.add_other_buildsetting('GCC_ENABLE_OBJC_EXCEPTIONS', 'YES')
project.add_other_buildsetting('CLANG_ENABLE_MODULES', 'YES')
project.add_other_buildsetting('IPHONEOS_DEPLOYMENT_TARGET', '7.0')
# Add ARC compiler flag for Stripe and PaymentKit files
########################################################
for key in project.get_ids():
obj = project.get_obj(key)
file_path = obj.get('path')
try:
if 'Arbiter' in file_path or \
'PaymentKit' in file_path or \
'Stripe' in file_path or \
'Mixpanel' in file_path:
build_files = project.get_build_files(key)
if build_files is not None:
for build_file in build_files:
build_file.add_compiler_flag('-fobjc-arc')
except Exception as err:
pass
# Add Info.plist keys for location services
########################################################
rootObject = plistlib.readPlist(path + '/Info.plist')
rootObject['NSLocationWhenInUseUsageDescription'] = 'This is required to participate in cash games.'
plistlib.writePlist(rootObject, path + '/Info.plist')
# Now save
############################
if project.modified:
project.backup()
project.save()
| {
"content_hash": "ef875f9030f21883ad74f326d19b9715",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 113,
"avg_line_length": 36.70422535211268,
"alnum_prop": 0.6304681504221028,
"repo_name": "ArbiterGames/Clickster",
"id": "444ced309ac3df22ca71880096bf26b3f0b32615",
"size": "2606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Clickster/Assets/Editor/ArbiterPostprocessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6541"
},
{
"name": "C#",
"bytes": "89957"
},
{
"name": "C++",
"bytes": "2170"
},
{
"name": "Objective-C",
"bytes": "750005"
},
{
"name": "Python",
"bytes": "44756"
}
],
"symlink_target": ""
} |
import sys
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from osprofiler import profiler
from neutron._i18n import _LE, _LI, _LW
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping(object):
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
# set of compute ports on this dvr subnet
self.compute_ports = {}
self.subnet = subnet
self.csnat_ofport = csnat_ofport
self.dvr_owned = False
def __str__(self):
return ("subnet = %s compute_ports = %s csnat_port = %s"
" is_dvr_owned = %s" %
(self.subnet, self.get_compute_ofports(),
self.get_csnat_ofport(), self.is_dvr_owned()))
def get_subnet_info(self):
return self.subnet
def set_dvr_owned(self, owned):
self.dvr_owned = owned
def is_dvr_owned(self):
return self.dvr_owned
def add_compute_ofport(self, vif_id, ofport):
self.compute_ports[vif_id] = ofport
def remove_compute_ofport(self, vif_id):
self.compute_ports.pop(vif_id, 0)
def remove_all_compute_ofports(self):
self.compute_ports.clear()
def get_compute_ofports(self):
return self.compute_ports
def set_csnat_ofport(self, ofport):
self.csnat_ofport = ofport
def get_csnat_ofport(self):
return self.csnat_ofport
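# A class to represent a port on the integration bridge, tracking its MAC,
# ofport, device owner and the DVR-routed subnets it has been seen on.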
class OVSPort(object):
def __init__(self, id, ofport, mac, device_owner):
self.id = id
self.mac = mac
self.ofport = ofport
self.subnets = set()
self.device_owner = device_owner
def __str__(self):
return ("OVSPort: id = %s, ofport = %s, mac = %s, "
"device_owner = %s, subnets = %s" %
(self.id, self.ofport, self.mac,
self.device_owner, self.subnets))
def add_subnet(self, subnet_id):
self.subnets.add(subnet_id)
def remove_subnet(self, subnet_id):
self.subnets.remove(subnet_id)
def remove_all_subnets(self):
self.subnets.clear()
def get_subnets(self):
return self.subnets
def get_device_owner(self):
return self.device_owner
def get_mac(self):
return self.mac
def get_ofport(self):
return self.ofport
@profiler.trace_cls("ovs_dvr_agent")
class OVSDVRNeutronAgent(object):
'''
    Implements OVS-based DVR (Distributed Virtual Router) for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
bridge_mappings, phys_brs, int_ofports, phys_ofports,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
self.bridge_mappings = bridge_mappings
self.phys_brs = phys_brs
self.int_ofports = int_ofports
self.phys_ofports = phys_ofports
self.reset_ovs_parameters(integ_br, tun_br,
patch_int_ofport, patch_tun_ofport)
self.reset_dvr_parameters()
self.dvr_mac_address = None
if self.enable_distributed_routing:
self.get_dvr_mac_address()
self.conf = cfg.CONF
def setup_dvr_flows(self):
self.setup_dvr_flows_on_integ_br()
self.setup_dvr_flows_on_tun_br()
self.setup_dvr_flows_on_phys_br()
self.setup_dvr_mac_flows_on_all_brs()
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def reset_dvr_parameters(self):
'''Reset the DVR parameters'''
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
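    # Fetch this host's DVR MAC address from the Neutron server at startup,
    # tolerating servers that do not support the RPC call.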
def get_dvr_mac_address(self):
try:
self.get_dvr_mac_address_with_retry()
except oslo_messaging.RemoteError as e:
LOG.error(_LE('L2 agent could not get DVR MAC address at '
'startup due to RPC error. It happens when the '
'server does not support this RPC API. Detailed '
'message: %s'), e)
except oslo_messaging.MessagingTimeout:
LOG.error(_LE('DVR: Failed to obtain a valid local '
'DVR MAC address'))
if not self.in_distributed_mode():
sys.exit(1)
def get_dvr_mac_address_with_retry(self):
# Get the local DVR MAC Address from the Neutron Server.
# This is the first place where we contact the server on startup
# so retry in case it's not ready to respond
for retry_count in reversed(range(5)):
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('L2 agent could not get DVR MAC '
'address from server. Retrying. '
'Detailed message: %s'), e)
else:
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
return
def setup_dvr_flows_on_integ_br(self):
'''Setup up initial dvr flows into br-int'''
LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
self.dvr_mac_address)
# Remove existing flows in integration bridge
if self.conf.AGENT.drop_flows_on_start:
self.int_br.delete_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.setup_canary_table()
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1)
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
priority=1)
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING,
priority=1)
for physical_network in self.bridge_mappings:
self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.int_ofports[
physical_network])
def setup_dvr_flows_on_tun_br(self):
'''Setup up initial dvr flows into br-tun'''
if not self.enable_tunneling:
return
self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS,
priority=1,
in_port=self.patch_int_ofport)
# table-miss should be sent to learning table
self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN,
dest_table_id=constants.LEARN_FROM_TUN)
self.tun_br.install_goto(table_id=constants.DVR_PROCESS,
dest_table_id=constants.PATCH_LV_TO_TUN)
def setup_dvr_flows_on_phys_br(self):
'''Setup up initial dvr flows into br-phys'''
for physical_network in self.bridge_mappings:
self.phys_brs[physical_network].install_goto(
in_port=self.phys_ofports[physical_network],
priority=2,
dest_table_id=constants.DVR_PROCESS_VLAN)
self.phys_brs[physical_network].install_goto(
priority=1,
dest_table_id=constants.DVR_NOT_LEARN_VLAN)
self.phys_brs[physical_network].install_goto(
table_id=constants.DVR_PROCESS_VLAN,
priority=0,
dest_table_id=constants.LOCAL_VLAN_TRANSLATION)
self.phys_brs[physical_network].install_drop(
table_id=constants.LOCAL_VLAN_TRANSLATION,
in_port=self.phys_ofports[physical_network],
priority=2)
self.phys_brs[physical_network].install_normal(
table_id=constants.DVR_NOT_LEARN_VLAN,
priority=1)
def _add_dvr_mac_for_phys_br(self, physical_network, mac):
self.int_br.add_dvr_mac_vlan(mac=mac,
port=self.int_ofports[physical_network])
phys_br = self.phys_brs[physical_network]
phys_br.add_dvr_mac_vlan(mac=mac,
port=self.phys_ofports[physical_network])
def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
# REVISIT(yamamoto): match in_port as well?
self.int_br.remove_dvr_mac_vlan(mac=mac)
phys_br = self.phys_brs[physical_network]
# REVISIT(yamamoto): match in_port as well?
phys_br.remove_dvr_mac_vlan(mac=mac)
def _add_dvr_mac_for_tun_br(self, mac):
self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
def _remove_dvr_mac_for_tun_br(self, mac):
self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
# REVISIT(yamamoto): match in_port as well?
self.tun_br.remove_dvr_mac_tun(mac=mac)
def _add_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._add_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._add_dvr_mac_for_tun_br(mac)
LOG.debug("Added DVR MAC flow for %s", mac)
self.registered_dvr_macs.add(mac)
def _remove_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._remove_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._remove_dvr_mac_for_tun_br(mac)
LOG.debug("Removed DVR MAC flow for %s", mac)
self.registered_dvr_macs.remove(mac)
def setup_dvr_mac_flows_on_all_brs(self):
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
self._add_dvr_mac(mac['mac_address'])
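    # Called when the server-side list of DVR MAC addresses changes; only the
    # difference with the already-registered MACs is applied.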
def dvr_mac_address_update(self, dvr_macs):
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
self._remove_dvr_mac(oldmac)
for newmac in dvr_macs_added:
self._add_dvr_mac(newmac)
def in_distributed_mode(self):
return self.dvr_mac_address is not None
def process_tunneled_network(self, network_type, lvid, segmentation_id):
self.tun_br.provision_local_vlan(
network_type=network_type,
lvid=lvid,
segmentation_id=segmentation_id,
distributed=self.in_distributed_mode())
def _bind_distributed_router_interface_port(self, port, lvm,
fixed_ips, device_owner):
# since distributed router port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
subnet_uuid = fixed_ip['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_LE("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(
self.context, subnet_uuid, fixed_ips=fixed_ips)
if not subnet_info:
LOG.warning(_LW("DVR: Unable to retrieve subnet information "
"for subnet_id %s. The subnet or the gateway "
"may have already been deleted"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
vif_by_id = self.int_br.get_vifs_by_ids(
[local_port['id'] for local_port in local_compute_ports])
for local_port in local_compute_ports:
vif = vif_by_id.get(local_port['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
                # the compute port is already tracked (it sits on another
                # dvr routed subnet), so queue this subnet to that port
comp_ovsport = self.local_ports[vif.vif_id]
comp_ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr routed subnet, so queue this subnet to that port
comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, local_port['device_owner'])
comp_ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = comp_ovsport
# create rule for just this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=comp_ovsport.get_mac(),
dst_port=comp_ovsport.get_ofport())
if lvm.network_type == p_const.TYPE_VLAN:
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
br = self.phys_brs[lvm.physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
if ip_version == 4:
br.install_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.install_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
br.install_dvr_process(
vlan_tag=lvm.vlan, vif_mac=port.vif_mac,
dvr_mac_address=self.dvr_mac_address)
# the dvr router interface is itself a port, so capture it
# queue this subnet to that port. A subnet appears only once as
# a router interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
device_owner):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
                # the compute port is already tracked (it sits on another
                # dvr routed subnet), so queue this subnet to that port
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr routed subnet, so queue this subnet to that port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# create a rule for this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
fixed_ips, device_owner):
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
if subs[0] == fixed_ip['subnet_id']:
return
LOG.error(_LE("Centralized-SNAT port %(port)s on subnet "
"%(port_subnet)s already seen on a different "
"subnet %(orig_subnet)s"), {
"port": port.vif_id,
"port_subnet": fixed_ip['subnet_id'],
"orig_subnet": subs[0],
})
return
subnet_uuid = fixed_ip['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(
self.context, subnet_uuid, fixed_ips=fixed_ips)
if not subnet_info:
LOG.warning(_LW("DVR: Unable to retrieve subnet information "
"for subnet_id %s. The subnet or the gateway "
"may have already been deleted"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def bind_port_to_dvr(self, port, local_vlan_map,
fixed_ips, device_owner):
if not self.in_distributed_mode():
return
if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
+ [p_const.TYPE_VLAN]):
LOG.debug("DVR: Port %s is with network_type %s not supported"
" for dvr plumbing", port.vif_id,
local_vlan_map.network_type)
return
if (port.vif_id in self.local_ports and
self.local_ports[port.vif_id].ofport != port.ofport):
LOG.info(_LI("DVR: Port %(vif)s changed port number to "
"%(ofport)s, rebinding."),
{'vif': port.vif_id, 'ofport': port.ofport})
self.unbind_port_from_dvr(port, local_vlan_map)
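        # Dispatch on the device owner: distributed router interfaces,
        # DVR-serviced ports (e.g. compute ports) and centralized SNAT ports
        # each get their own plumbing.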
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port,
local_vlan_map,
fixed_ips,
device_owner)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, local_vlan_map,
fixed_ips,
device_owner)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port,
local_vlan_map,
fixed_ips,
device_owner)
def _unbind_distributed_router_interface_port(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
network_type = lvm.network_type
physical_network = lvm.physical_network
vlan_to_use = lvm.vlan
if network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
# DVR is no more owner
ldm.set_dvr_owned(False)
# remove all vm rules for this dvr subnet
# clear of compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
comp_port = self.local_ports[vif_id]
self.int_br.delete_dvr_to_src_mac(
network_type=network_type,
vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
# if there is no csnat port for this subnet, remove
# this subnet from local_dvr_map, as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
if network_type == p_const.TYPE_VLAN:
br = self.phys_brs[physical_network]
if network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
if ip_version == 4:
br.delete_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.delete_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
ovsport.remove_subnet(sub_uuid)
if lvm.network_type == p_const.TYPE_VLAN:
br = self.phys_brs[physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
ldm.remove_compute_ofport(port.vif_id)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# first remove this vm port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
# ensure we process for all the subnets laid on this port
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# then remove csnat port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_map):
if not self.in_distributed_mode():
return
# Handle port removed use-case
if vif_port and vif_port.vif_id not in self.local_ports:
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
return
ovsport = self.local_ports[vif_port.vif_id]
device_owner = ovsport.get_device_owner()
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_map)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._unbind_port_on_dvr_subnet(vif_port,
local_vlan_map)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
local_vlan_map)
| {
"content_hash": "c2580eaac2569cf63cf5f48e89431b26",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 79,
"avg_line_length": 42.64142857142857,
"alnum_prop": 0.5580756474253744,
"repo_name": "sebrandon1/neutron",
"id": "8965d4313f8ff00183f65adb9b26fb517d5839b4",
"size": "30507",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9903006"
},
{
"name": "Shell",
"bytes": "14339"
}
],
"symlink_target": ""
} |
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.external_i_ds_v30_rc1 import ExternalIDsV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.work_summary_v30_rc1 import WorkSummaryV30Rc1 # noqa: F401,E501
class WorkGroupV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30Rc1',
'external_ids': 'ExternalIDsV30Rc1',
'work_summary': 'list[WorkSummaryV30Rc1]'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'external_ids': 'external-ids',
'work_summary': 'work-summary'
}
def __init__(self, last_modified_date=None, external_ids=None, work_summary=None): # noqa: E501
"""WorkGroupV30Rc1 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._external_ids = None
self._work_summary = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if external_ids is not None:
self.external_ids = external_ids
if work_summary is not None:
self.work_summary = work_summary
@property
def last_modified_date(self):
"""Gets the last_modified_date of this WorkGroupV30Rc1. # noqa: E501
:return: The last_modified_date of this WorkGroupV30Rc1. # noqa: E501
:rtype: LastModifiedDateV30Rc1
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this WorkGroupV30Rc1.
:param last_modified_date: The last_modified_date of this WorkGroupV30Rc1. # noqa: E501
:type: LastModifiedDateV30Rc1
"""
self._last_modified_date = last_modified_date
@property
def external_ids(self):
"""Gets the external_ids of this WorkGroupV30Rc1. # noqa: E501
:return: The external_ids of this WorkGroupV30Rc1. # noqa: E501
:rtype: ExternalIDsV30Rc1
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this WorkGroupV30Rc1.
:param external_ids: The external_ids of this WorkGroupV30Rc1. # noqa: E501
:type: ExternalIDsV30Rc1
"""
self._external_ids = external_ids
@property
def work_summary(self):
"""Gets the work_summary of this WorkGroupV30Rc1. # noqa: E501
:return: The work_summary of this WorkGroupV30Rc1. # noqa: E501
:rtype: list[WorkSummaryV30Rc1]
"""
return self._work_summary
@work_summary.setter
def work_summary(self, work_summary):
"""Sets the work_summary of this WorkGroupV30Rc1.
:param work_summary: The work_summary of this WorkGroupV30Rc1. # noqa: E501
:type: list[WorkSummaryV30Rc1]
"""
self._work_summary = work_summary
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkGroupV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkGroupV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| {
"content_hash": "4fb764c6417fdb7d4aa9b5074474bff4",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 119,
"avg_line_length": 31.98170731707317,
"alnum_prop": 0.5971401334604385,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "97f852fb34675f6400efe9769efab6a0e808dca2",
"size": "5262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/work_group_v30_rc1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class RenderControllerTest( GafferSceneTest.SceneTestCase ) :
def testConstructorAndAccessors( self ) :
sphere = GafferScene.Sphere()
context1 = Gaffer.Context()
renderer = GafferScene.Private.IECoreScenePreview.Renderer.create(
"OpenGL",
GafferScene.Private.IECoreScenePreview.Renderer.RenderType.Interactive
)
controller = GafferScene.RenderController( sphere["out"], context1, renderer )
self.assertTrue( controller.renderer().isSame( renderer ) )
self.assertTrue( controller.getScene().isSame( sphere["out"] ) )
self.assertTrue( controller.getContext().isSame( context1 ) )
cube = GafferScene.Cube()
context2 = Gaffer.Context()
controller.setScene( cube["out"] )
controller.setContext( context2 )
self.assertTrue( controller.getScene().isSame( cube["out"] ) )
self.assertTrue( controller.getContext().isSame( context2 ) )
def testBoundUpdate( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
renderer = GafferScene.Private.IECoreScenePreview.Renderer.create(
"OpenGL",
GafferScene.Private.IECoreScenePreview.Renderer.RenderType.Interactive
)
controller = GafferScene.RenderController( group["out"], Gaffer.Context(), renderer )
controller.update()
self.assertEqual(
renderer.command( "gl:queryBound", {} ),
group["out"].bound( "/" )
)
sphere["transform"]["translate"]["x"].setValue( 1 )
controller.update()
self.assertEqual(
renderer.command( "gl:queryBound", {} ),
group["out"].bound( "/" )
)
def testUpdateMatchingPaths( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere["out"] )
renderer = GafferScene.Private.IECoreScenePreview.Renderer.create(
"OpenGL",
GafferScene.Private.IECoreScenePreview.Renderer.RenderType.Interactive
)
controller = GafferScene.RenderController( group["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 3 )
controller.update()
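		# Query the bound of a single location by selecting it and asking the
		# renderer for the bound of the current selection.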
def bound( path ) :
renderer.option( "gl:selection", IECore.PathMatcherData( IECore.PathMatcher( [ path ] ) ) )
return renderer.command( "gl:queryBound", { "selection" : True } )
boundOrig = sphere["out"].bound( "/sphere" )
self.assertEqual( bound( "/group/sphere" ), boundOrig )
self.assertEqual( bound( "/group/sphere1" ), boundOrig )
sphere["radius"].setValue( 2 )
self.assertEqual( bound( "/group/sphere" ), boundOrig )
self.assertEqual( bound( "/group/sphere1" ), boundOrig )
controller.updateMatchingPaths( IECore.PathMatcher( [ "/group/sphere" ] ) )
boundUpdated = sphere["out"].bound( "/sphere" )
self.assertEqual( bound( "/group/sphere" ), boundUpdated )
self.assertEqual( bound( "/group/sphere1" ), boundOrig )
controller.update()
self.assertEqual( bound( "/group/sphere" ), boundUpdated )
self.assertEqual( bound( "/group/sphere1" ), boundUpdated )
def testUpdateMatchingPathsAndInheritedTransforms( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere["out"] )
renderer = GafferScene.Private.IECoreScenePreview.Renderer.create(
"OpenGL",
GafferScene.Private.IECoreScenePreview.Renderer.RenderType.Interactive
)
controller = GafferScene.RenderController( group["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 3 )
controller.update()
def bound( path ) :
renderer.option( "gl:selection", IECore.PathMatcherData( IECore.PathMatcher( [ path ] ) ) )
return renderer.command( "gl:queryBound", { "selection" : True } )
untranslatedBound = group["out"].bound( "/group/sphere" ) * group["out"].fullTransform( "/group/sphere" )
self.assertEqual( bound( "/group/sphere" ), untranslatedBound )
self.assertEqual( bound( "/group/sphere1" ), untranslatedBound )
group["transform"]["translate"]["x"].setValue( 2 )
translatedBound = group["out"].bound( "/group/sphere" ) * group["out"].fullTransform( "/group/sphere" )
controller.updateMatchingPaths( IECore.PathMatcher( [ "/group/sphere" ] ) )
self.assertEqual( bound( "/group/sphere" ), translatedBound )
self.assertEqual( bound( "/group/sphere1" ), untranslatedBound )
controller.update()
self.assertEqual( bound( "/group/sphere" ), translatedBound )
self.assertEqual( bound( "/group/sphere1" ), translatedBound )
def testUpdateRemoveFromLightSet( self ) :
sphere = GafferScene.Sphere()
lightSet = GafferScene.Set()
lightSet["in"].setInput( sphere["out"] )
lightSet["name"].setValue( '__lights' )
lightSet["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )
renderer = GafferScene.Private.IECoreScenePreview.Renderer.create(
"OpenGL",
GafferScene.Private.IECoreScenePreview.Renderer.RenderType.Interactive
)
controller = GafferScene.RenderController( sphere["out"], Gaffer.Context(), renderer )
controller.update()
self.assertEqual(
renderer.command( "gl:queryBound", {} ),
lightSet["out"].bound( "/" )
)
controller.setScene( lightSet["out"] )
controller.update()
self.assertEqual(
renderer.command( "gl:queryBound", {} ),
lightSet["out"].bound( "/" )
)
# While doing this exact same thing worked the first time, there was a bug where
# rendering geo that had previously been rendered in the lights pass would fail.
controller.setScene( sphere["out"] )
controller.update()
self.assertEqual(
renderer.command( "gl:queryBound", {} ),
lightSet["out"].bound( "/" )
)
def testLightLinks( self ) :
sphere = GafferScene.Sphere()
attributes = GafferScene.StandardAttributes()
attributes["in"].setInput( sphere["out"] )
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
attributes["attributes"]["linkedLights"]["value"].setValue( "defaultLights" )
attributes["attributes"]["doubleSided"]["enabled"].setValue( True )
attributes["attributes"]["doubleSided"]["value"].setValue( False )
lightA = GafferSceneTest.TestLight()
lightA["name"].setValue( "lightA" )
lightA["sets"].setValue( "A" )
lightB = GafferSceneTest.TestLight()
lightB["name"].setValue( "lightB" )
lightB["sets"].setValue( "B" )
group = GafferScene.Group()
group["in"][0].setInput( attributes["out"] )
group["in"][1].setInput( lightA["out"] )
group["in"][2].setInput( lightB["out"] )
renderer = GafferScene.Private.IECoreScenePreview.CapturingRenderer()
controller = GafferScene.RenderController( group["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 10 )
controller.update()
capturedSphere = renderer.capturedObject( "/group/sphere" )
capturedLightA = renderer.capturedObject( "/group/lightA" )
capturedLightB = renderer.capturedObject( "/group/lightB" )
# Since the linking expression is "defaultLights" and there are
# no non-default lights, we don't expect to have light links.
self.assertEqual( capturedSphere.capturedLinks( "lights" ), None )
self.assertEqual( capturedSphere.numLinkEdits( "lights" ), 1 )
# If we restrict to just one set of lights, then we expect an
# edit to update the links.
attributes["attributes"]["linkedLights"]["value"].setValue( "A" )
controller.update()
self.assertEqual( capturedSphere.capturedLinks( "lights" ), { capturedLightA } )
self.assertEqual( capturedSphere.numLinkEdits( "lights" ), 2 )
# Likewise if we restrict to the other set of lights.
attributes["attributes"]["linkedLights"]["value"].setValue( "B" )
controller.update()
self.assertEqual( capturedSphere.capturedLinks( "lights" ), { capturedLightB } )
self.assertEqual( capturedSphere.numLinkEdits( "lights" ), 3 )
# If we change an attribute which has no bearing on light linking,
# we don't want links to be emitted again. Attributes change frequently
# and light linking can be expensive.
attributes["attributes"]["doubleSided"]["value"].setValue( True )
controller.update()
self.assertEqual( capturedSphere.capturedLinks( "lights" ), { capturedLightB } )
self.assertEqual( capturedSphere.numLinkEdits( "lights" ), 3 )
del capturedSphere, capturedLightA, capturedLightB
@GafferTest.TestRunner.PerformanceTestMethod()
def testLightLinkPerformance( self ) :
numSpheres = 10000
numLights = 1000
# Make a bunch of spheres
sphere = GafferScene.Sphere()
spherePlane = GafferScene.Plane()
spherePlane["name"].setValue( "spheres" )
spherePlane["divisions"].setValue( imath.V2i( 1, numSpheres / 2 - 1 ) )
sphereInstancer = GafferScene.Instancer()
sphereInstancer["in"].setInput( spherePlane["out"] )
sphereInstancer["prototypes"].setInput( sphere["out"] )
sphereInstancer["parent"].setValue( "/spheres" )
# Make a bunch of lights
light = GafferSceneTest.TestLight()
lightPlane = GafferScene.Plane()
lightPlane["name"].setValue( "lights" )
lightPlane["divisions"].setValue( imath.V2i( 1, numLights / 2 - 1 ) )
lightInstancer = GafferScene.Instancer()
lightInstancer["in"].setInput( lightPlane["out"] )
lightInstancer["prototypes"].setInput( light["out"] )
lightInstancer["parent"].setValue( "/lights" )
# Make a single non-default light. This
# will trigger linking of all the others.
nonDefaultLight = GafferSceneTest.TestLight()
nonDefaultLight["defaultLight"].setValue( False )
# Group everything into one scene
group = GafferScene.Group()
group["in"][0].setInput( sphereInstancer["out"] )
group["in"][1].setInput( lightInstancer["out"] )
group["in"][2].setInput( nonDefaultLight["out"] )
# See how quickly we can output those links
renderer = GafferScene.Private.IECoreScenePreview.CapturingRenderer()
controller = GafferScene.RenderController( group["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 10 )
with GafferTest.TestRunner.PerformanceScope() :
controller.update()
# Sanity check that we did output links as expected.
links = renderer.capturedObject( "/group/spheres/instances/sphere/0" ).capturedLinks( "lights" )
self.assertEqual( len( links ), numLights )
def testHideLinkedLight( self ) :
# One default light and one non-default light, which will
# result in light links being emitted to the renderer.
defaultLight = GafferSceneTest.TestLight()
defaultLight["name"].setValue( "defaultLight" )
defaultLightAttributes = GafferScene.StandardAttributes()
defaultLightAttributes["in"].setInput( defaultLight["out"] )
nonDefaultLight = GafferSceneTest.TestLight()
nonDefaultLight["name"].setValue( "nonDefaultLight" )
nonDefaultLight["defaultLight"].setValue( False )
plane = GafferScene.Plane()
group = GafferScene.Group()
group["in"][0].setInput( defaultLightAttributes["out"] )
group["in"][1].setInput( nonDefaultLight["out"] )
group["in"][2].setInput( plane["out"] )
# Output a scene. Only the default light should be linked.
renderer = GafferScene.Private.IECoreScenePreview.CapturingRenderer()
controller = GafferScene.RenderController( group["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 10 )
controller.update()
capturedPlane = renderer.capturedObject( "/group/plane" )
self.assertEqual( capturedPlane.capturedLinks( "lights" ), { renderer.capturedObject( "/group/defaultLight" ) } )
# Hide the default light. It should be removed from the render,
# and the plane should be linked to an empty light set.
defaultLightAttributes["attributes"]["visibility"]["enabled"].setValue( True )
defaultLightAttributes["attributes"]["visibility"]["value"].setValue( False )
controller.update()
self.assertIsNone( renderer.capturedObject( "/group/defaultLight" ) )
self.assertEqual( capturedPlane.capturedLinks( "lights" ), set() )
def testAttributeDirtyPropagation( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
options = GafferScene.StandardOptions()
sphereSet = GafferScene.Set()
sphereSet["name"].setValue( "render:spheres" )
setFilter = GafferScene.PathFilter()
setFilter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
sphereSet["filter"].setInput( setFilter["out"] )
group["in"][0].setInput( sphere["out"] )
options["in"].setInput( group["out"] )
sphereSet["in"].setInput( options["out"] )
globalAttr = GafferScene.CustomAttributes()
globalAttrPlug = Gaffer.NameValuePlug( "user:globalAttr", IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
globalAttr["attributes"].addChild( globalAttrPlug )
globalAttr["global"].setValue( True )
groupAttr = GafferScene.CustomAttributes()
groupAttrPlug = Gaffer.NameValuePlug( "localAttr1", IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
groupAttr["attributes"].addChild( groupAttrPlug )
groupAttrFilter = GafferScene.PathFilter()
groupAttr["filter"].setInput( groupAttrFilter["out"] )
groupAttrFilter["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
sphereAttr = GafferScene.CustomAttributes()
sphereAttrPlug = Gaffer.NameValuePlug( "user:localAttr2", IECore.IntData( 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
sphereAttr["attributes"].addChild( sphereAttrPlug )
sphereAttrFilter = GafferScene.PathFilter()
sphereAttr["filter"].setInput( sphereAttrFilter["out"] )
sphereAttrFilter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
globalAttr["in"].setInput( sphereSet["out"] )
groupAttr["in"].setInput( globalAttr["out"] )
sphereAttr["in"].setInput( groupAttr["out"] )
renderer = GafferScene.Private.IECoreScenePreview.CapturingRenderer()
controller = GafferScene.RenderController( sphereAttr["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 10 )
controller.update()
capturedSphere = renderer.capturedObject( "/group/sphere" )
self.assertEqual( capturedSphere.numAttributeEdits(), 1 )
self.assertEqual(
capturedSphere.capturedAttributes().attributes(),
IECore.CompoundObject( {
"user:globalAttr" : IECore.IntData( 0 ),
"localAttr1" : IECore.IntData( 0 ),
"user:localAttr2" : IECore.IntData( 0 ),
"sets" : IECore.InternedStringVectorData( [ "spheres" ] )
} )
)
sphereAttrPlug["value"].setValue( 1 )
controller.update()
self.assertEqual( capturedSphere.numAttributeEdits(), 2 )
self.assertEqual(
capturedSphere.capturedAttributes().attributes(),
IECore.CompoundObject( {
"user:globalAttr" : IECore.IntData( 0 ),
"localAttr1" : IECore.IntData( 0 ),
"user:localAttr2" : IECore.IntData( 1 ),
"sets" : IECore.InternedStringVectorData( [ "spheres" ] )
} )
)
groupAttrPlug["value"].setValue( 2 )
controller.update()
self.assertEqual( capturedSphere.numAttributeEdits(), 3 )
self.assertEqual(
capturedSphere.capturedAttributes().attributes(),
IECore.CompoundObject( {
"user:globalAttr" : IECore.IntData( 0 ),
"localAttr1" : IECore.IntData( 2 ),
"user:localAttr2" : IECore.IntData( 1 ),
"sets" : IECore.InternedStringVectorData( [ "spheres" ] )
} )
)
globalAttrPlug["value"].setValue( 3 )
controller.update()
self.assertEqual( capturedSphere.numAttributeEdits(), 4 )
self.assertEqual(
capturedSphere.capturedAttributes().attributes(),
IECore.CompoundObject( {
"user:globalAttr" : IECore.IntData( 3 ),
"localAttr1" : IECore.IntData( 2 ),
"user:localAttr2" : IECore.IntData( 1 ),
"sets" : IECore.InternedStringVectorData( [ "spheres" ] )
} )
)
sphereSet["enabled"].setValue( False )
controller.update()
self.assertEqual( capturedSphere.numAttributeEdits(), 5 )
self.assertEqual(
capturedSphere.capturedAttributes().attributes(),
IECore.CompoundObject( {
"user:globalAttr" : IECore.IntData( 3 ),
"localAttr1" : IECore.IntData( 2 ),
"user:localAttr2" : IECore.IntData( 1 ),
"sets" : IECore.InternedStringVectorData( [ ] )
} )
)
options["options"]["renderCamera"]["enabled"].setValue( True )
controller.update()
self.assertEqual( capturedSphere.numAttributeEdits(), 5 )
options["options"]["renderCamera"]["value"].setValue( "/camera" )
controller.update()
self.assertEqual( capturedSphere.numAttributeEdits(), 5 )
del capturedSphere
def testNullObjects( self ) :
camera = GafferScene.Camera()
sphere = GafferScene.Sphere()
light = GafferSceneTest.TestLight()
lightAttr = GafferScene.StandardAttributes()
lightAttr["in"].setInput( sphere["out"] )
lightAttr["attributes"]["linkedLights"]["enabled"].setValue( True )
lightAttr["attributes"]["linkedLights"]["value"].setValue( "defaultLights" )
group = GafferScene.Group()
group["in"][0].setInput( camera["out"] )
group["in"][1].setInput( sphere["out"] )
group["in"][2].setInput( light["out"] )
allFilter = GafferScene.PathFilter()
allFilter["paths"].setValue( IECore.StringVectorData( [ "..." ] ) )
attr = GafferScene.CustomAttributes()
unrenderableAttrPlug = Gaffer.NameValuePlug( "cr:unrenderable", IECore.BoolData( True ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
attr["attributes"].addChild( unrenderableAttrPlug )
attr["filter"].setInput( allFilter["out"] )
attr["in"].setInput( group["out"] )
renderer = GafferScene.Private.IECoreScenePreview.CapturingRenderer()
controller = GafferScene.RenderController( attr["out"], Gaffer.Context(), renderer )
controller.setMinimumExpansionDepth( 10 )
controller.update()
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "a52b83202c264b15e92a54aaafbda61d",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 154,
"avg_line_length": 35.63232323232323,
"alnum_prop": 0.7148769701780248,
"repo_name": "lucienfostier/gaffer",
"id": "8e69d1d763834a0f2387e43e6195cf3bb5118c33",
"size": "19441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferSceneTest/RenderControllerTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7610953"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7892655"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
"""async14tcpserver.py: TCP Echo server protocol
Usage:
async14tcpserver.py
"""
import asyncio
async def handle_echo(reader, writer):
data = await reader.read(100)
message = data.decode()
addr = writer.get_extra_info('peername')
print(f"Received {message} from {addr}")
print(f"Send: {message}", flush=True)
writer.write(data)
await writer.drain()
print("Close the client socket", flush=True)
writer.close()
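# Illustrative companion snippet, not part of the original script: a minimal
# client for the echo server above, assuming the server is already listening
# on 127.0.0.1:8888.
async def echo_client(message, loop):
    reader, writer = await asyncio.open_connection('127.0.0.1', 8888, loop=loop)
    writer.write(message.encode())
    data = await reader.read(100)
    print(f"Client received: {data.decode()}", flush=True)
    writer.close()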
def main():
loop = asyncio.get_event_loop()
coro = asyncio.start_server(handle_echo, '127.0.0.1', 8888, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print(f'Serving on {server.sockets[0].getsockname()}', flush=True)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
| {
"content_hash": "55ab822066ca0d7363c9bae8d4735cc5",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 24.075,
"alnum_prop": 0.6479750778816199,
"repo_name": "showa-yojyo/bin",
"id": "8f3e3a42c243e7a74f515e2bd4e1f6159e3bcd2a",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "async/async14tcpserver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "298627"
},
{
"name": "Shell",
"bytes": "1566"
}
],
"symlink_target": ""
} |
import rope.base.builtins
import rope.base.codeanalyze
import rope.base.pynames
from rope.base import ast, exceptions, utils
class Scope(object):
def __init__(self, pycore, pyobject, parent_scope):
self.pycore = pycore
self.pyobject = pyobject
self.parent = parent_scope
def get_names(self):
"""Return the names defined or imported in this scope"""
return self.pyobject.get_attributes()
def get_defined_names(self):
"""Return the names defined in this scope"""
return self.pyobject._get_structural_attributes()
def get_name(self, name):
"""Return name `PyName` defined in this scope"""
if name not in self.get_names():
raise exceptions.NameNotFoundError('name %s not found' % name)
return self.get_names()[name]
def __getitem__(self, key):
"""The same as ``get_name(key)``"""
return self.get_name(key)
def __contains__(self, key):
"""The same as ``key in self.get_names()``"""
return key in self.get_names()
@utils.saveit
def get_scopes(self):
"""Return the subscopes of this scope
The returned scopes should be sorted by the order they appear.
"""
return self._create_scopes()
def lookup(self, name):
if name in self.get_names():
return self.get_names()[name]
if self.parent is not None:
return self.parent._propagated_lookup(name)
return None
def get_propagated_names(self):
"""Return the visible names of this scope
Return the names defined in this scope that are visible from
scopes containing this scope. This method returns the same
dictionary returned by `get_names()` except for `ClassScope`
which returns an empty dict.
"""
return self.get_names()
def _propagated_lookup(self, name):
if name in self.get_propagated_names():
return self.get_propagated_names()[name]
if self.parent is not None:
return self.parent._propagated_lookup(name)
return None
def _create_scopes(self):
return [pydefined.get_scope()
for pydefined in self.pyobject._get_defined_objects()]
def _get_global_scope(self):
current = self
while current.parent is not None:
current = current.parent
return current
def get_start(self):
return self.pyobject.get_ast().lineno
def get_body_start(self):
body = self.pyobject.get_ast().body
if body:
return body[0].lineno
return self.get_start()
def get_end(self):
pymodule = self._get_global_scope().pyobject
return pymodule.logical_lines.logical_line_in(self.logical_end)[1]
@utils.saveit
def get_logical_end(self):
global_scope = self._get_global_scope()
return global_scope._scope_finder.find_scope_end(self)
start = property(get_start)
end = property(get_end)
logical_end = property(get_logical_end)
def get_kind(self):
pass
class GlobalScope(Scope):
def __init__(self, pycore, module):
super(GlobalScope, self).__init__(pycore, module, None)
self.names = module._get_concluded_data()
def get_start(self):
return 1
def get_kind(self):
return 'Module'
def get_name(self, name):
try:
return self.pyobject[name]
except exceptions.AttributeNotFoundError:
if name in self.builtin_names:
return self.builtin_names[name]
raise exceptions.NameNotFoundError('name %s not found' % name)
def get_names(self):
if self.names.get() is None:
result = dict(self.builtin_names)
result.update(super(GlobalScope, self).get_names())
self.names.set(result)
return self.names.get()
def get_inner_scope_for_line(self, lineno, indents=None):
return self._scope_finder.get_holding_scope(self, lineno, indents)
def get_inner_scope_for_offset(self, offset):
return self._scope_finder.get_holding_scope_for_offset(self, offset)
@property
@utils.saveit
def _scope_finder(self):
return _HoldingScopeFinder(self.pyobject)
@property
def builtin_names(self):
return rope.base.builtins.builtins.get_attributes()
class FunctionScope(Scope):
def __init__(self, pycore, pyobject, visitor):
super(FunctionScope, self).__init__(pycore, pyobject,
pyobject.parent.get_scope())
self.names = None
self.returned_asts = None
self.is_generator = None
self.defineds = None
self.visitor = visitor
def _get_names(self):
if self.names is None:
self._visit_function()
return self.names
def _visit_function(self):
if self.names is None:
new_visitor = self.visitor(self.pycore, self.pyobject)
for n in ast.get_child_nodes(self.pyobject.get_ast()):
ast.walk(n, new_visitor)
self.names = new_visitor.names
self.names.update(self.pyobject.get_parameters())
self.returned_asts = new_visitor.returned_asts
self.is_generator = new_visitor.generator
self.defineds = new_visitor.defineds
def _get_returned_asts(self):
if self.names is None:
self._visit_function()
return self.returned_asts
def _is_generator(self):
if self.is_generator is None:
self._get_returned_asts()
return self.is_generator
def get_names(self):
return self._get_names()
def _create_scopes(self):
if self.defineds is None:
self._visit_function()
return [pydefined.get_scope() for pydefined in self.defineds]
def get_kind(self):
return 'Function'
def invalidate_data(self):
for pyname in self.get_names().values():
if isinstance(pyname, (rope.base.pynames.AssignedName,
rope.base.pynames.EvaluatedName)):
pyname.invalidate()
class ClassScope(Scope):
def __init__(self, pycore, pyobject):
super(ClassScope, self).__init__(pycore, pyobject,
pyobject.parent.get_scope())
def get_kind(self):
return 'Class'
def get_propagated_names(self):
return {}
class _HoldingScopeFinder(object):
def __init__(self, pymodule):
self.pymodule = pymodule
def get_indents(self, lineno):
return rope.base.codeanalyze.count_line_indents(
self.lines.get_line(lineno))
def _get_scope_indents(self, scope):
return self.get_indents(scope.get_start())
def get_holding_scope(self, module_scope, lineno, line_indents=None):
if line_indents is None:
line_indents = self.get_indents(lineno)
current_scope = module_scope
new_scope = current_scope
while new_scope is not None and \
(new_scope.get_kind() == 'Module' or
self._get_scope_indents(new_scope) <= line_indents):
current_scope = new_scope
if current_scope.get_start() == lineno and \
current_scope.get_kind() != 'Module':
return current_scope
new_scope = None
for scope in current_scope.get_scopes():
if scope.get_start() <= lineno:
if lineno <= scope.get_end():
new_scope = scope
break
else:
break
return current_scope
def _is_empty_line(self, lineno):
line = self.lines.get_line(lineno)
return line.strip() == '' or line.lstrip().startswith('#')
def _get_body_indents(self, scope):
return self.get_indents(scope.get_body_start())
def get_holding_scope_for_offset(self, scope, offset):
return self.get_holding_scope(
scope, self.lines.get_line_number(offset))
def find_scope_end(self, scope):
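        # Start from the last statement in the scope's AST body and keep
        # extending the end over following non-blank, non-comment lines that
        # are still indented inside the scope; the first shallower line marks
        # the real end of the scope.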
if not scope.parent:
return self.lines.length()
end = scope.pyobject.get_ast().body[-1].lineno
scope_start = self.pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= end:
# handling one-liners
body_indents = self._get_scope_indents(scope) + 4
else:
body_indents = self._get_body_indents(scope)
for l in self.logical_lines.generate_starts(
min(end + 1, self.lines.length()), self.lines.length() + 1):
if not self._is_empty_line(l):
if self.get_indents(l) < body_indents:
return end
else:
end = l
return end
@property
def lines(self):
return self.pymodule.lines
@property
def code(self):
return self.pymodule.source_code
@property
def logical_lines(self):
return self.pymodule.logical_lines
class TemporaryScope(Scope):
"""Currently used for list comprehensions and generator expressions
These scopes do not appear in the `get_scopes()` method of their
parent scopes.
"""
def __init__(self, pycore, parent_scope, names):
super(TemporaryScope, self).__init__(
pycore, parent_scope.pyobject, parent_scope)
self.names = names
def get_names(self):
return self.names
def get_defined_names(self):
return self.names
def _create_scopes(self):
return []
def get_kind(self):
return 'Temporary'
| {
"content_hash": "a6187797136db7f70faa1a7eb73f9a2d",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 78,
"avg_line_length": 31.05732484076433,
"alnum_prop": 0.5917760459392944,
"repo_name": "JetChars/vim",
"id": "0bed19a92912ccda603e3f470ca48154c417bb59",
"size": "9752",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "vim/bundle/python-mode/pymode/libs2/rope/base/pyscopes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "6887"
},
{
"name": "GCC Machine Description",
"bytes": "525"
},
{
"name": "Go",
"bytes": "2239"
},
{
"name": "HTML",
"bytes": "134"
},
{
"name": "JavaScript",
"bytes": "2128"
},
{
"name": "Makefile",
"bytes": "2763"
},
{
"name": "Python",
"bytes": "3294722"
},
{
"name": "Ruby",
"bytes": "40061"
},
{
"name": "Shell",
"bytes": "4058"
},
{
"name": "VimL",
"bytes": "5034489"
}
],
"symlink_target": ""
} |
from pyspider.libs.base_handler import *
class Handler(BaseHandler):
'''
this is a sample handler
'''
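    # @every re-runs on_start once every 24 hours; @config(age=...) below
    # treats a crawled page as fresh for ten days before it may be re-crawled.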
@every(minutes=24 * 60, seconds=0)
def on_start(self):
self.crawl('http://scrapy.org/', callback=self.index_page)
@config(age=10 * 24 * 60 * 60)
def index_page(self, response):
for each in response.doc('a[href^="http://"]').items():
self.crawl(each.attr.href, callback=self.detail_page)
def detail_page(self, response):
return {
"url": response.url,
"title": response.doc('title').text(),
}
| {
"content_hash": "c5b27bde3d050279b72aed845c9a096f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.5769230769230769,
"repo_name": "t4skforce/pyspider",
"id": "d973bdaa916d00abaad2e423092245700046d155",
"size": "718",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyspider/libs/sample_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21118"
},
{
"name": "JavaScript",
"bytes": "34090"
},
{
"name": "Python",
"bytes": "271384"
}
],
"symlink_target": ""
} |
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Created on 12/03/2015
@author: Alex Ip
'''
import sys
import os
import logging
import ConfigParser
import collections
from _gdfutils import log_multiline
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module
class ConfigFile(object):
def _parse_config_file(self):
'''
Function to return a nested dict of config file entries
Returns:
dict {<section_name>: {<key>: <value>,... },... }
'''
logger.debug('Opening config file %s', self._path)
config_parser = ConfigParser.SafeConfigParser(allow_no_value=True)
config_parser.read(self._path)
config_dict = collections.OrderedDict() # Need to preserve order of sections
for section_name in config_parser.sections():
section_dict = {}
config_dict[section_name.lower()] = section_dict
for attribute_name in config_parser.options(section_name):
attribute_value = config_parser.get(section_name, attribute_name)
section_dict[attribute_name.lower()] = attribute_value
log_multiline(logger.debug, config_dict, 'config_dict', '\t')
return config_dict
def __init__(self, path):
'''Constructor for class ConfigFile
Parameters:
path: Path to valid config file (required)
'''
log_multiline(logger.debug, path, 'path', '\t')
self._path = os.path.abspath(path)
assert os.path.exists(self._path), "%s does not exist" % self._path
self._configuration = self._parse_config_file()
log_multiline(logger.debug, self.__dict__, 'ConfigFile.__dict__', '\t')
@property
def path(self):
return self._path
@property
def configuration(self):
return self._configuration.copy()
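if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: parse the
    # config file named on the command line and print the resulting nested
    # dict of lower-cased sections and options.
    import pprint
    pprint.pprint(dict(ConfigFile(sys.argv[1]).configuration))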
| {
"content_hash": "6baee4fb152a22ac7dca5e9c3c8fa12e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 86,
"avg_line_length": 40.858695652173914,
"alnum_prop": 0.6264964086193137,
"repo_name": "GeoscienceAustralia/gdf",
"id": "56a587f3966ad184259485d75eea3563aea9e314",
"size": "3782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdf/_config_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "273579"
},
{
"name": "Shell",
"bytes": "9222"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AccountRules',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permissions', models.CharField(choices=[(b'A', b'Administration'), (b'W', b'Read/write'), (b'R', b'Read')], max_length=1)),
],
),
migrations.DeleteModel(
name='InvitationRequest',
),
migrations.AlterModelOptions(
name='account',
options={'ordering': ('create', 'name'), 'verbose_name': 'Account'},
),
migrations.RemoveField(
model_name='account',
name='user',
),
migrations.AlterField(
model_name='account',
name='create',
field=models.DateField(auto_now_add=True, verbose_name='Creation date'),
),
migrations.AddField(
model_name='accountrules',
name='account',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Account'),
),
migrations.AddField(
model_name='accountrules',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='account',
name='users',
field=models.ManyToManyField(related_name='account', through='accounts.AccountRules', to=settings.AUTH_USER_MODEL),
),
]
| {
"content_hash": "5f480b94311d8498119451cfc7476522",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 141,
"avg_line_length": 35.2037037037037,
"alnum_prop": 0.5791688584955287,
"repo_name": "sebastienbarbier/723e_server",
"id": "41fc6dff72dc73ef279bea6d59eebb49e65388dc",
"size": "1974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seven23/models/accounts/migrations/0002_auto_20161128_1335.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "182572"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "99185"
}
],
"symlink_target": ""
} |
"""* Default settings for jscribe.
@module jscribe.conf.defaults
"""
INPUT_PATHS = ["./"] # paths to source files that should be discovered
IGNORE_PATHS_REGEX = []
FILE_REGEX = r".*?[.]js$"
FILE_IGNORE_REGEX = None
DOCUMENTATION_OUTPUT_PATH = "./"
DOC_STRING_REGEX = [r"[/][*][*]", r"(?<!\\)[*][/]"]
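# The two patterns above delimit documentation blocks: the first matches the
# opening "/**" marker and the second matches an unescaped "*/" closing marker.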
TAG_REGEX = r"^\s*?[@](?P<tag>.*?)\s"
IGNORE_INVALID_TAGS = False
TEMPLATE = "default"
TEMPLATE_SETTINGS = {
"SHOW_LINE_NUMBER": True,
"FOOTER_TEXT": "Footer text",
"TITLE": "My Docs Title",
"ELEMENT_TEMPLATES": {},
}
TAG_SETTINGS_PATH = "jscribe.conf.jstagsettings"
OUTPUT_ENCODING = "utf-8"
LANGUAGE = "javascript"
GENERATOR = "html"
ALL_SOURCE_FILES = False
| {
"content_hash": "db03b2a87832d44d936a559dd0748b65",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 71,
"avg_line_length": 28.958333333333332,
"alnum_prop": 0.6201438848920864,
"repo_name": "mindbrave/jscribe",
"id": "d50c64b5db189c7294cb7172ff1d2d0e5701f2b0",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jscribe/conf/defaults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17820"
},
{
"name": "Python",
"bytes": "267951"
}
],
"symlink_target": ""
} |
'''
1224. Spiral
Time limit: 1.0 second
Memory limit: 64 MB
[Description]
A brand new sapper robot is able to neutralize mines in a rectangular region having
integer height and width (N and M respectively). Before the robot begins its work it
is placed near the top leftmost cell of the rectangle heading right. Then the robot
starts moving and neutralizing mines making a clockwise spiral way (see picture).
The spiral twists towards the inside of the region, covering all the cells. The region
is considered safe when all the cells are visited and checked by the robot.
Your task is to determine the number of the turns the robot has to make during its work.
[Input]
The input contains two integers in the following order: N, M (1 ≤ N, M ≤ 2^31 − 1).
[Output]
The output consists of a single integer value — the number of the turns.
'''
import sys
import math
def get_str_from_stdin():
return sys.stdin.readline().strip('\r\n')
def get_int_from_stdin():
return int(get_str_from_stdin())
def t(n, m):
if n == 1:
return 0
if m == 1:
# here n must be larger than 1
return 1
if n == 2:
# here m must be larger than 1
return 2
if m == 2:
# here n must be larger than 2
return 3
if m >= n:
if n % 2 == 0:
p = n / 2 - 1
return t(2, m - 2 * p) + 4 * p
else:
p = (n - 1) / 2
return t(1, m - 2 * p) + 4 * p
else:
if m % 2 == 0:
p = m / 2 - 1
return t(n - 2 * p, 2) + 4 * p
else:
p = (m - 1) / 2
return t(n - 2 * p, 1) + 4 * p
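# Illustrative sanity check (not part of the original submission): in a 3 x 5
# region the robot goes right 5, down 2, left 4, up 1, right 3 -- four turns.
assert t(3, 5) == 4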
def calc():
n, m = get_str_from_stdin().split(' ')
n = int(n)
m = int(m)
print t(n, m)
if __name__ == '__main__':
calc()
| {
"content_hash": "77f7ab7aae49c34d0cb5b59d154b1062",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 88,
"avg_line_length": 24.54794520547945,
"alnum_prop": 0.5731026785714286,
"repo_name": "matrixjoeq/timus_solutions",
"id": "f85dbab239f1218a1fa27504ec50a7ab95b9de1f",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1224/slu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12510"
},
{
"name": "C++",
"bytes": "96832"
},
{
"name": "Python",
"bytes": "134479"
},
{
"name": "Shell",
"bytes": "181"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.sites.models import Site
from future.builtins import str
from unittest import skipUnless
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import resolve_url
from django.template import Context, Template
from django.test.utils import override_settings
from django.utils.http import urlquote_plus
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.translation import get_language
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.core.request import current_request
from mezzanine.pages.models import Page, RichTextPage
from mezzanine.pages.admin import PageAdminForm
from mezzanine.urls import PAGES_SLUG
from mezzanine.utils.sites import override_current_site_id
from mezzanine.utils.tests import TestCase
User = get_user_model()
class PagesTests(TestCase):
def setUp(self):
"""
Make sure we have a thread-local request with a site_id attribute set.
"""
super(PagesTests, self).setUp()
from mezzanine.core.request import _thread_local
request = self._request_factory.get('/')
request.site_id = settings.SITE_ID
_thread_local.request = request
def test_page_ascendants(self):
"""
Test the methods for looking up ascendants efficiently
behave as expected.
"""
# Create related pages.
primary, created = RichTextPage.objects.get_or_create(title="Primary")
secondary, created = primary.children.get_or_create(title="Secondary")
tertiary, created = secondary.children.get_or_create(title="Tertiary")
# Test that get_ascendants() returns the right thing.
page = Page.objects.get(id=tertiary.id)
ascendants = page.get_ascendants()
self.assertEqual(ascendants[0].id, secondary.id)
self.assertEqual(ascendants[1].id, primary.id)
# Test ascendants are returned in order for slug, using
# a single DB query.
connection.queries_log.clear()
pages_for_slug = Page.objects.with_ascendants_for_slug(tertiary.slug)
self.assertEqual(len(connection.queries), 1)
self.assertEqual(pages_for_slug[0].id, tertiary.id)
self.assertEqual(pages_for_slug[1].id, secondary.id)
self.assertEqual(pages_for_slug[2].id, primary.id)
# Test page.get_ascendants uses the cached attribute,
# without any more queries.
connection.queries_log.clear()
ascendants = pages_for_slug[0].get_ascendants()
self.assertEqual(len(connection.queries), 0)
self.assertEqual(ascendants[0].id, secondary.id)
self.assertEqual(ascendants[1].id, primary.id)
# Use a custom slug in the page path, and test that
# Page.objects.with_ascendants_for_slug fails, but
# correctly falls back to recursive queries.
secondary.slug += "custom"
secondary.save()
pages_for_slug = Page.objects.with_ascendants_for_slug(tertiary.slug)
self.assertEqual(len(pages_for_slug[0]._ascendants), 0)
connection.queries_log.clear()
ascendants = pages_for_slug[0].get_ascendants()
self.assertEqual(len(connection.queries), 2) # 2 parent queries
self.assertEqual(pages_for_slug[0].id, tertiary.id)
self.assertEqual(ascendants[0].id, secondary.id)
self.assertEqual(ascendants[1].id, primary.id)
def test_set_parent(self):
old_parent, _ = RichTextPage.objects.get_or_create(title="Old parent")
new_parent, _ = RichTextPage.objects.get_or_create(title="New parent")
child, _ = RichTextPage.objects.get_or_create(
title="Child", slug="kid")
self.assertTrue(child.parent is None)
self.assertTrue(child.slug == "kid")
child.set_parent(old_parent)
child.save()
self.assertEqual(child.parent_id, old_parent.id)
self.assertTrue(child.slug == "old-parent/kid")
child = RichTextPage.objects.get(id=child.id)
self.assertEqual(child.parent_id, old_parent.id)
self.assertTrue(child.slug == "old-parent/kid")
child.set_parent(new_parent)
child.save()
self.assertEqual(child.parent_id, new_parent.id)
self.assertTrue(child.slug == "new-parent/kid")
child = RichTextPage.objects.get(id=child.id)
self.assertEqual(child.parent_id, new_parent.id)
self.assertTrue(child.slug == "new-parent/kid")
child.set_parent(None)
child.save()
self.assertTrue(child.parent is None)
self.assertTrue(child.slug == "kid")
child = RichTextPage.objects.get(id=child.id)
self.assertTrue(child.parent is None)
self.assertTrue(child.slug == "kid")
child = RichTextPage(title="child2")
child.set_parent(new_parent)
self.assertEqual(child.slug, "new-parent/child2")
# Assert that cycles are detected.
p1, _ = RichTextPage.objects.get_or_create(title="p1")
p2, _ = RichTextPage.objects.get_or_create(title="p2")
p2.set_parent(p1)
with self.assertRaises(AttributeError):
p1.set_parent(p1)
with self.assertRaises(AttributeError):
p1.set_parent(p2)
p2c = RichTextPage.objects.get(title="p2")
with self.assertRaises(AttributeError):
p1.set_parent(p2c)
def test_set_slug(self):
parent, _ = RichTextPage.objects.get_or_create(
title="Parent", slug="parent")
child, _ = RichTextPage.objects.get_or_create(
title="Child", slug="parent/child", parent_id=parent.id)
parent.set_slug("new-parent-slug")
self.assertTrue(parent.slug == "new-parent-slug")
parent = RichTextPage.objects.get(id=parent.id)
self.assertTrue(parent.slug == "new-parent-slug")
child = RichTextPage.objects.get(id=child.id)
self.assertTrue(child.slug == "new-parent-slug/child")
def test_login_required(self):
public, _ = RichTextPage.objects.get_or_create(
title="Public", slug="public", login_required=False)
private, _ = RichTextPage.objects.get_or_create(
title="Private", slug="private", login_required=True)
accounts_installed = ("mezzanine.accounts" in settings.INSTALLED_APPS)
args = {"for_user": AnonymousUser()}
self.assertTrue(public in RichTextPage.objects.published(**args))
self.assertTrue(private not in RichTextPage.objects.published(**args))
args = {"for_user": User.objects.get(username=self._username)}
self.assertTrue(public in RichTextPage.objects.published(**args))
self.assertTrue(private in RichTextPage.objects.published(**args))
public_url = public.get_absolute_url()
private_url = private.get_absolute_url()
self.client.logout()
response = self.client.get(private_url, follow=True)
login_prefix = ""
login_url = resolve_url(settings.LOGIN_URL)
login_next = private_url
try:
redirects_count = len(response.redirect_chain)
response_url = response.redirect_chain[-1][0]
except (AttributeError, IndexError):
redirects_count = 0
response_url = ""
if urlparse(response_url).path.startswith("/%s/" % get_language()):
# With LocaleMiddleware a language code can be added at the
# beginning of the path.
login_prefix = "/%s" % get_language()
if redirects_count > 1:
# With LocaleMiddleware and a string LOGIN_URL there can be
# a second redirect that encodes the next parameter.
login_next = urlquote_plus(login_next)
login = "%s%s?next=%s" % (login_prefix, login_url, login_next)
if accounts_installed:
# For an inaccessible page with mezzanine.accounts we should
# see a login page, without it 404 is more appropriate than an
# admin login.
target_status_code = 200
else:
target_status_code = 404
self.assertRedirects(response, login,
target_status_code=target_status_code)
response = self.client.get(public_url, follow=True)
self.assertEqual(response.status_code, 200)
if accounts_installed:
# View / pattern name redirect properly, without encoding next.
login = "%s%s?next=%s" % (login_prefix, login_url, private_url)
with override_settings(LOGIN_URL="login"):
# Note: The "login" is a pattern name in accounts.urls.
response = self.client.get(public_url, follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.get(private_url, follow=True)
self.assertRedirects(response, login)
self.client.login(username=self._username, password=self._password)
response = self.client.get(private_url, follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.get(public_url, follow=True)
self.assertEqual(response.status_code, 200)
if accounts_installed:
with override_settings(LOGIN_URL="mezzanine.accounts.views.login"):
response = self.client.get(public_url, follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.get(private_url, follow=True)
self.assertEqual(response.status_code, 200)
with override_settings(LOGIN_URL="login"):
response = self.client.get(public_url, follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.get(private_url, follow=True)
self.assertEqual(response.status_code, 200)
def test_page_menu_queries(self):
"""
Test that rendering a page menu executes the same number of
queries regardless of the number of pages or levels of
children.
"""
template = ('{% load pages_tags %}'
'{% page_menu "pages/menus/tree.html" %}')
before = self.queries_used_for_template(template)
self.assertTrue(before > 0)
self.create_recursive_objects(RichTextPage, "parent", title="Page",
status=CONTENT_STATUS_PUBLISHED)
after = self.queries_used_for_template(template)
self.assertEqual(before, after)
def test_page_menu_flags(self):
"""
Test that pages only appear in the menu templates they've been
assigned to show in.
"""
menus = []
pages = []
template = "{% load pages_tags %}"
for i, label, path in settings.PAGE_MENU_TEMPLATES:
menus.append(i)
pages.append(RichTextPage.objects.create(in_menus=list(menus),
title="Page for %s" % str(label),
status=CONTENT_STATUS_PUBLISHED))
template += "{%% page_menu '%s' %%}" % path
rendered = Template(template).render(Context({}))
for page in pages:
self.assertEqual(rendered.count(page.title), len(page.in_menus))
def test_page_menu_default(self):
"""
Test that the settings-defined default value for the ``in_menus``
field is used, also checking that it doesn't get forced to text,
but that sequences are made immutable.
"""
with override_settings(
PAGE_MENU_TEMPLATES=((8, "a", "a"), (9, "b", "b"))):
with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=None):
page_in_all_menus = Page.objects.create()
self.assertEqual(page_in_all_menus.in_menus, (8, 9))
with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=tuple()):
page_not_in_menus = Page.objects.create()
self.assertEqual(page_not_in_menus.in_menus, tuple())
with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=[9]):
page_in_a_menu = Page.objects.create()
self.assertEqual(page_in_a_menu.in_menus, (9,))
def test_overridden_page(self):
"""
        Test that a page with a slug matching a non-page urlpattern
        returns ``True`` for its overridden property.
"""
        # If BLOG_SLUG is empty then urlpatterns for pages are prefixed
        # with PAGES_SLUG, and generally won't be overridden. In this
        # case, there aren't any overriding URLs by default, so bail
        # on the test.
if PAGES_SLUG:
return
page, created = RichTextPage.objects.get_or_create(slug="edit")
self.assertTrue(page.overridden())
def test_unicode_slug_parm_to_processor_for(self):
"""
        Test that passing a unicode slug to processor_for works on
        Python 2.x
"""
from mezzanine.pages.page_processors import processor_for
@processor_for(u'test unicode string')
def test_page_processor(request, page):
return {}
page, _ = RichTextPage.objects.get_or_create(title="test page")
self.assertEqual(test_page_processor(current_request(), page), {})
def test_exact_page_processor_for(self):
"""
Test that passing exact_page=True works with the PageMiddleware
"""
from mezzanine.pages.middleware import PageMiddleware
from mezzanine.pages.page_processors import processor_for
from mezzanine.pages.views import page as page_view
@processor_for('foo/bar', exact_page=True)
def test_page_processor(request, page):
return HttpResponse("bar")
foo, _ = RichTextPage.objects.get_or_create(title="foo")
bar, _ = RichTextPage.objects.get_or_create(title="bar", parent=foo)
request = self._request_factory.get('/foo/bar/')
request.user = self._user
response = PageMiddleware().process_view(request, page_view, [], {})
self.assertTrue(isinstance(response, HttpResponse))
self.assertContains(response, "bar")
@skipUnless(settings.USE_MODELTRANSLATION and len(settings.LANGUAGES) > 1,
"modeltranslation configured for several languages required")
def test_page_slug_has_correct_lang(self):
"""
Test that slug generation is done for the default language and
not the active one.
"""
from collections import OrderedDict
from django.utils.translation import get_language, activate
from mezzanine.utils.urls import slugify
default_language = get_language()
code_list = OrderedDict(settings.LANGUAGES)
del code_list[default_language]
title_1 = "Title firt language"
title_2 = "Title second language"
page, _ = RichTextPage.objects.get_or_create(title=title_1)
for code in code_list:
try:
activate(code)
except:
pass
else:
break
# No valid language found
page.delete()
return
page.title = title_2
page.save()
self.assertEqual(page.get_slug(), slugify(title_1))
self.assertEqual(page.title, title_2)
activate(default_language)
self.assertEqual(page.title, title_1)
page.delete()
def test_clean_slug(self):
"""
Test that PageAdminForm strips leading and trailing slashes
from slugs or returns `/`.
"""
class TestPageAdminForm(PageAdminForm):
class Meta:
fields = ["slug"]
model = Page
data = {'slug': '/'}
submitted_form = TestPageAdminForm(data=data)
self.assertTrue(submitted_form.is_valid())
self.assertEqual(submitted_form.cleaned_data['slug'], "/")
data = {'slug': '/hello/world/'}
submitted_form = TestPageAdminForm(data=data)
self.assertTrue(submitted_form.is_valid())
self.assertEqual(submitted_form.cleaned_data['slug'], 'hello/world')
def test_ascendants_different_site(self):
site2 = Site.objects.create(domain='site2.example.com', name='Site 2')
parent = Page.objects.create(title="Parent", site=site2)
child = parent.children.create(title="Child", site=site2)
grandchild = child.children.create(title="Grandchild", site=site2)
# Re-retrieve grandchild so its parent attribute is not cached
with override_current_site_id(site2.id):
grandchild = Page.objects.get(pk=grandchild.pk)
with self.assertNumQueries(1):
self.assertListEqual(grandchild.get_ascendants(), [child, parent])
| {
"content_hash": "ed0f3258cba4e2947d1b4fec5f9fc90e",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 79,
"avg_line_length": 42.34663341645885,
"alnum_prop": 0.630999352217184,
"repo_name": "sjuxax/mezzanine",
"id": "1b9f9dba8a652571784eaf11e2d1badb2efbed28",
"size": "16981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mezzanine/pages/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60016"
},
{
"name": "HTML",
"bytes": "89073"
},
{
"name": "JavaScript",
"bytes": "452258"
},
{
"name": "Python",
"bytes": "660105"
}
],
"symlink_target": ""
} |
"""
AniColle Library
Collect your animes like a geek.
Database model and operations here.
Unlike the previous version, this version returns objects as results rather than dictionaries by default.
You can force convert it into a dict by using to_dict().
"""
from peewee import *
import re
from .seeker import seeker
from .config import config
import os
from json import loads as json_loads, dumps as json_dump
from pypinyin import lazy_pinyin, Style
run_mode = os.getenv('ANICOLLE_MODE') or 'default'
try:
config = config[run_mode]
# print("Running with", run_mode, "mode")
except KeyError :
print("No such running mode. Check your ANICOLLE_MODE system env please.")
exit()
db = SqliteDatabase(config.DATABASE)
class Bangumi(Model):
# `id` field is added automatically
name = TextField() # Bangumi name
cur_epi = IntegerField(default=0) # Currently viewing episode
on_air_epi = IntegerField(default=0) # (Placeholder)
    on_air_day = IntegerField(default=0) # The on-air weekday: 1-7 for Monday-Sunday, 0 for not on air, 8 for no fixed on-air day.
seeker = TextField(default='[]')
'''
    Seeker is a modularized part of the program which is used to seek new episodes of a bangumi programmatically.
    Seekers are placed under the `seeker` directory, and they are imported into this file as a dict named `seeker`.
    Seeker data saved in the database is a serialized array (in JSON format), as the following shows:
[
{
"seeker": SEEKER_NAME,
"chk_key": SEEKER_CHECK_KEY
},
]
chk_key is used for calling the `seek` function of 'seeker', usually a search keyword of the specific bangumi.
    For example, if you want to download 'Tokyo Ghoul' from bilibili, you should use "东京喰种" as the chk_key.
For more information on `chk_key`, please refer to our wiki.
'''
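    # Illustrative example, not from the original source: a bangumi followed
    # through two (assumed) seeker modules could store the following JSON in
    # the `seeker` field:
    #     [{"seeker": "bilibili", "chk_key": "东京喰种"},
    #      {"seeker": "dmhy", "chk_key": "东京喰种 --params:720p"}]
    # The optional "--params:" suffix is parsed by chkup() below.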
class Meta:
database = db
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'cur_epi': self.cur_epi,
'on_air_epi': self.on_air_epi,
'on_air_day': self.on_air_day,
'seeker': self.seeker,
'name_pinyin': ''.join(lazy_pinyin(self.name, Style.FIRST_LETTER))
}
def dbInit():
db.connect()
db.create_tables([Bangumi], safe=True)
db.close()
def getAni( bid=-1, on_air_day=-1 ):
db.connect()
r = []
try:
if bid>=0:
# get a single record
r = Bangumi.get(Bangumi.id==bid).to_dict()
elif on_air_day>=0:
# get a set of records
for bgm in Bangumi.select().where(Bangumi.on_air_day==on_air_day):
r.append(bgm.to_dict())
else:
# get all records
for bgm in Bangumi.select().order_by(Bangumi.on_air_day):
r.append(bgm.to_dict())
return r
except Bangumi.DoesNotExist:
return None
finally:
db.close()
def create( name, cur_epi=0, on_air_day=0, seeker=[] ):
db.connect()
bgm = Bangumi(name=name, cur_epi=cur_epi, on_air_day=on_air_day, seeker=json_dump(seeker));
bgm.save()
db.close()
return bgm.to_dict()
def modify( bid, name=None, cur_epi=None, on_air_day=None, seeker=None ):
db.connect()
try:
bgm = Bangumi.get(Bangumi.id==bid)
if name:
bgm.name = name
if cur_epi:
bgm.cur_epi = int(cur_epi)
if on_air_day:
bgm.on_air_day = int(on_air_day)
if seeker:
bgm.seeker = json_dump(seeker)
bgm.save()
return bgm.to_dict()
except Bangumi.DoesNotExist:
return 0
finally:
db.close()
def remove(bid):
db.connect()
try:
bgm = Bangumi.get(Bangumi.id==bid)
bgm.delete_instance()
return 1
except Bangumi.DoesNotExist:
return 0
finally:
db.close()
def increase( bid ):
db.connect()
try:
bgm = Bangumi.get(Bangumi.id==bid)
bgm.cur_epi = bgm.cur_epi +1
bgm.save()
return bgm.to_dict()
except Bangumi.DoesNotExist:
return 0
finally:
db.close()
def decrease( bid ):
db.connect()
try:
bgm = Bangumi.get(Bangumi.id==bid)
bgm.cur_epi = bgm.cur_epi -1
bgm.save()
return bgm.to_dict()
except Bangumi.DoesNotExist:
return 0
finally:
db.close()
def chkup(bid, episode=None):
def getParams(chk_key):
pattern = "\s+--params:(.*)$"
match = re.search(pattern, chk_key)
if not match:
return chk_key, None
else:
params_str = match.group(1)
params = str(params_str).split(",")
params = list(map(lambda e: str(e).strip(), params))
return chk_key, params
db.connect()
try:
bgm = Bangumi.get(Bangumi.id == bid)
except Bangumi.DoesNotExist:
return 0
else:
'''
Start of checking module
\/_\/_\/_\/_\/_\/_\/_\/_\/
'''
if episode is None or episode == '':
episode = bgm.cur_epi+1 # Check current episode +1
else:
episode = int(episode)
r = []
bgm_seeker_data = json_loads(bgm.seeker)
for seeker_seed in bgm_seeker_data:
try:
if not seeker_seed['chk_key']:
continue
chk_key, params = getParams(seeker_seed['chk_key'])
                # Maybe we need some new names. This can be confusing.
seek_result = seeker[seeker_seed['seeker']].seek(
seeker_seed['chk_key'], episode, params)
if type(seek_result) == list:
r = r+seek_result
except KeyError:
print("[WARN] Seeker named", seeker_seed['seeker'],
"not found. (Not registered?)")
return r
'''
_/\_/\_/\_/\_/\_/\_/\_/\_
End of checking module
'''
finally:
db.close()
| {
"content_hash": "8cddd25576e65d3925667789cc731ae8",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 138,
"avg_line_length": 28.96208530805687,
"alnum_prop": 0.5576828669612175,
"repo_name": "chienius/anicolle",
"id": "97655b2a29a8340713f930d7e0d0464dfd8082e9",
"size": "6165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anicolle/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16612"
}
],
"symlink_target": ""
} |
from django.core.cache import cache
from django.test import TestCase
class PolyaxonBaseTest(TestCase):
COLLECT_TASKS = False
def setUp(self):
# Flush cache
cache.clear()
        # Mock celery's default send_task
self.mock_send_task()
super().setUp()
self.worker_send = {}
def mock_send_task(self):
from celery import current_app
def send_task(name, args=(), kwargs=None, **opts):
kwargs = kwargs or {}
if name in current_app.tasks:
task = current_app.tasks[name]
return task.apply_async(args, kwargs, **opts)
elif self.worker_send:
self.worker_send[name] = {"args": args, "kwargs": kwargs, "opts": opts}
current_app.send_task = send_task
class PolyaxonBaseTestSerializer(PolyaxonBaseTest):
query = None
serializer_class = None
model_class = None
factory_class = None
expected_keys = {}
num_objects = 2
def test_serialize_one(self):
raise NotImplementedError
def create_one(self):
raise NotImplementedError
def create_multiple(self):
for i in range(self.num_objects):
self.create_one()
def test_serialize_many(self):
self.create_multiple()
data = self.serializer_class(self.query.all(), many=True).data
assert len(data) == self.num_objects
for d in data:
assert set(d.keys()) == self.expected_keys
| {
"content_hash": "3b4719e5e4ddbea3f6288ade3978643f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 28.056603773584907,
"alnum_prop": 0.6005379959650302,
"repo_name": "polyaxon/polyaxon",
"id": "2db017d763e39949dcd302cccec4f2c5a80e6426",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/polycommon/polycommon/test_cases/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
"""Implements a sparse balanced and asynchronous E-I model, loosely based
on Borges and Kopell, 2005.
"""
from __future__ import division
import argparse
import numpy as np
from brian2 import *
from syncological.async import model
from syncological.ping import analyze_result, save_result
parser = argparse.ArgumentParser(
description="A sparse, balanced, and asynchronous E-I model.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"name",
help="Name of exp, used to save results as hdf5."
)
parser.add_argument(
"-t", "--time",
help="Simulation run time (in ms)",
default=2,
type=float
)
parser.add_argument(
"--stim",
help="Simulus time (in ms)",
default=1.5,
type=float
)
parser.add_argument(
"--rate",
help="Stimulus firing rate (approx)",
default=5,
type=float
)
parser.add_argument(
"--w_e",
help="Input weight to E (msiemens)",
default=0.5,
type=float
)
parser.add_argument(
"--w_i",
help="Input weight to E (msiemens)",
default=0.5,
type=float
)
parser.add_argument(
"--w_ei",
help="Weight E -> I (msiemens)",
default=0.1,
type=float
)
parser.add_argument(
"--w_ie",
help="Weight I -> E (msiemens)",
default=0.5,
type=float
)
parser.add_argument(
"--seed",
help="Seed value",
default=None
)
args = parser.parse_args()
try:
seed = int(args.seed)
except TypeError:
seed = None
result = model(
args.time,
args.stim, args.rate,
args.w_e, args.w_i, args.w_ei, args.w_ie,
seed=seed
)
save_result(args.name, result)
analysis = analyze_result(args.name, args.stim, result, fs=10000, save=True)
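# Example invocation (illustrative only; the experiment name is an assumption):
#     python async.py demo_run -t 2 --stim 1.5 --rate 5 --seed 42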
| {
"content_hash": "4a454163624293a7066176411caa23c0",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 20.925925925925927,
"alnum_prop": 0.6489675516224189,
"repo_name": "voytekresearch/syncological",
"id": "12f530849cb9fb87f3e2ec141728cdeffae6c7f1",
"size": "1741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1897596"
},
{
"name": "Makefile",
"bytes": "13307"
},
{
"name": "Python",
"bytes": "56039"
},
{
"name": "R",
"bytes": "11198"
}
],
"symlink_target": ""
} |
import simplejson
from flask import jsonify, request, url_for
from . import viewing
import iiifoo_utils
from dbmodels import Manifest, Image
from source_mappings import source_mapping, is_dynamic_source
pub_req_optmap = ['source_url', 'manifest_id']
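# Positional path segments in non-dynamic-source URLs are mapped onto these
# option names, in order, by iiifoo_utils.parse_mapped_rest_options.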
# Make sure to change relevant paths in tests etc. if/when this changes.
@viewing.route('/iiif/<source_type>/<path:options>/manifest')
@iiifoo_utils.crossdomain(origin="*")
@iiifoo_utils.iiif_presentation_api_view
def get_manifest(source_type, options):
"""Get a manifest for the given info.
For authoring sources, gets it from db.
For dynamic sources, creates it on request.
"""
source_type = source_mapping.get((source_type, 'base'))
if not source_type:
return jsonify({"message": "bad type", "success": False}), 502
if not is_dynamic_source(source_type):
options = iiifoo_utils.parse_mapped_rest_options(options, pub_req_optmap)
if not source_type:
return jsonify({"message": "bad type", "success": False}), 502
manifest = Manifest.query.filter_by(
id=options['manifest_id'], source_type=source_type.type_name,
source_url=options['source_url']
).first()
if not manifest:
return jsonify({"message": "manifest not found",
"success": False}), 404
# canvases = manifest['sequences'][0]['canvases']
# canvas_image_ids = [image_id_from_canvas_id(canvas['@id'])
# for canvas in canvases]
# for image_id in canvas_image_ids:
# if source_type.allowed_access(image_id=image_id,
# cookie=session['']) # TODO
responsetext = manifest.manifest
else:
parsed_options = iiifoo_utils.parse_rest_options(options)
nph = source_type(parsed_options)
responsetext = \
nph.get_manifest(url_root=request.base_url.rstrip("manifest"))
return responsetext, 200
@viewing.route('/iiif/<source_type>/<path:options>'
'/list/<canvas_name>')
@iiifoo_utils.crossdomain(origin="*")
@iiifoo_utils.iiif_presentation_api_view
def get_annotation_list(source_type, options, canvas_name):
source_type = source_mapping.get((source_type, 'base'))
if not source_type:
return jsonify({"message": "bad type", "success": False}), 502
if not is_dynamic_source(source_type):
options = iiifoo_utils.parse_mapped_rest_options(options, pub_req_optmap)
pi = Image.query.filter_by(
identifier=canvas_name,
manifest_id=options['manifest_id'],
source_type=source_type.type_name,
source_url=options['source_url']
).first()
if pi:
response = jsonify(simplejson.loads(pi.annotations)), 200
else:
response = jsonify({"message": "image not found",
"success": False}), 404
return response
else:
options = iiifoo_utils.parse_rest_options(options)
nph = source_type(options)
manifest_url = url_for('.get_manifest', source_type=source_type,
options=options)
manifest_url = "".join([request.url_root.rstrip('/'), manifest_url])
annotations = nph.get_annotations(canvas_name=canvas_name,
manifest_url=manifest_url)
return jsonify(annotations)
@viewing.route('/iiif/<source_type>/<path:options>/canvas/<canvas_name>')
@iiifoo_utils.crossdomain(origin="*")
@iiifoo_utils.iiif_presentation_api_view
def get_canvas(source_type, options, canvas_name):
source_type = source_mapping.get((source_type, 'base'))
if not source_type:
return jsonify({"message": "bad type", "success": False}), 502
if not is_dynamic_source(source_type):
options = iiifoo_utils.parse_mapped_rest_options(options, pub_req_optmap)
m = Manifest.query.filter_by(
id=options['manifest_id'],
source_type=source_type.type_name,
source_url=options['source_url']
).first()
if not m:
return jsonify({"message": "manifest not found",
"success": False}), 404
manifest = simplejson.loads(m.manifest)
canvases = manifest['sequences'][0]['canvases']
canvas_image_ids = [iiifoo_utils.image_id_from_canvas_id(canvas['@id'])
for canvas in canvases]
index = canvas_image_ids.index(canvas_name)
response = jsonify(canvases[index]), 200
return response
else:
raise NotImplementedError()
| {
"content_hash": "025ee2bd20bdcf31dde252683e08dd37",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 81,
"avg_line_length": 42.08108108108108,
"alnum_prop": 0.6118604153286235,
"repo_name": "hashimmm/iiifoo",
"id": "7e335a4830b70b3dee89948329178ec951ac4e8c",
"size": "4671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iiifoo_server/viewing/iiif_metadata_api_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "104017"
},
{
"name": "Gherkin",
"bytes": "7898"
},
{
"name": "HTML",
"bytes": "11507"
},
{
"name": "JavaScript",
"bytes": "14754"
},
{
"name": "Python",
"bytes": "204017"
},
{
"name": "Shell",
"bytes": "400"
}
],
"symlink_target": ""
} |
"""
A port of Infobot's nickometer command from Perl. This plugin
provides one command (called nickometer) which will tell you how 'lame'
an IRC nick is. It's an elitist hacker thing, but quite fun.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.baggins
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "18bae78fea3f886649831576bc254bad",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 29.823529411764707,
"alnum_prop": 0.7544378698224852,
"repo_name": "kblin/supybot-gsoc",
"id": "e27199e508cf396b97f91b7a4c40eedfd0580a4f",
"size": "3182",
"binary": false,
"copies": "15",
"ref": "refs/heads/stable",
"path": "plugins/Nickometer/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2238011"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "bad25ae196614095b77a61a7e6fc9ac2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 166,
"avg_line_length": 38,
"alnum_prop": 0.706766917293233,
"repo_name": "antoinecarme/pyaf",
"id": "6c14f5d549fd313673db39e6bf5b8c1e5fd135a2",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_LinearTrend/cycle_5/ar_/test_artificial_32_Difference_LinearTrend_5__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import logging
from classymq import exchanges, queues
from classymq.lib.uuid import UUIDRequest
log = logging.getLogger(__name__)
class AMQPAPIRequest(UUIDRequest):
def __init__(self, uuid=None, message=None, *args, **kwargs):
super(AMQPAPIRequest, self).__init__(uuid, *args, **kwargs)
self.message = message
def __str__(self):
d = {}
for k, v in self.items():
sv = str(v)
if len(sv) > 500:
sv = sv[:500] + '...'
d[k] = sv
return str(d)
class AMQPAPIExchange(exchanges.BaseExchange):
KEY = "amqpapi"
# TYPE = exchanges.EXCHANGE_TYPES.TOPIC
class AMQPAPIQueue(queues.BaseQueue):
KEY = "amqpapi-%(prefix)s-%(uuid)s"
AUTO_DELETE = True
| {
"content_hash": "18158099d682f7f30283fafe629e805d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 67,
"avg_line_length": 26.964285714285715,
"alnum_prop": 0.5933774834437087,
"repo_name": "gdoermann/classymq",
"id": "ae98c9fbd13bc05efa8ad8c1a8b7f18aa8951566",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classymq/api/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6986"
},
{
"name": "Python",
"bytes": "68656"
},
{
"name": "Shell",
"bytes": "6463"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import csv
import sys
if os.path.exists("/home/ggdhines/github/pyIBCC/python"):
sys.path.append("/home/ggdhines/github/pyIBCC/python")
else:
sys.path.append("/Users/greghines/Code/pyIBCC/python")
import ibcc
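# pyIBCC implements Independent Bayesian Classifier Combination (IBCC), used
# here to aggregate many volunteers' classifications into a single estimate.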
if os.path.isdir("/Users/greghines/Databases/serengeti"):
baseDir = "/Users/greghines/Databases/serengeti/"
else:
baseDir = "/home/ggdhines/Databases/serengeti/"
species2 = ['elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
#species = ['gazelleThomsons']
species = ['buffalo','wildebeest','zebra']
users = []
photos = []
def createConfigFile(classID):
f = open(baseDir+"ibcc/"+str(classID)+"config.py",'wb')
print("import numpy as np\nscores = np.array([0,1])", file=f)
print("nScores = len(scores)", file=f)
print("nClasses = 2",file=f)
print("inputFile = '"+baseDir+"ibcc/"+str(classID)+".in'", file=f)
print("outputFile = '"+baseDir+"ibcc/"+str(classID)+".out'", file=f)
print("confMatFile = '"+baseDir+"ibcc/"+str(classID)+".mat'", file=f)
# if numClasses == 4:
# print("alpha0 = np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2,2, 2]])", file=f)
# print("nu0 = np.array([25.0, 25.0, 25.0, 1.0])", file=f)
# elif numClasses == 2:
# print("alpha0 = np.array([[2, 1], [1, 2],])", file=f)
# print("nu0 = np.array([50.,50.])", file=f)
# else:
# assert(False)
f.close()
individualClassifications = []
reader = csv.reader(open(baseDir+"filtered20","rU"), delimiter="\t")
for userName, photoName, classification in reader:
individualClassifications.append((userName,photoName,classification))
ibccClassifications = []
for i, s in enumerate(species):
print(s)
createConfigFile(i)
f = open(baseDir+"ibcc/"+str(i)+".in",'wb')
for userName,photoName,classification in individualClassifications:
if classification == "[]":
classification = []
else:
classification = [int(v) for v in classification[1:-1].split(",")]
if not(userName in users):
users.append(userName)
userIndex = len(users)-1
else:
userIndex = users.index(userName)
if not(photoName in photos):
photos.append(photoName)
photoIndex = len(photos)- 1
else:
photoIndex = photos.index(photoName)
if i in classification:
print(str(userIndex)+","+str(photoIndex)+",1", file=f)
else:
print(str(userIndex)+","+str(photoIndex)+",0", file=f)
f.close()
ibcc.runIbcc(baseDir+"ibcc/"+str(i)+"config.py")
#read in the predicted classifications
#next, read in the experts' classifications
ibccClassifications = [0 for p in photos]
print("Reading in IBCC results")
reader = csv.reader(open(baseDir+"ibcc/"+str(i)+".out", "rU"), delimiter=" ")
next(reader, None)
for row in reader:
photoIndex = int(float(row[0]))
pos = float(row[2])
if pos >= 0.5:
ibccClassifications[photoIndex] = 1
mistakes = {}
#now go back to the users' input and estimate what their confusion matrices would look like
for userName,photoName,classification in individualClassifications:
photoIndex = photos.index(photoName)
if classification == "[]":
classification = []
else:
classification = [int(v) for v in classification[1:-1].split(",")]
if ibccClassifications[photoIndex] == 1:
if not(i in classification) and len(classification) == 1:
if len(classification) != 1:
continue
correct = species[i]
reported = species2[classification[0]]
if correct == reported:
continue
if not((correct,reported) in mistakes) :
mistakes[(correct,reported)] = 1
else:
mistakes[(correct,reported)] += 1
for (correct,incorrect) in mistakes:
print(correct,incorrect,mistakes[(correct,incorrect)])
continue
#next, read in the experts' classifications
expertClassifications = [0 for p in photos]
print("Reading in expert classification")
reader = csv.reader(open(baseDir+"expert_classifications_raw.csv", "rU"), delimiter=",")
next(reader, None)
for row in reader:
photoName = row[2]
photoIndex = photos.index(photoName)
tagged = row[12]
if s in tagged:
expertClassifications[photoIndex] = 1
| {
"content_hash": "0088b06b47e3cf7a2215bde65e2e560e",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 552,
"avg_line_length": 34.897260273972606,
"alnum_prop": 0.6125613346418057,
"repo_name": "zooniverse/aggregation",
"id": "0057358f70a9bdb47cfd2e1aa630936093e22a43",
"size": "5117",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experimental/serengeti/individualIBCC.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "723"
},
{
"name": "Python",
"bytes": "2184451"
},
{
"name": "Scala",
"bytes": "629"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.views.generic import TemplateView
from thanksobama.main.views import *
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'thanksobama.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^new-question/$', NewQuestion.as_view())
)
| {
"content_hash": "04cc025f85da50033d7a06aa2556a124",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 29.235294117647058,
"alnum_prop": 0.6901408450704225,
"repo_name": "kz26/ThanksObama",
"id": "97c952a0655b2fcd99d19a209d75b6efdb606cbb",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thanksobama/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33"
},
{
"name": "CoffeeScript",
"bytes": "1835"
},
{
"name": "Python",
"bytes": "8931"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/military/shared_outpost_shed_s04.iff"
result.attribute_template_id = -1
result.stfName("building_name","military_guard_tower_1")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "f79ac36c3cea7e766d50b9576d158fc5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.703125,
"repo_name": "obi-two/Rebelion",
"id": "5d427b190190dcad648a777a532c2ec1a633dd2c",
"size": "465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/military/shared_outpost_shed_s04.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="cmid", parent_name="parcats.line", **kwargs):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "29e046b8b75306de385200254ab31dfc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 38.15384615384615,
"alnum_prop": 0.5967741935483871,
"repo_name": "plotly/python-api",
"id": "a553f23c8dbb036b7f9def1e88e27840b4f8d4ea",
"size": "496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/line/_cmid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
'''Get columns by name from SQL query'''
# sqlite3
import sqlite3
db = sqlite3.connect(':memory:')
db.row_factory = sqlite3.Row
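# sqlite3.Row supports access by column name as well as by index.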
# psycopg2
import psycopg2
from psycopg2.extras import DictCursor
db = psycopg2.connect('my-dbn-string')
cur = db.cursor(cursor_factory=DictCursor)
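# DictCursor rows can be indexed by column name as well as by position.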
# Then
cur.execute('select * from people')
for row in cur:
print(row['name'])
| {
"content_hash": "36c74a8242098b66f0d386a8a0857026",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 42,
"avg_line_length": 21.235294117647058,
"alnum_prop": 0.7285318559556787,
"repo_name": "tebeka/pythonwise",
"id": "b11593a35df06409b877497022f719e131f10a16",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sql-col-by-name.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "419"
},
{
"name": "Assembly",
"bytes": "130"
},
{
"name": "Awk",
"bytes": "94"
},
{
"name": "C",
"bytes": "3348"
},
{
"name": "CSS",
"bytes": "7156"
},
{
"name": "Dockerfile",
"bytes": "691"
},
{
"name": "Go",
"bytes": "17160"
},
{
"name": "HTML",
"bytes": "28603"
},
{
"name": "JavaScript",
"bytes": "75641"
},
{
"name": "Jupyter Notebook",
"bytes": "542450"
},
{
"name": "Makefile",
"bytes": "2242"
},
{
"name": "Mako",
"bytes": "795"
},
{
"name": "Python",
"bytes": "1039734"
},
{
"name": "Shell",
"bytes": "23126"
},
{
"name": "TeX",
"bytes": "257"
},
{
"name": "Vim script",
"bytes": "785"
}
],
"symlink_target": ""
} |
from disassemblers.libdisassemble.opcode86 import regs
from copy import deepcopy
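# These match the System V AMD64 calling convention: integer/pointer arguments
# are passed in rdi, rsi, rdx, rcx, r8, r9 and floating-point/vector arguments
# in xmm0-xmm7; rax is tracked separately (it is not an argument register).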
legal_integers = ['rdi', 'rsi', 'rdx', 'rcx', 'r8', 'r9']
legal_sse = ['xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', 'xmm6', 'xmm7']
legal_other = ['rax']
def reg_normalize(reg):
'''
Normalize a register to a form independent of its size.
'''
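    # e.g. 'eax' or 'ax' should normalize to 'rax', assuming the first 16
    # entries of regs are the 64-bit registers (which the idx & 0xF mask
    # below relies on).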
idx = list(map(lambda x: x[0], regs)).index(reg)
return regs[idx&0xF][0]
class Params:
'''
    A data structure that holds the argument registers seen so far for a call
    and uses simple heuristics to decide when the argument list ends.
'''
def __init__(self):
self.memory = []
self.integers = []
self.sse = []
self.other = []
self.args = []
def add(self, reg, arg):
'''
Try to add a register to the list of params.
        If it's the next legal register in a list of parameters, it's added
and True is returned. If it isn't, False is returned so that the
function can be wrapped.
'''
arg = deepcopy(arg)
if 'w' in arg:
arg['w'] = False
arg['r'] = True
try:
param = reg_normalize(reg)
except ValueError:
return False
try:
if legal_integers[len(self.integers)] == param:
self.integers.append(reg)
self.args.append(arg)
return True
elif legal_sse[len(self.sse)] == reg:
#fix normalization here
self.sse.append(reg)
self.args.append(arg)
return True
elif legal_other[len(self.other)] == param:
self.other.append(reg)
return True
else:
return False
except IndexError:
return False
def fold(cfg, symbols):
'''
    Fold as many function calls as possible, inferring argument lists
    along the way.
'''
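    # Walk each basic block backwards: once a call instruction is seen, the
    # registers written before it are collected as arguments for as long as
    # they extend a legal argument-register sequence; the call line is then
    # rewritten as a mov of apply(function, args) into eax.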
for block, depth in cfg.iterblocks():
inside_call = False
for n, line in enumerate(reversed(block)):
if inside_call:
if 'dest' not in line['ins']:
continue
dest = line['ins']['dest']
param = dest['value']
if call_params.add(param, dest):
pass
else:
apply_ins = {'op': 'apply', 'function': function_name, 'args': call_params.args}
eax = {'value': 'eax', 'repr': 'eax', 'r': False, 'w': True}
mov_ins = {'op': 'mov', 'src': apply_ins, 'dest': eax}
block[len(block)-call_n-1]['ins'] = mov_ins
inside_call = False
call_params = None
if line['ins']['op'] == 'call':
inside_call = True
call_n = n
call_params = Params()
function_name = 'unknown_function'
if 'repr' in line['ins']['dest']:
if type(line['ins']['dest']['repr']) == int:
addr = line['loc']+line['length']+line['ins']['dest']['repr']
if addr in symbols:
function_name = symbols[addr]
| {
"content_hash": "e71d6c13d99c45458564c2ff7fc62909",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 100,
"avg_line_length": 35.365591397849464,
"alnum_prop": 0.4843417452113104,
"repo_name": "drx/ocd",
"id": "9bdf7391faee11d28a56157d67d40566ac4a3762",
"size": "3289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/function_calls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3306"
},
{
"name": "Makefile",
"bytes": "159"
},
{
"name": "Python",
"bytes": "310636"
}
],
"symlink_target": ""
} |
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
downside_risk,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns, env,
benchmark_returns=None, algorithm_leverages=None):
self.env = env
treasury_curves = env.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = env.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns,
env)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns,
env)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.trading_day_counts = pd.stats.moments.rolling_count(
self.algorithm_returns, self.num_trading_days)
self.mean_algorithm_returns = \
self.algorithm_returns.cumsum() / self.trading_day_counts
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date,
self.env,
)
self.sharpe = self.calculate_sharpe()
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the form:
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns, env):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = env.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
mar = downside_risk(self.algorithm_returns,
self.mean_algorithm_returns,
self.num_trading_days)
# Hold on to downside risk for debugging purposes.
self.downside_risk = mar
return sortino_ratio(self.algorithm_period_returns,
self.treasury_period_return,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
# so return nan.
if len(self.algorithm_returns) < 2:
return np.nan, np.nan, np.nan, np.nan, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
# If there are missing benchmark values, then we can't calculate the
# beta.
if not np.isfinite(C).all():
return np.nan, np.nan, np.nan, np.nan, []
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
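        # Compound returns in log space (a running sum of log(1 + r)) so the
        # peak-to-trough drawdown is a simple difference; it is converted back
        # to a fraction with exp() at the end.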
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
            # this is a guard for a single day returning -100% or worse: if a
            # return is -1.0 or lower, math.log(1.0 + r) raises because you
            # cannot take the log of zero or a negative number
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
| {
"content_hash": "ab1e8d2290ff7dc0a9623ba9778e1dee",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 79,
"avg_line_length": 35.11437908496732,
"alnum_prop": 0.5617496510004654,
"repo_name": "wilsonkichoi/zipline",
"id": "b4af1288984ded1f8d7aca8ee7f7a7f4e7a457c3",
"size": "11328",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zipline/finance/risk/period.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6778"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "171073"
},
{
"name": "PowerShell",
"bytes": "3260"
},
{
"name": "Python",
"bytes": "2653811"
},
{
"name": "Shell",
"bytes": "6381"
}
],
"symlink_target": ""
} |
print("\nExercise 10.11\n")
#
# Question 1
# 1. Two words are a "reverse pair" if each is the reverse of the other. Write
# a program that finds all the reverse pairs in the word list.
#
import bisect
fin = open('words.txt')
wlist = []
def word_list(fin):
for line in fin:
word = line.strip()
wlist.append(word)
return wlist
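# Membership test via binary search: O(log n) per lookup, assuming the word
# list is sorted (as the standard words.txt is).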
def in_bisect(word_list, word):
i = bisect.bisect_left(word_list, word)
if i == len(word_list):
return False
return word_list[i] == word
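# Note: each reverse pair is reported twice (once from each word) and
# palindromes are paired with themselves.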
def reverse_pair(wlist):
reverse_pair_list = []
for word in wlist:
if in_bisect(wlist, word[::-1]):
pair = (word, word[::-1])
reverse_pair_list.append(pair)
return reverse_pair_list
wlist = word_list(fin)
print(reverse_pair(wlist))
| {
"content_hash": "5e0febdf620398a579206a3ff31d75de",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 21.210526315789473,
"alnum_prop": 0.609181141439206,
"repo_name": "ITSE-1402/git-classworkspace",
"id": "add225b50c298df78e7190f3eef22d72e42dc8b9",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Yao_repo/homework-chapter-10/exercise10.11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4009"
}
],
"symlink_target": ""
} |
import pytest
import six
from kafka.partitioner import Murmur2Partitioner
from kafka.partitioner.default import DefaultPartitioner
def test_default_partitioner():
partitioner = DefaultPartitioner()
all_partitions = list(range(100))
available = all_partitions
# partitioner should return the same partition for the same key
p1 = partitioner(b'foo', all_partitions, available)
p2 = partitioner(b'foo', all_partitions, available)
assert p1 == p2
assert p1 in all_partitions
# when key is None, choose one of available partitions
assert partitioner(None, all_partitions, [123]) == 123
# with fallback to all_partitions
assert partitioner(None, all_partitions, []) in all_partitions
def test_hash_bytes():
p = Murmur2Partitioner(range(1000))
assert p.partition(bytearray(b'test')) == p.partition(b'test')
def test_hash_encoding():
p = Murmur2Partitioner(range(1000))
assert p.partition('test') == p.partition(u'test')
def test_murmur2_java_compatibility():
p = Murmur2Partitioner(range(1000))
# compare with output from Kafka's org.apache.kafka.clients.producer.Partitioner
assert p.partition(b'') == 681
assert p.partition(b'a') == 524
assert p.partition(b'ab') == 434
assert p.partition(b'abc') == 107
assert p.partition(b'123456789') == 566
assert p.partition(b'\x00 ') == 742
| {
"content_hash": "eb7f1bee78063ac5a85c6dcc80c1a13f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 32.23255813953488,
"alnum_prop": 0.7027417027417028,
"repo_name": "zackdever/kafka-python",
"id": "52b6b81d138e96b9019f1ab44f0a99f1d5c83060",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_partitioner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "675698"
},
{
"name": "Shell",
"bytes": "2646"
}
],
"symlink_target": ""
} |
import json
from ..utils import check_resource, minimum_version
class NetworkApiMixin(object):
@minimum_version('1.21')
def networks(self, names=None, ids=None):
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
params = {'filters': json.dumps(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None):
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
data = {
'name': name,
'driver': driver,
'options': options,
'ipam': ipam,
}
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@minimum_version('1.21')
def remove_network(self, net_id):
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@minimum_version('1.21')
def inspect_network(self, net_id):
url = self._url("/networks/{0}", net_id)
res = self._get(url)
return self._result(res, json=True)
@check_resource
@minimum_version('1.21')
def connect_container_to_network(self, container, net_id):
data = {"container": container}
url = self._url("/networks/{0}/connect", net_id)
self._post_json(url, data=data)
@check_resource
@minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id):
data = {"container": container}
url = self._url("/networks/{0}/disconnect", net_id)
self._post_json(url, data=data)
| {
"content_hash": "0520858d9994317981f5dcf0dd0a062d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 31.05,
"alnum_prop": 0.5743424584004294,
"repo_name": "rhatdan/docker-py",
"id": "ce3f51119cd2f7fc6d655a03593ed910d8f7912d",
"size": "1863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/api/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2227"
},
{
"name": "Python",
"bytes": "329736"
}
],
"symlink_target": ""
} |
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.hooks.campaign_manager import GoogleCampaignManagerHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
API_VERSION = "v3.3"
GCP_CONN_ID = "google_cloud_default"
REPORT_ID = "REPORT_ID"
PROFILE_ID = "PROFILE_ID"
ENCRYPTION_SOURCE = "encryption_source"
ENCRYPTION_ENTITY_TYPE = "encryption_entity_type"
ENCRYPTION_ENTITY_ID = 1234567
class TestGoogleCampaignManagerHook(TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = GoogleCampaignManagerHook(gcp_conn_id=GCP_CONN_ID, api_version=API_VERSION)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook._authorize"
)
@mock.patch("airflow.providers.google.marketing_platform.hooks.campaign_manager.build")
def test_gen_conn(self, mock_build, mock_authorize):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
"dfareporting",
API_VERSION,
http=mock_authorize.return_value,
cache_discovery=False,
)
assert mock_build.return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_delete_report(self, get_conn_mock):
return_value = "TEST"
get_conn_mock.return_value.reports.return_value.delete.return_value.execute.return_value = (
return_value
)
result = self.hook.delete_report(profile_id=PROFILE_ID, report_id=REPORT_ID)
get_conn_mock.return_value.reports.return_value.delete.assert_called_once_with(
profileId=PROFILE_ID, reportId=REPORT_ID
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_get_report(self, get_conn_mock):
file_id = "FILE_ID"
return_value = "TEST"
# fmt: off
get_conn_mock.return_value.reports.return_value.files.return_value. \
get.return_value.execute.return_value = return_value
# fmt: on
result = self.hook.get_report(profile_id=PROFILE_ID, report_id=REPORT_ID, file_id=file_id)
get_conn_mock.return_value.reports.return_value.files.return_value.get.assert_called_once_with(
profileId=PROFILE_ID, reportId=REPORT_ID, fileId=file_id
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_get_report_file(self, get_conn_mock):
file_id = "FILE_ID"
return_value = "TEST"
get_conn_mock.return_value.reports.return_value.files.return_value.get_media.return_value = (
return_value
)
result = self.hook.get_report_file(profile_id=PROFILE_ID, report_id=REPORT_ID, file_id=file_id)
get_conn_mock.return_value.reports.return_value.files.return_value.get_media.assert_called_once_with(
profileId=PROFILE_ID, reportId=REPORT_ID, fileId=file_id
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_insert_report(self, get_conn_mock):
report = {"body": "test"}
return_value = "TEST"
get_conn_mock.return_value.reports.return_value.insert.return_value.execute.return_value = (
return_value
)
result = self.hook.insert_report(profile_id=PROFILE_ID, report=report)
get_conn_mock.return_value.reports.return_value.insert.assert_called_once_with(
profileId=PROFILE_ID, body=report
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_list_reports(self, get_conn_mock):
max_results = 42
scope = "SCOPE"
sort_field = "SORT_FIELD"
sort_order = "SORT_ORDER"
items = ["item"]
return_value = {"nextPageToken": None, "items": items}
get_conn_mock.return_value.reports.return_value.list.return_value.execute.return_value = return_value
request_mock = mock.MagicMock()
request_mock.execute.return_value = {"nextPageToken": None, "items": items}
get_conn_mock.return_value.reports.return_value.list_next.side_effect = [
request_mock,
request_mock,
request_mock,
None,
]
result = self.hook.list_reports(
profile_id=PROFILE_ID,
max_results=max_results,
scope=scope,
sort_field=sort_field,
sort_order=sort_order,
)
get_conn_mock.return_value.reports.return_value.list.assert_called_once_with(
profileId=PROFILE_ID,
maxResults=max_results,
scope=scope,
sortField=sort_field,
sortOrder=sort_order,
)
assert items * 4 == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_patch_report(self, get_conn_mock):
update_mask = {"test": "test"}
return_value = "TEST"
get_conn_mock.return_value.reports.return_value.patch.return_value.execute.return_value = return_value
result = self.hook.patch_report(profile_id=PROFILE_ID, report_id=REPORT_ID, update_mask=update_mask)
get_conn_mock.return_value.reports.return_value.patch.assert_called_once_with(
profileId=PROFILE_ID, reportId=REPORT_ID, body=update_mask
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_run_report(self, get_conn_mock):
synchronous = True
return_value = "TEST"
get_conn_mock.return_value.reports.return_value.run.return_value.execute.return_value = return_value
result = self.hook.run_report(profile_id=PROFILE_ID, report_id=REPORT_ID, synchronous=synchronous)
get_conn_mock.return_value.reports.return_value.run.assert_called_once_with(
profileId=PROFILE_ID, reportId=REPORT_ID, synchronous=synchronous
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
def test_update_report(self, get_conn_mock):
return_value = "TEST"
get_conn_mock.return_value.reports.return_value.update.return_value.execute.return_value = (
return_value
)
result = self.hook.update_report(profile_id=PROFILE_ID, report_id=REPORT_ID)
get_conn_mock.return_value.reports.return_value.update.assert_called_once_with(
profileId=PROFILE_ID, reportId=REPORT_ID
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform."
"hooks.campaign_manager.GoogleCampaignManagerHook.get_conn"
)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.campaign_manager.GoogleCampaignManagerHook"
"._conversions_batch_request"
)
def test_conversion_batch_insert(self, batch_request_mock, get_conn_mock):
conversions = [{"conversions1": "value"}, {"conversions2": "value"}]
return_value = {'hasFailures': False}
get_conn_mock.return_value.conversions.return_value.batchinsert.return_value.execute.return_value = (
return_value
)
batch_request_mock.return_value = "batch_request_mock"
result = self.hook.conversions_batch_insert(
profile_id=PROFILE_ID,
conversions=conversions,
encryption_entity_id=ENCRYPTION_ENTITY_ID,
encryption_entity_type=ENCRYPTION_ENTITY_TYPE,
encryption_source=ENCRYPTION_SOURCE,
)
batch_request_mock.assert_called_once_with(
conversions=conversions,
encryption_entity_id=ENCRYPTION_ENTITY_ID,
encryption_entity_type=ENCRYPTION_ENTITY_TYPE,
encryption_source=ENCRYPTION_SOURCE,
kind="dfareporting#conversionsBatchInsertRequest",
)
get_conn_mock.return_value.conversions.return_value.batchinsert.assert_called_once_with(
profileId=PROFILE_ID, body=batch_request_mock.return_value
)
assert return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"campaign_manager.GoogleCampaignManagerHook.get_conn"
)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.campaign_manager.GoogleCampaignManagerHook"
"._conversions_batch_request"
)
def test_conversions_batch_update(self, batch_request_mock, get_conn_mock):
conversions = [{"conversions1": "value"}, {"conversions2": "value"}]
return_value = {'hasFailures': False}
get_conn_mock.return_value.conversions.return_value.batchupdate.return_value.execute.return_value = (
return_value
)
batch_request_mock.return_value = "batch_request_mock"
result = self.hook.conversions_batch_update(
profile_id=PROFILE_ID,
conversions=conversions,
encryption_entity_id=ENCRYPTION_ENTITY_ID,
encryption_entity_type=ENCRYPTION_ENTITY_TYPE,
encryption_source=ENCRYPTION_SOURCE,
)
batch_request_mock.assert_called_once_with(
conversions=conversions,
encryption_entity_id=ENCRYPTION_ENTITY_ID,
encryption_entity_type=ENCRYPTION_ENTITY_TYPE,
encryption_source=ENCRYPTION_SOURCE,
kind="dfareporting#conversionsBatchUpdateRequest",
)
get_conn_mock.return_value.conversions.return_value.batchupdate.assert_called_once_with(
profileId=PROFILE_ID, body=batch_request_mock.return_value
)
assert return_value == result
| {
"content_hash": "fa50197dd19c501d4c15508185a464d9",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 110,
"avg_line_length": 36.89273356401384,
"alnum_prop": 0.6519414743950478,
"repo_name": "lyft/incubator-airflow",
"id": "9eae66a9cf21d7d5c2598228d9bd59a8c41effea",
"size": "11449",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tests/providers/google/marketing_platform/hooks/test_campaign_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
} |
import scrapy
class ProxySpiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| {
"content_hash": "74319ec5f646bd5e191c3817d50e847d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.6884057971014492,
"repo_name": "arthurmmm/hq-proxies",
"id": "dfa18d041543dda43e82bfb1c64c0d23bbe3f4d3",
"size": "290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proxy_spider/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28484"
}
],
"symlink_target": ""
} |
import qctests.CSIRO_wire_break
import util.testingProfile
import numpy
##### CSIRO_wire_break_test ---------------------------------------------------
def test_CSIRO_wire_break():
'''
Spot-check the nominal behavior of the CSIRO wire break test.
'''
# too cold at the bottom of xbt profile
p = util.testingProfile.fakeProfile([-2.399,-2.399,-2.4001], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
truth[2] = True
assert numpy.array_equal(qc, truth), 'failed to flag too-cold temperature at bottom of profile'
# too hot at bottom of xbt profile
p = util.testingProfile.fakeProfile([31.99,31.99,32.001], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
truth[2] = True
assert numpy.array_equal(qc, truth), 'failed to flag too-hot temperature at bottom of profile'
# right on border - no flag
p = util.testingProfile.fakeProfile([-2.399,-2.399,-2.4], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
print(qc)
print(truth)
assert numpy.array_equal(qc, truth), 'flagged marginally cold temperature at bottom of profile'
p = util.testingProfile.fakeProfile([31.99,31.99,32], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), 'flagged marginally hot temperature at bottom of profile'
# don't flag if not an xbt
p = util.testingProfile.fakeProfile([0,0,-100], [10,20,30], probe_type=1)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), 'flagged non-xbt profile'
# don't flag if not at bottom of profile
p = util.testingProfile.fakeProfile([0,32.01,31.99], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), "flagged hot temperature that wasn't at bottom of profile"
# flag both sides of a gap
p = util.testingProfile.fakeProfile([9,9,10], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.ones(3, dtype=bool)
truth[0] = False
assert numpy.array_equal(qc, truth), "should flag both sides of a gap"
| {
"content_hash": "2c94c1bab47385e9fda3d3818decb406",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 103,
"avg_line_length": 43.44642857142857,
"alnum_prop": 0.6588573777229757,
"repo_name": "BillMills/AutoQC",
"id": "c617d6dbeba3014f3450b02e08c7e71af2a23380",
"size": "2433",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/CSIRO_wire_break_validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "830437"
},
{
"name": "Shell",
"bytes": "2581"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import scipy.stats as sps
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from shelve import DbfilenameShelf
from contextlib import closing
from collections import defaultdict
from functools import partial
from sklearn.preprocessing import OneHotEncoder
from genomic_neuralnet.analyses.plots \
import get_nn_model_data, palette, out_dir \
, get_significance_letters
sns.set_style('dark')
sns.set_palette(palette)
def string_to_label((species, trait)):
trait_name = trait.replace('_', ' ').title()
species_name = species.title()
if trait_name.count(' ') > 1:
trait_name = trait_name.replace(' ', '\n')
return '{}\n{}'.format(species_name, trait_name)
def make_best_dataframe(shelf_data):
data_dict = defaultdict(partial(defaultdict, dict))
num_models = len(shelf_data)
for model_name, optimization in shelf_data.iteritems():
for species_trait, opt_result in optimization.iteritems():
species, trait, gpu = tuple(species_trait.split('|'))
max_fit_index = opt_result.df['mean'].idxmax()
best_fit = opt_result.df.loc[max_fit_index]
mean_acc = best_fit.loc['mean']
sd_acc = best_fit.loc['std_dev']
hidden = best_fit.loc['hidden']
count = opt_result.folds * opt_result.runs
raw_results = best_fit.loc['raw_results']
data_dict[species][trait][model_name] = (mean_acc, sd_acc, count, raw_results, hidden)
# Add species column. Repeat once per trait per model (2*num models).
accuracy_df = pd.DataFrame({'species': np.repeat(data_dict.keys(), num_models*2)})
# Add trait column.
flattened_data = []
for species, trait_dict in data_dict.iteritems():
for trait, model_dict in trait_dict.iteritems():
for model, (mean, sd, count, raw_res, hidden) in model_dict.iteritems():
flattened_data.append((trait, model, mean, sd, count, raw_res, hidden))
accuracy_df['trait'], accuracy_df['model'], accuracy_df['mean'], \
accuracy_df['sd'], accuracy_df['count'], accuracy_df['raw_results'], \
accuracy_df['hidden'] = zip(*flattened_data)
return accuracy_df
def make_plot(accuracy_df):
accuracy_df = accuracy_df.sort_values(by=['species', 'trait'], ascending=[1,0])
fig, ax = plt.subplots()
species_and_traits = accuracy_df[['species', 'trait']].drop_duplicates()
x = np.arange(len(species_and_traits))
models = sorted(accuracy_df['model'].unique())
width = 0.22
species_list = species_and_traits['species']
trait_list = species_and_traits['trait']
bar_sets = []
error_offsets = []
for idx, model in enumerate(models):
means = accuracy_df[accuracy_df['model'] == model]['mean'].values
std_devs = accuracy_df[accuracy_df['model'] == model]['sd'].values
counts = accuracy_df[accuracy_df['model'] == model]['count'].values
std_errs = std_devs / np.sqrt(counts) # SE = sigma / sqrt(N)
# Size of 95% CI is the SE multiplied by a constant from the t distribution
# with n-1 degrees of freedom. [0] is the positive interval direction.
#confidence_interval_mult = sps.t.interval(alpha=0.95, df=counts - 1)[0]
#confidence_interval = confidence_interval_mult * std_errs
offset = width * idx
color = palette[idx]
b = ax.bar(x + offset, means, width, color=color)
e = ax.errorbar(x + offset + width/2, means, yerr=std_errs, ecolor='black', fmt='none')
bar_sets.append((b, model))
error_offsets.append(std_errs)
significance_letter_lookup = get_significance_letters(accuracy_df, ordered_model_names=models)
def label(idx, rects, model):
errors = error_offsets[idx]
for error, rect, species, trait in zip(errors, rects, species_list, trait_list):
height = rect.get_height()
significance_letter = significance_letter_lookup[model][species][trait]
ax.text( rect.get_x() + rect.get_width()/2.
, height + error + 0.02
, significance_letter
, ha='center'
, va='bottom')
[label(idx, b, m) for idx, (b,m) in enumerate(bar_sets)]
# Axis labels (layer 1).
ax.set_ylabel('Accuracy')
ax.set_xticks(x + width / 2 * len(models))
ax.set_xticklabels(map(string_to_label, zip(species_list, trait_list)))
ax.set_xlim((0 - width / 2, len(trait_list)))
ax.set_ylim((0, 1))
# Legend
ax.legend(map(lambda x: x[0], bar_sets), list(models))
plt.tight_layout()
fig_path = os.path.join(out_dir, 'network_comparison.png')
plt.savefig(fig_path, dpi=500)
plt.show()
def main():
data = get_nn_model_data()
accuracy_df = make_best_dataframe(data)
make_plot(accuracy_df)
if __name__ == '__main__':
main()
| {
"content_hash": "05a00e6c8133e81f86224e55d6bff1d4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 98,
"avg_line_length": 37.37777777777778,
"alnum_prop": 0.6266349583828775,
"repo_name": "rileymcdowell/genomic-neuralnet",
"id": "bb99ed02b2320b769b94b87eec7311b89c06aa43",
"size": "5046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genomic_neuralnet/analyses/plots/network_comparison/compare_networks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101344"
},
{
"name": "R",
"bytes": "1914"
},
{
"name": "Shell",
"bytes": "12629"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from os.path import dirname
from tml.api.mock import File
FIXTURES_PATH = '%s/fixtures' % dirname(dirname(__file__))
URLS = [('projects/current/definition', None),
('projects/current/definition', {'locale': 'en'}),
('projects/2/definition', None),
('languages/ru/definition', ),
('languages/en/definition', ),
('projects/1/translations', {'locale':'ru','page':1}),
('projects/1768/definition', {'locale':'ru,en', 'source': '/home/index'}),
('sources/register_keys', None),
('translation_keys/8ad5a7fe0a12729764e31a1e3ca80059/translations', {'locale':'ru','page':1}),
('translation_keys/bdc08159a02e7ff01ca188c03fa1323e/translations', {'locale':'ru','page':1}),
('sources/6a992d5529f459a44fee58c733255e86/translations', {'locale':'ru'}),]
class Client(File):
def __init__(self, data = {}):
super(Client, self).__init__(FIXTURES_PATH, data, False)
@classmethod
def read_all(cls):
return cls().readdir('')
class DummyUser(object):
def __init__(self, name, gender=None):
self.name = name
self.gender = gender or 'male'
| {
"content_hash": "9ed2235d51ed6cfad229a3e710e64809",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 101,
"avg_line_length": 38.32258064516129,
"alnum_prop": 0.6254208754208754,
"repo_name": "translationexchange/tml-python",
"id": "5e2201024a9509c456695d668d6b21b6867225e6",
"size": "1207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/mock/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "446575"
},
{
"name": "Shell",
"bytes": "294"
}
],
"symlink_target": ""
} |
import os
import time
from tornado.testing import ExpectLog, AsyncTestCase, gen_test
from remoteappmanager.tests.temp_mixin import TempMixin
from remoteappmanager.tests.utils import mock_coro_factory
from unittest.mock import Mock, patch
from remoteappmanager.jupyterhub.auth import GitHubWhitelistAuthenticator
class TestGithubWhiteListAuthenticator(TempMixin,
AsyncTestCase):
def setUp(self):
self.auth = GitHubWhitelistAuthenticator()
self.auth.authenticate = mock_coro_factory(return_value="foo")
super().setUp()
@gen_test
def test_basic_auth(self):
auth = self.auth
response = yield auth.authenticate(Mock(), {"username": "foo"})
self.assertEqual(response, "foo")
@gen_test
def test_basic_auth_with_whitelist_file(self):
whitelist_path = os.path.join(self.tempdir, "whitelist.txt")
with open(whitelist_path, "w") as f:
f.write("foo\n")
f.write("bar\n")
auth = self.auth
auth.whitelist_file = whitelist_path
response = yield auth.get_authenticated_user(
Mock(), {"username": "foo"})
self.assertEqual(response['name'], "foo")
        # Check again to exercise the code path that skips reloading the whitelist
response = yield auth.get_authenticated_user(
Mock(), {"username": "foo"})
self.assertEqual(response["name"], "foo")
# wait one second, so that we see a change in mtime.
time.sleep(1)
# Change the file and see if we get a different behavior
with open(whitelist_path, "w") as f:
f.write("bar\n")
with ExpectLog('traitlets', "User 'foo' not in whitelist."):
response = yield auth.get_authenticated_user(
Mock(), {"username": "foo"})
self.assertEqual(response, None)
@gen_test
def test_basic_auth_without_whitelist_file(self):
auth = self.auth
auth.whitelist_file = "/does/not/exist.txt"
response = yield auth.get_authenticated_user(Mock(),
{"username": "foo"})
# Should be equivalent to no whitelist, so everybody allowed
self.assertEqual(response['name'], "foo")
@gen_test
def test_exception_during_read(self):
whitelist_path = os.path.join(self.tempdir, "whitelist.txt")
with open(whitelist_path, "w") as f:
f.write("bar\n")
auth = self.auth
auth.whitelist_file = whitelist_path
# Do the first triggering, so that we load the file content.
with ExpectLog('traitlets', "User 'foo' not in whitelist."):
response = yield auth.get_authenticated_user(
Mock(), {"username": "foo"})
self.assertEqual(response, None)
# Then try again with an exception occurring
with patch("os.path.getmtime") as p:
p.side_effect = Exception("BOOM!")
with ExpectLog('traitlets', ""):
response = yield auth.get_authenticated_user(
Mock(), {"username": "foo"})
self.assertEqual(response, None)
def test_dummy_setter(self):
whitelist_path = os.path.join(self.tempdir, "whitelist.txt")
with open(whitelist_path, "w") as f:
f.write("bar\n")
auth = self.auth
auth.whitelist_file = whitelist_path
auth.whitelist = set()
self.assertNotEqual(auth.whitelist, set())
@gen_test
def test_comment_out(self):
whitelist_path = os.path.join(self.tempdir, "whitelist.txt")
with open(whitelist_path, "w") as f:
f.write("# this is a comment\n")
f.write("foo\n")
f.write("bar\n")
auth = self.auth
auth.whitelist_file = whitelist_path
yield auth.get_authenticated_user(Mock(), {"username": "foo"})
self.assertEqual(len(auth.whitelist), 2)
| {
"content_hash": "937e271dbec370c577267e5ff850d4bc",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 74,
"avg_line_length": 34.310344827586206,
"alnum_prop": 0.5974874371859297,
"repo_name": "simphony/simphony-remote",
"id": "b933661ed2019d51caf8f68ee6c63f67cd01dd32",
"size": "3980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remoteappmanager/jupyterhub/auth/tests/test_github_whitelist_authenticator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14011"
},
{
"name": "JavaScript",
"bytes": "51718"
},
{
"name": "Makefile",
"bytes": "6052"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "418020"
},
{
"name": "Shell",
"bytes": "1690"
},
{
"name": "Vue",
"bytes": "46644"
}
],
"symlink_target": ""
} |
import logging
import copy
from itertools import count
import claripy
from claripy.vsa import ValueSet, RegionAnnotation
from ..storage.memory import SimMemory, AddressWrapper, MemoryStoreRequest
from ..errors import SimMemoryError, SimAbstractMemoryError
from ..sim_options import KEEP_MEMORY_READS_DISCRETE, AVOID_MULTIVALUED_READS, REGION_MAPPING
from .symbolic_memory import SimSymbolicMemory
from ..state_plugins.sim_action_object import _raw_ast
l = logging.getLogger("angr.state_plugins.abstract_memory")
WRITE_TARGETS_LIMIT = 2048
READ_TARGETS_LIMIT = 4096
#pylint:disable=unidiomatic-typecheck
invalid_read_ctr = count()
class MemoryRegion(object):
def __init__(self, id, state, is_stack=False, related_function_addr=None, init_memory=True, backer_dict=None, endness=None): #pylint:disable=redefined-builtin,unused-argument
self._endness = endness
self._id = id
self._state = state
self._is_stack = id.startswith('stack_') # TODO: Fix it
self._related_function_addr = related_function_addr
# This is a map from tuple (basicblock_key, stmt_id) to
# AbstractLocation objects
self._alocs = { }
if init_memory:
if backer_dict is None:
self._memory = SimSymbolicMemory(memory_id=id, endness=self._endness, abstract_backer=True)
else:
self._memory = SimSymbolicMemory(memory_backer=backer_dict, memory_id=id, endness=self._endness, abstract_backer=True)
self._memory.set_state(state)
@property
def id(self):
return self._id
@property
def memory(self):
return self._memory
@property
def state(self):
return self._state
@property
def alocs(self):
return self._alocs
@property
def is_stack(self):
return self._is_stack
@property
def related_function_addr(self):
return self._related_function_addr
def get_abstract_locations(self, addr, size):
"""
        Get a list of abstract locations that are within the range [addr, addr + size).
This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad
implementation for now.
:param addr: Starting address of the memory region.
:param size: Size of the memory region, in bytes.
:return: A list of covered AbstractLocation objects, or an empty list if there is none.
"""
ret = [ ]
for aloc in self._alocs.itervalues():
for seg in aloc.segments:
if seg.offset >= addr and seg.offset < addr + size:
ret.append(aloc)
break
return ret
def addrs_for_name(self, name):
return self.memory.addrs_for_name(name)
def set_state(self, state):
self._state = state
self._memory.set_state(state)
@SimMemory.memo
def copy(self, memo):
r = MemoryRegion(self._id, self.state,
is_stack=self._is_stack,
related_function_addr=self._related_function_addr,
init_memory=False, endness=self._endness)
r._memory = self.memory.copy(memo)
r._alocs = copy.deepcopy(self._alocs)
return r
def store(self, request, bbl_addr, stmt_id, ins_addr):
if ins_addr is not None:
#aloc_id = (bbl_addr, stmt_id)
aloc_id = ins_addr
else:
# It comes from a SimProcedure. We'll use bbl_addr as the aloc_id
aloc_id = bbl_addr
if aloc_id not in self._alocs:
self._alocs[aloc_id] = self.state.se.AbstractLocation(bbl_addr,
stmt_id,
self.id,
region_offset=request.addr,
size=len(request.data) // self.state.arch.byte_width)
return self.memory._store(request)
else:
if self._alocs[aloc_id].update(request.addr, len(request.data) // self.state.arch.byte_width):
return self.memory._store(request)
else:
#return self.memory._store_with_merge(request)
return self.memory._store(request)
def load(self, addr, size, bbl_addr, stmt_idx, ins_addr): #pylint:disable=unused-argument
#if bbl_addr is not None and stmt_id is not None:
return self.memory.load(addr, size, inspect=False)
def _merge_alocs(self, other_region):
"""
Helper function for merging.
"""
merging_occurred = False
for aloc_id, aloc in other_region.alocs.iteritems():
if aloc_id not in self.alocs:
self.alocs[aloc_id] = aloc.copy()
merging_occurred = True
else:
# Update it
merging_occurred |= self.alocs[aloc_id].merge(aloc)
return merging_occurred
def merge(self, others, merge_conditions, common_ancestor=None):
merging_occurred = False
for other_region in others:
merging_occurred |= self._merge_alocs(other_region)
merging_occurred |= self.memory.merge(
[other_region.memory], merge_conditions, common_ancestor=common_ancestor
)
return merging_occurred
def widen(self, others):
widening_occurred = False
for other_region in others:
widening_occurred |= self._merge_alocs(other_region)
widening_occurred |= self.memory.widen([ other_region.memory ])
return widening_occurred
def __contains__(self, addr):
return addr in self.memory
def was_written_to(self, addr):
return self.memory.was_written_to(addr)
def dbg_print(self, indent=0):
"""
Print out debugging information
"""
print "%sA-locs:" % (" " * indent)
for aloc_id, aloc in self._alocs.items():
print "%s<0x%x> %s" % (" " * (indent + 2), aloc_id, aloc)
print "%sMemory:" % (" " * indent)
self.memory.dbg_print(indent=indent + 2)
class SimAbstractMemory(SimMemory): #pylint:disable=abstract-method
"""
This is an implementation of the abstract store in paper [TODO].
Some differences:
- For stack variables, we map the absolute stack address to each region so
that we can effectively trace stack accesses. When tracing into a new
function, you should call set_stack_address_mapping() to create a new mapping.
When exiting from a function, you should cancel the previous mapping by
calling unset_stack_address_mapping().
Currently this is only used for stack!
"""
def __init__(self, memory_backer=None, memory_id="mem", endness=None, stack_region_map=None,
generic_region_map=None):
SimMemory.__init__(self,
endness=endness,
stack_region_map=stack_region_map,
generic_region_map=generic_region_map,
)
self._regions = {}
self._stack_size = None
self._memory_id = memory_id
self.id = self._memory_id
# Since self.state is None at this time (self.state will be set to the real SimState instance later when
# self.set_state() is called), we just save the backer argument to a temporary variable, and then initialize it
# later in self.set_state() method.
self._temp_backer = memory_backer
@property
def regions(self):
return self._regions
def _region_base(self, region):
"""
Get the base address of a memory region.
:param str region: ID of the memory region
:return: Address of the memory region
:rtype: int
"""
if region == 'global':
region_base_addr = 0
elif region.startswith('stack_'):
region_base_addr = self._stack_region_map.absolutize(region, 0)
else:
region_base_addr = self._generic_region_map.absolutize(region, 0)
return region_base_addr
def set_stack_size(self, size):
self._stack_size = size
def create_region(self, key, state, is_stack, related_function_addr, endness, backer_dict=None):
"""
Create a new MemoryRegion with the region key specified, and store it to self._regions.
:param key: a string which is the region key
:param state: the SimState instance
:param bool is_stack: Whether this memory region is on stack. True/False
:param related_function_addr: Which function first creates this memory region. Just for reference.
:param endness: The endianness.
:param backer_dict: The memory backer object.
:return: None
"""
self._regions[key] = MemoryRegion(key,
state=state,
is_stack=is_stack,
related_function_addr=related_function_addr,
endness=endness,
backer_dict=backer_dict,
)
def _normalize_address(self, region_id, relative_address, target_region=None):
"""
If this is a stack address, we convert it to a correct region and address
:param region_id: a string indicating which region the address is relative to
:param relative_address: an address that is relative to the region parameter
:param target_region: the ideal target region that address is normalized to. None means picking the best fit.
:return: an AddressWrapper object
"""
if self._stack_region_map.is_empty and self._generic_region_map.is_empty:
# We don't have any mapped region right now
return AddressWrapper(region_id, 0, relative_address, False, None)
# We wanna convert this address to an absolute address first
if region_id.startswith('stack_'):
absolute_address = self._stack_region_map.absolutize(region_id, relative_address)
else:
absolute_address = self._generic_region_map.absolutize(region_id, relative_address)
stack_base = self._stack_region_map.stack_base
if (relative_address <= stack_base and
relative_address > stack_base - self._stack_size) or \
(target_region is not None and target_region.startswith('stack_')):
# The absolute address seems to be in the stack region.
# Map it to stack
new_region_id, new_relative_address, related_function_addr = self._stack_region_map.relativize(
absolute_address,
target_region_id=target_region
)
return AddressWrapper(new_region_id, self._region_base(new_region_id), new_relative_address, True,
related_function_addr
)
else:
new_region_id, new_relative_address, related_function_addr = self._generic_region_map.relativize(
absolute_address,
target_region_id=target_region
)
return AddressWrapper(new_region_id, self._region_base(new_region_id), new_relative_address, False, None)
def set_state(self, state):
"""
Overriding the SimStatePlugin.set_state() method
:param state: A SimState object
:return: None
"""
# Sanity check
if REGION_MAPPING not in state.options:
# add REGION_MAPPING into state.options
l.warning('Option "REGION_MAPPING" must be enabled when using SimAbstractMemory as the memory model. '
'The option is added to state options as a courtesy.'
)
state.options.add(REGION_MAPPING)
SimMemory.set_state(self, state)
for _,v in self._regions.items():
v.set_state(state)
# Delayed initialization of backer argument from __init__
if self._temp_backer is not None:
for region, backer_dict in self._temp_backer.items():
self._regions[region] = MemoryRegion(region, self.state,
init_memory=True,
backer_dict=backer_dict,
endness=self.endness
)
self._temp_backer = None
def normalize_address(self, addr, is_write=False, convert_to_valueset=False, target_region=None, condition=None): #pylint:disable=arguments-differ
"""
Convert a ValueSet object into a list of addresses.
:param addr: A ValueSet object (which describes an address)
:param is_write: Is this address used in a write or not
:param convert_to_valueset: True if you want to have a list of ValueSet instances instead of AddressWrappers,
False otherwise
:param target_region: Which region to normalize the address to. To leave the decision to SimuVEX, set it to None
:return: A list of AddressWrapper or ValueSet objects
"""
# Apply the condition if necessary
if condition is not None:
addr = self._apply_condition_to_symbolic_addr(addr, condition)
if type(addr) in (int, long):
addr = self.state.se.BVV(addr, self.state.arch.bits)
addr_with_regions = self._normalize_address_type(addr)
address_wrappers = [ ]
for region, addr_si in addr_with_regions:
if is_write:
concrete_addrs = addr_si.eval(WRITE_TARGETS_LIMIT)
if len(concrete_addrs) == WRITE_TARGETS_LIMIT:
self.state.history.add_event('mem', message='too many targets to write to. address = %s' % addr_si)
else:
concrete_addrs = addr_si.eval(READ_TARGETS_LIMIT)
if len(concrete_addrs) == READ_TARGETS_LIMIT:
self.state.history.add_event('mem', message='too many targets to read from. address = %s' % addr_si)
for c in concrete_addrs:
aw = self._normalize_address(region, c, target_region=target_region)
address_wrappers.append(aw)
if convert_to_valueset:
return [ i.to_valueset(self.state) for i in address_wrappers ]
else:
return address_wrappers
def _normalize_address_type(self, addr): #pylint:disable=no-self-use
"""
Convert address of different types to a list of mapping between region IDs and offsets (strided intervals).
:param claripy.ast.Base addr: Address to convert
:return: A list of mapping between region IDs and offsets.
:rtype: dict
"""
addr_e = _raw_ast(addr)
if isinstance(addr_e, (claripy.bv.BVV, claripy.vsa.StridedInterval, claripy.vsa.ValueSet)):
raise SimMemoryError('_normalize_address_type() does not take claripy models.')
if isinstance(addr_e, claripy.ast.Base):
if not isinstance(addr_e._model_vsa, ValueSet):
# Convert it to a ValueSet first by annotating it
addr_e = addr_e.annotate(RegionAnnotation('global', 0, addr_e._model_vsa))
return addr_e._model_vsa.items()
else:
raise SimAbstractMemoryError('Unsupported address type %s' % type(addr_e))
# FIXME: symbolic_length is also a hack!
def _store(self, req):
address_wrappers = self.normalize_address(req.addr, is_write=True, convert_to_valueset=False)
req.actual_addresses = [ ]
req.stored_values = [ ]
for aw in address_wrappers:
r = self._do_store(aw.address, req.data, aw.region, req.endness,
is_stack=aw.is_on_stack, related_function_addr=aw.function_address)
if r.completed:
req.completed = True
req.actual_addresses.append(aw.to_valueset(self.state))
req.constraints.extend(r.constraints)
req.stored_values.extend(r.stored_values)
# No constraints are generated...
return req
def _do_store(self, addr, data, key, endness, is_stack=False, related_function_addr=None):
if type(key) is not str:
raise Exception('Incorrect type %s of region_key' % type(key))
bbl_addr, stmt_id, ins_addr = self.state.scratch.bbl_addr, self.state.scratch.stmt_idx, self.state.scratch.ins_addr
if key not in self._regions:
self.create_region(key, self.state, is_stack, related_function_addr, self.endness)
r = MemoryStoreRequest(addr, data=data, endness=endness)
self._regions[key].store(r, bbl_addr, stmt_id, ins_addr)
return r
def _load(self, addr, size, condition=None, fallback=None,
inspect=True, events=True, ret_on_segv=False):
address_wrappers = self.normalize_address(addr, is_write=False, condition=condition)
if isinstance(size, claripy.ast.BV) and isinstance(size._model_vsa, ValueSet):
# raise Exception('Unsupported type %s for size' % type(size._model_vsa))
l.warning('_load(): size %s is a ValueSet. Something is wrong.', size)
if self.state.scratch.ins_addr is not None:
var_name = 'invalid_read_%d_%#x' % (
invalid_read_ctr.next(),
self.state.scratch.ins_addr
)
else:
var_name = 'invalid_read_%d_None' % invalid_read_ctr.next()
return address_wrappers, self.state.se.Unconstrained(var_name, 32), [True]
val = None
if len(address_wrappers) > 1 and AVOID_MULTIVALUED_READS in self.state.options:
val = self.state.se.Unconstrained('unconstrained_read', size * self.state.arch.byte_width)
return address_wrappers, val, [True]
for aw in address_wrappers:
new_val = self._do_load(aw.address, size, aw.region,
is_stack=aw.is_on_stack,
related_function_addr=aw.function_address,
)
if val is None:
if KEEP_MEMORY_READS_DISCRETE in self.state.options:
val = self.state.se.DSIS(to_conv=new_val, max_card=100000)
else:
val = new_val
else:
val = val.union(new_val)
return address_wrappers, val, [True]
def _do_load(self, addr, size, key, is_stack=False, related_function_addr=None):
if type(key) is not str:
raise Exception('Incorrect type %s of region_key' % type(key))
bbl_addr, stmt_id, ins_addr = self.state.scratch.bbl_addr, self.state.scratch.stmt_idx, self.state.scratch.ins_addr
if key not in self._regions:
self.create_region(key, self.state, is_stack, related_function_addr, self.endness)
return self._regions[key].load(addr, size, bbl_addr, stmt_id, ins_addr)
def _apply_condition_to_symbolic_addr(self, addr, condition):
_, converted = self.state.solver.constraint_to_si(condition)
for original_expr, constrained_expr in converted:
addr = addr.replace(original_expr, constrained_expr)
return addr
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True,
disable_actions=False):
src_memory = self if src_memory is None else src_memory
dst_memory = self if dst_memory is None else dst_memory
max_size = self.state.se.max_int(size)
if max_size == 0:
return None, [ ]
data = src_memory.load(src, max_size, inspect=inspect, disable_actions=disable_actions)
dst_memory.store(dst, data, size=size, condition=condition, inspect=inspect, disable_actions=disable_actions)
return data
def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
if type(addr) in (int, long):
addr = self.state.se.BVV(addr, self.state.arch.bits)
addr = self._normalize_address_type(addr)
# TODO: For now we are only finding in one region!
for region, si in addr:
si = self.state.se.SI(to_conv=si)
r, s, i = self._regions[region].memory.find(si, what, max_search=max_search,
max_symbolic_bytes=max_symbolic_bytes, default=default,
step=step
)
# Post process r so that it's still a ValueSet variable
region_base_addr = self._region_base(region)
r = self.state.se.ValueSet(r.size(), region, region_base_addr, r._model_vsa)
return r, s, i
def get_segments(self, addr, size):
"""
Get a segmented memory region based on AbstractLocation information available from VSA.
Here are some assumptions to make this method fast:
- The entire memory region [addr, addr + size] is located within the same MemoryRegion
- The address 'addr' has only one concrete value. It cannot be concretized to multiple values.
:param addr: An address
:param size: Size of the memory area in bytes
        :return: An ordered list of the sizes of each segment in the requested memory region
"""
address_wrappers = self.normalize_address(addr, is_write=False)
# assert len(address_wrappers) > 0
aw = address_wrappers[0]
region_id = aw.region
if region_id in self.regions:
region = self.regions[region_id]
alocs = region.get_abstract_locations(aw.address, size)
# Collect all segments and sort them
segments = [ ]
for aloc in alocs:
segments.extend(aloc.segments)
segments = sorted(segments, key=lambda x: x.offset)
# Remove all overlapping segments
processed_segments = [ ]
last_seg = None
for seg in segments:
if last_seg is None:
last_seg = seg
processed_segments.append(seg)
else:
# Are they overlapping?
if seg.offset >= last_seg.offset and seg.offset <= last_seg.offset + size:
continue
processed_segments.append(seg)
# Make it a list of sizes
sizes = [ ]
next_pos = aw.address
for seg in processed_segments:
if seg.offset > next_pos:
gap = seg.offset - next_pos
assert gap > 0
sizes.append(gap)
next_pos += gap
if seg.size + next_pos > aw.address + size:
sizes.append(aw.address + size - next_pos)
next_pos += aw.address + size - next_pos
else:
sizes.append(seg.size)
next_pos += seg.size
if not sizes:
return [ size ]
return sizes
else:
# The region doesn't exist. Then there is only one segment!
return [ size ]
@SimMemory.memo
def copy(self, memo):
"""
Make a copy of this SimAbstractMemory object
:return:
"""
am = SimAbstractMemory(
memory_id=self._memory_id,
endness=self.endness,
stack_region_map=self._stack_region_map,
generic_region_map=self._generic_region_map
)
for region_id, region in self._regions.items():
am._regions[region_id] = region.copy(memo)
am._stack_size = self._stack_size
return am
def merge(self, others, merge_conditions, common_ancestor=None):
"""
Merge this guy with another SimAbstractMemory instance
"""
merging_occurred = False
for o in others:
for region_id, region in o._regions.items():
if region_id in self._regions:
merging_occurred |= self._regions[region_id].merge(
[region], merge_conditions, common_ancestor=common_ancestor
)
else:
merging_occurred = True
self._regions[region_id] = region
return merging_occurred
def widen(self, others):
widening_occurred = False
for o in others:
for region_id, region in o._regions.items():
if region_id in self._regions:
widening_occurred |= self._regions[region_id].widen([ region ])
else:
widening_occurred = True
self._regions[region_id] = region
return widening_occurred
def __contains__(self, dst):
if type(dst) in (int, long):
dst = self.state.se.BVV(dst, self.state.arch.bits)
addrs = self._normalize_address_type(dst)
for region, addr in addrs:
address_wrapper = self._normalize_address(region, addr.min)
return address_wrapper.address in self.regions[address_wrapper.region]
return False
def map_region(self, addr, length, permissions, init_zero=False): # pylint: disable=unused-argument
"""
Map a number of pages at address `addr` with permissions `permissions`.
:param addr: address to map the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
:param permissions: AST of permissions to map, will be a bitvalue representing flags
:param init_zero: Initialize page with zeros
"""
        l.warning('map_region() is not yet supported by SimAbstractMemory.')
return
def unmap_region(self, addr, length): # pylint: disable=unused-argument
"""
Unmap a number of pages at address `addr`
:param addr: address to unmap the pages at
        :param length: length in bytes of region to unmap, will be rounded upwards to the page size
        """
        l.warning('unmap_region() is not yet supported by SimAbstractMemory.')
return
def was_written_to(self, dst):
if type(dst) in (int, long):
dst = self.state.se.BVV(dst, self.state.arch.bits)
addrs = self._normalize_address_type(dst)
for region, addr in addrs:
address_wrapper = self._normalize_address(region, addr.min)
return self.regions[address_wrapper.region].was_written_to(address_wrapper.address)
return False
def dbg_print(self):
"""
Print out debugging information
"""
for region_id, region in self.regions.items():
print "Region [%s]:" % region_id
region.dbg_print(indent=2)
from ..sim_state import SimState
SimState.register_default('abs_memory', SimAbstractMemory)
| {
"content_hash": "b3f1c8698e14ea8ec31cd9bfc0e37339",
"timestamp": "",
"source": "github",
"line_count": 692,
"max_line_length": 178,
"avg_line_length": 39.895953757225435,
"alnum_prop": 0.5804476963199072,
"repo_name": "tyb0807/angr",
"id": "f58b3b7f4a04e5d146024d138070575d5c0dbfce",
"size": "27608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/state_plugins/abstract_memory.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39875"
},
{
"name": "Makefile",
"bytes": "610"
},
{
"name": "Python",
"bytes": "3884780"
}
],
"symlink_target": ""
} |
"""Functions that deals with local and device ports."""
import contextlib
import fcntl
import httplib
import logging
import os
import re
import socket
import traceback
import cmd_helper
import constants
# The following two methods are used to allocate the port source for various
# types of test servers. Because some network-related tests can be run on
# shards at the same time, it's important to allocate ports in a process-safe
# way. Here, we implement safe port allocation by leveraging flock.
def ResetTestServerPortAllocation():
"""Reset the port allocation to start from TEST_SERVER_PORT_FIRST.
Returns:
    Returns True if the reset succeeds. Otherwise returns False.
"""
try:
with open(constants.TEST_SERVER_PORT_FILE, 'w') as fp:
fp.write('%d' % constants.TEST_SERVER_PORT_FIRST)
if os.path.exists(constants.TEST_SERVER_PORT_LOCKFILE):
os.unlink(constants.TEST_SERVER_PORT_LOCKFILE)
return True
except Exception as e:
logging.error(e)
return False
def AllocateTestServerPort():
"""Allocate a port incrementally.
Returns:
Returns a valid port which should be in between TEST_SERVER_PORT_FIRST and
TEST_SERVER_PORT_LAST. Returning 0 means no more valid port can be used.
"""
  port = 0
  # Initialized up front so the finally block below is safe even if open() raises.
  fp_lock = None
  try:
fp_lock = open(constants.TEST_SERVER_PORT_LOCKFILE, 'w')
fcntl.flock(fp_lock, fcntl.LOCK_EX)
# Get current valid port and calculate next valid port.
assert os.path.exists(constants.TEST_SERVER_PORT_FILE)
with open(constants.TEST_SERVER_PORT_FILE, 'r+') as fp:
port = int(fp.read())
while IsHostPortUsed(port):
port += 1
if (port > constants.TEST_SERVER_PORT_LAST or
port < constants.TEST_SERVER_PORT_FIRST):
port = 0
else:
fp.seek(0, os.SEEK_SET)
fp.write('%d' % (port + 1))
except Exception as e:
logging.info(e)
finally:
if fp_lock:
fcntl.flock(fp_lock, fcntl.LOCK_UN)
fp_lock.close()
logging.info('Allocate port %d for test server.', port)
return port
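# Illustrative sketch, not part of the original module: the flock pattern used
# above in miniature. An exclusive lock around a counter file makes the
# read-increment-write cycle safe across concurrent test shards. The file paths
# and port range below are made-up stand-ins for the module's constants.
def _example_allocate_port(counter_file='/tmp/example_port_counter',
                           lock_file='/tmp/example_port_counter.lock',
                           first_port=20000, last_port=20100):
  fp_lock = open(lock_file, 'w')
  try:
    fcntl.flock(fp_lock, fcntl.LOCK_EX)
    if not os.path.exists(counter_file):
      with open(counter_file, 'w') as fp:
        fp.write('%d' % first_port)
    with open(counter_file, 'r+') as fp:
      port = int(fp.read())
      if first_port <= port <= last_port:
        fp.seek(0, os.SEEK_SET)
        fp.write('%d' % (port + 1))
      else:
        port = 0
  finally:
    fcntl.flock(fp_lock, fcntl.LOCK_UN)
    fp_lock.close()
  return port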
def IsHostPortUsed(host_port):
"""Checks whether the specified host port is used or not.
Uses -n -P to inhibit the conversion of host/port numbers to host/port names.
Args:
host_port: Port on host we want to check.
Returns:
True if the port on host is already used, otherwise returns False.
"""
  port_info = r'(127\.0\.0\.1)|(localhost)\:%d' % host_port
# TODO(jnd): Find a better way to filter the port.
re_port = re.compile(port_info, re.MULTILINE)
if re_port.findall(cmd_helper.GetCmdOutput(['lsof', '-nPi:%d' % host_port])):
return True
return False
def IsDevicePortUsed(adb, device_port, state=''):
"""Checks whether the specified device port is used or not.
Args:
adb: Instance of AndroidCommands for talking to the device.
device_port: Port on device we want to check.
state: String of the specified state. Default is empty string, which
means any state.
Returns:
True if the port on device is already used, otherwise returns False.
"""
base_url = '127.0.0.1:%d' % device_port
netstat_results = adb.RunShellCommand('netstat', log_result=False)
for single_connect in netstat_results:
# Column 3 is the local address which we want to check with.
connect_results = single_connect.split()
is_state_match = connect_results[5] == state if state else True
if connect_results[3] == base_url and is_state_match:
return True
return False
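# Illustrative parse of a single made-up netstat line, matching the column
# indexing used above: column 3 is the local address, column 5 the state.
_EXAMPLE_NETSTAT_LINE = 'tcp 0 0 127.0.0.1:8001 0.0.0.0:* LISTEN'
assert _EXAMPLE_NETSTAT_LINE.split()[3] == '127.0.0.1:8001'
assert _EXAMPLE_NETSTAT_LINE.split()[5] == 'LISTEN'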
def IsHttpServerConnectable(host, port, tries=3, command='GET', path='/',
expected_read='', timeout=2):
"""Checks whether the specified http server is ready to serve request or not.
Args:
host: Host name of the HTTP server.
port: Port number of the HTTP server.
tries: How many times we want to test the connection. The default value is
3.
command: The http command we use to connect to HTTP server. The default
command is 'GET'.
path: The path we use when connecting to HTTP server. The default path is
'/'.
expected_read: The content we expect to read from the response. The default
value is ''.
timeout: Timeout (in seconds) for each http connection. The default is 2s.
Returns:
Tuple of (connect status, client error). connect status is a boolean value
to indicate whether the server is connectable. client_error is the error
message the server returns when connect status is false.
"""
assert tries >= 1
for i in xrange(0, tries):
client_error = None
try:
with contextlib.closing(httplib.HTTPConnection(
host, port, timeout=timeout)) as http:
# Output some debug information when we have tried more than 2 times.
http.set_debuglevel(i >= 2)
http.request(command, path)
r = http.getresponse()
content = r.read()
if r.status == 200 and r.reason == 'OK' and content == expected_read:
return (True, '')
client_error = ('Bad response: %s %s version %s\n ' %
(r.status, r.reason, r.version) +
'\n '.join([': '.join(h) for h in r.getheaders()]))
except (httplib.HTTPException, socket.error) as e:
# Probably too quick connecting: try again.
exception_error_msgs = traceback.format_exception_only(type(e), e)
if exception_error_msgs:
client_error = ''.join(exception_error_msgs)
# Only returns last client_error.
return (False, client_error or 'Timeout')
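# Hypothetical usage sketch, not part of the original module: probe a local
# test server once with a short timeout. The host and port are placeholders.
if __name__ == '__main__':
  connectable, error = IsHttpServerConnectable(
      '127.0.0.1', 8000, tries=1, timeout=1)
  print('connectable=%s error=%s' % (connectable, error))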
| {
"content_hash": "4f800006316d3dd78ec19683ead33857",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 35.544871794871796,
"alnum_prop": 0.6703336339044184,
"repo_name": "keishi/chromium",
"id": "be994c362c74a0ec26f98688d72fb573fc20cfa6",
"size": "5712",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "build/android/pylib/ports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "C",
"bytes": "67452317"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "132681259"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "19048"
},
{
"name": "Java",
"bytes": "361412"
},
{
"name": "JavaScript",
"bytes": "16603687"
},
{
"name": "Objective-C",
"bytes": "9609581"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918683"
},
{
"name": "Python",
"bytes": "6407891"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4192593"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
} |
from experiments_manager.consumers import send_message
from experiments_manager.helper import MessageStatus
from git_manager.helpers.helper import get_exp_or_package_from_repo_name
from helpers.helper import get_package_or_experiment_without_request
from MOOCworkbench.celery import app
@app.task
def task_write_requirements_file(object_id, object_type):
"""Task to write requirements file to repository"""
exp_or_package = get_package_or_experiment_without_request(object_type, object_id)
language_helper = exp_or_package.language_helper()
language_helper.write_requirements_file()
username = exp_or_package.owner.user.username
send_message(username, MessageStatus.SUCCESS, 'Dependencies updated in GitHub')
@app.task
def task_update_requirements(repository_name):
"""Task to update the requirements for exp/package with repository_name,
task useful for git webhook"""
experiment = get_exp_or_package_from_repo_name(repository_name)
language_helper = experiment.language_helper()
language_helper.update_requirements()
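# Hedged usage sketch, not part of the original module: Celery tasks like the
# ones above are dispatched with .delay()/.apply_async(); .s() merely builds a
# signature without touching the broker. The repository name is illustrative.
if __name__ == '__main__':
    print(task_update_requirements.s('some-repository-name'))
    # To actually enqueue the task (requires a running broker):
    # task_update_requirements.delay('some-repository-name')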
| {
"content_hash": "fe079190dd9d105a9ea227523222dd7e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 86,
"avg_line_length": 44.458333333333336,
"alnum_prop": 0.7778819119025304,
"repo_name": "MOOCworkbench/MOOCworkbench",
"id": "83a977fd2ecc0f7a4a8c36a8f59b09c1142e51a3",
"size": "1067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "requirements_manager/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1942"
},
{
"name": "HTML",
"bytes": "129189"
},
{
"name": "Python",
"bytes": "423140"
},
{
"name": "Shell",
"bytes": "952"
}
],
"symlink_target": ""
} |
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import ome_files
class TestVersion(object):
def test_upstream_version(self):
v = ome_files.upstream_version_info
for a in "major", "minor", "patch", "extra":
assert hasattr(v, a)
assert len(v) == 4
assert v[:1] == (v.major,)
assert v[:2] == (v.major, v.minor)
assert v[:3] == (v.major, v.minor, v.patch)
assert v[:4] == (v.major, v.minor, v.patch, v.extra)
exp_str = "%d.%d.%d%s" % (v.major, v.minor, v.patch, v.extra)
assert str(v) == exp_str
assert ome_files.upstream_version == exp_str
| {
"content_hash": "d4f8b56f77dca7f173439164eadf99b6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 32.38095238095238,
"alnum_prop": 0.5661764705882353,
"repo_name": "simleo/ome-files-py",
"id": "57e9054abf3d23cd119f353f376f58e9a3ca031d",
"size": "2064",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_omefiles.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "4536"
},
{
"name": "C++",
"bytes": "21586"
},
{
"name": "Python",
"bytes": "39733"
},
{
"name": "Shell",
"bytes": "352"
}
],
"symlink_target": ""
} |
"""A base class for EE Functions."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import textwrap
from . import computedobject
from . import ee_exception
from . import encodable
from . import serializer
class Function(encodable.EncodableFunction):
"""An abstract base class for functions callable by the EE API.
Subclasses must implement encode_invocation() and getSignature().
"""
# A function used to type-coerce arguments and return values.
_promoter = staticmethod(lambda value, type_name: value)
@staticmethod
def _registerPromoter(promoter):
"""Registers a function used to type-coerce arguments and return values.
Args:
promoter: A function used to type-coerce arguments and return values.
Passed a value as the first parameter and a type name as the second.
          Can be used, for example, to promote numbers or strings to Images.
Should return the input promoted if the type is recognized,
otherwise the original input.
"""
Function._promoter = staticmethod(promoter)
def getSignature(self):
"""Returns a description of the interface provided by this function.
Returns:
The function's signature, a dictionary containing:
name: string
returns: type name string
args: list of argument dictionaries, each containing:
name: string
type: type name string
optional: boolean
default: an arbitrary primitive or encodable object
"""
raise NotImplementedError(
'Function subclasses must implement getSignature().')
def call(self, *args, **kwargs):
"""Calls the function with the given positional and keyword arguments.
Args:
*args: The positional arguments to pass to the function.
**kwargs: The named arguments to pass to the function.
Returns:
A ComputedObject representing the called function. If the signature
specifies a recognized return type, the returned value will be cast
to that type.
"""
return self.apply(self.nameArgs(args, kwargs))
def apply(self, named_args):
"""Calls the function with a dictionary of named arguments.
Args:
named_args: A dictionary of named arguments to pass to the function.
Returns:
A ComputedObject representing the called function. If the signature
specifies a recognized return type, the returned value will be cast
to that type.
"""
result = computedobject.ComputedObject(self, self.promoteArgs(named_args))
return Function._promoter(result, self.getReturnType())
def promoteArgs(self, args):
"""Promotes arguments to their types based on the function's signature.
Verifies that all required arguments are provided and no unknown arguments
are present.
Args:
args: A dictionary of keyword arguments to the function.
Returns:
A dictionary of promoted arguments.
Raises:
EEException: If unrecognized arguments are passed or required ones are
missing.
"""
specs = self.getSignature()['args']
# Promote all recognized args.
promoted_args = {}
known = set()
for spec in specs:
name = spec['name']
if name in args:
promoted_args[name] = Function._promoter(args[name], spec['type'])
elif not spec.get('optional'):
raise ee_exception.EEException(
'Required argument (%s) missing to function: %s'
% (name, self.name))
known.add(name)
# Check for unknown arguments.
unknown = set(args.keys()).difference(known)
if unknown:
raise ee_exception.EEException(
'Unrecognized arguments %s to function: %s' % (unknown, self.name))
return promoted_args
def nameArgs(self, args, extra_keyword_args=None):
"""Converts a list of positional arguments to a map of keyword arguments.
Uses the function's signature for argument names. Note that this does not
check whether the array contains enough arguments to satisfy the call.
Args:
args: Positional arguments to the function.
extra_keyword_args: Optional named arguments to add.
Returns:
Keyword arguments to the function.
Raises:
EEException: If conflicting arguments or too many of them are supplied.
"""
specs = self.getSignature()['args']
# Handle positional arguments.
if len(specs) < len(args):
raise ee_exception.EEException(
'Too many (%d) arguments to function: %s' % (len(args), self.name))
named_args = dict([(spec['name'], value)
for spec, value in zip(specs, args)])
# Handle keyword arguments.
if extra_keyword_args:
for name in extra_keyword_args:
if name in named_args:
raise ee_exception.EEException(
'Argument %s specified as both positional and '
'keyword to function: %s' % (name, self.name))
named_args[name] = extra_keyword_args[name]
# Unrecognized arguments are checked in promoteArgs().
return named_args
def getReturnType(self):
return self.getSignature()['returns']
def serialize(self, for_cloud_api=True):
return serializer.toJSON(
self, for_cloud_api=for_cloud_api
)
def __str__(self):
"""Returns a user-readable docstring for this function."""
DOCSTRING_WIDTH = 75
signature = self.getSignature()
parts = []
if 'description' in signature:
parts.append(
textwrap.fill(signature['description'], width=DOCSTRING_WIDTH))
args = signature['args']
if args:
parts.append('')
parts.append('Args:')
for arg in args:
name_part = ' ' + arg['name']
if 'description' in arg:
name_part += ': '
arg_header = name_part + arg['description']
else:
arg_header = name_part
arg_doc = textwrap.fill(arg_header,
width=DOCSTRING_WIDTH - len(name_part),
subsequent_indent=' ' * 6)
parts.append(arg_doc)
return '\n'.join(parts)
class SecondOrderFunction(Function):
"""A function that executes the result of a function."""
def __init__(self, function_body, signature):
"""Creates a SecondOrderFunction.
Args:
function_body: The function that returns the function to execute.
signature: The signature of the function to execute, as described in
getSignature().
"""
super().__init__()
self._function_body = function_body
self._signature = signature
def encode_invocation(self, encoder):
return self._function_body.encode(encoder)
def encode_cloud_invocation(self, encoder):
return {'functionReference': encoder(self._function_body)}
def getSignature(self):
return self._signature
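# Hypothetical sketch, not part of the library: a bare-bones Function subclass
# used only to illustrate how nameArgs() maps positional arguments onto the
# declared signature and how promoteArgs() checks required/unknown names. The
# signature, names and return values here are invented for the demonstration.
class _ClampDemoFunction(Function):
  """Pretend signature for a clamp(value, min, max)-style operation."""
  def __init__(self):
    super().__init__()
    self.name = 'Demo.clamp'
  def getSignature(self):
    return {
        'name': self.name,
        'returns': 'Object',
        'args': [
            {'name': 'value', 'type': 'Number'},
            {'name': 'min', 'type': 'Number', 'optional': True, 'default': 0},
            {'name': 'max', 'type': 'Number', 'optional': True, 'default': 1},
        ],
    }
  def encode_invocation(self, encoder):
    return self.name
  def encode_cloud_invocation(self, encoder):
    return {'functionName': self.name}
if __name__ == '__main__':
  demo = _ClampDemoFunction()
  # Positional arguments are matched to the signature in order; extra keyword
  # arguments are merged in afterwards.
  print(demo.nameArgs((5, 0), {'max': 10}))  # {'value': 5, 'min': 0, 'max': 10}
  # Promotion is the identity here because no promoter has been registered.
  print(demo.promoteArgs({'value': 5}))      # {'value': 5}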
| {
"content_hash": "4080f7a77ded85ac03e984aff6b45956",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 78,
"avg_line_length": 32.06542056074766,
"alnum_prop": 0.6594287379772661,
"repo_name": "google/earthengine-api",
"id": "e41bc6a9c7e67dd8403654119f6936357a32bb76",
"size": "6884",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/ee/function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1352"
},
{
"name": "JavaScript",
"bytes": "3544345"
},
{
"name": "Jupyter Notebook",
"bytes": "216509"
},
{
"name": "Python",
"bytes": "698991"
},
{
"name": "Shell",
"bytes": "1447"
},
{
"name": "TypeScript",
"bytes": "42297"
}
],
"symlink_target": ""
} |
import random
from faker import Faker
from . import College, Essay, Major, RecommendationLetter, TestScore, ChecklistItem, Acceptance, StudentScholarship
from .. import db
from sqlalchemy.orm import validates
student_colleges = db.Table('student_colleges',
db.Column('college_id', db.Integer,
db.ForeignKey('college.id')),
db.Column('student_profile_id', db.Integer,
db.ForeignKey('student_profile.id')))
student_interests = db.Table('student_interests',
db.Column('interest_id', db.Integer,
db.ForeignKey('interest.id')),
db.Column('student_profile_id', db.Integer,
db.ForeignKey('student_profile.id')))
student_majors = db.Table('student_majors',
db.Column('major_id', db.Integer,
db.ForeignKey('major.id')),
db.Column('student_profile_id', db.Integer,
db.ForeignKey('student_profile.id')))
class StudentProfile(db.Model):
id = db.Column(db.Integer, primary_key=True)
user = db.relationship("User", back_populates="student_profile")
# PERSONAL INFO
phone_number = db.Column(db.String(15), index=True)
high_school = db.Column(db.String, index=True)
district = db.Column(db.String, index=True)
city = db.Column(db.String, index=True)
state = db.Column(db.String, index=True)
graduation_year = db.Column(db.String, index=True)
grade = db.Column(db.Integer, index=True)
# ACADEMIC INFO
unweighted_gpa = db.Column(db.Float, index=True)
weighted_gpa = db.Column(db.Float, index=True)
test_scores = db.relationship(
'TestScore', backref='student_profile', lazy=True)
majors = db.relationship(
'Major',
secondary=student_majors,
backref=db.backref('student_profiles', lazy='dynamic'))
colleges = db.relationship(
'College',
secondary=student_colleges,
backref=db.backref('student_profiles', lazy='dynamic'))
interests = db.relationship(
'Interest',
secondary=student_interests,
backref=db.backref('student_profiles', lazy='dynamic'))
# APPLICATION INFO
# either 'Incomplete' or 'Complete'
fafsa_status = db.Column(db.String, index=True, default='Incomplete')
common_app_essay = db.Column(
db.String, index=True, default='') # link to common app essay
common_app_essay_status = db.Column(
db.String, index=True, default='Incomplete')
early_deadline = db.Column(db.Boolean, default=False)
essays = db.relationship('Essay')
recommendation_letters = db.relationship('RecommendationLetter')
acceptances = db.relationship('Acceptance')
scholarships = db.relationship('StudentScholarship')
scholarship_amount = db.Column(db.Float, index=True)
checklist = db.relationship('ChecklistItem')
cal_token = db.Column(db.String, index=True)
cal_refresh_token = db.Column(db.String, index=True)
cal_token_uri = db.Column(db.String, index=True)
cal_client_id = db.Column(db.String, index=True)
cal_client_secret = db.Column(db.String, index=True)
cal_scopes = db.Column(db.String, index=True)
cal_state = db.Column(db.String, index=True)
@validates('common_app_essay_status')
def validate_status(self, key, status):
assert status in [
'Incomplete', 'Waiting', 'Reviewed', 'Edited', 'Done'
]
return status
@staticmethod
def generate_fake():
fake = Faker()
year = random.choice([['2018', '12'], ['2019', '11'], ['2020', '10']])
fafsa_status = random.choice(['Incomplete', 'Complete'])
essay_status = random.choice(
['Incomplete', 'Waiting', 'Reviewed', 'Edited', 'Done'])
profile = StudentProfile(
high_school='{} High School'.format(fake.street_name()),
district='{} District'.format(fake.city()),
city=fake.city(),
state=fake.state(),
graduation_year=year[0],
grade=year[1],
unweighted_gpa=round(random.uniform(2, 4), 2),
weighted_gpa=round(random.uniform(2,5), 2),
test_scores=TestScore.generate_fake(),
majors=random.sample(Major.query.all(), 3),
colleges=random.sample(College.query.all(), 3),
fafsa_status=fafsa_status,
common_app_essay='https://google.com',
common_app_essay_status=essay_status,
early_deadline=bool(random.getrandbits(1)),
essays=Essay.generate_fake(),
recommendation_letters=RecommendationLetter.generate_fake(),
acceptances=Acceptance.generate_fake(),
scholarships=StudentScholarship.generate_fake(),
scholarship_amount=0,
checklist=ChecklistItem.generate_fake())
for i in profile.scholarships:
profile.scholarship_amount = profile.scholarship_amount + i.award_amount
return profile
def __repr__(self):
s = '<Student Profile\n'
s += 'High School: {}\n'.format(self.high_school)
s += 'District: {}\n'.format(self.district)
s += 'City, State: {}, {}\n'.format(self.city, self.state)
        s += 'Graduation Year: {}\n'.format(self.graduation_year)
s += 'Grade: {}\n'.format(self.grade)
s += 'Unweighted GPA: {}\n'.format(self.unweighted_gpa)
s += 'Weighted GPA: {}\n'.format(self.weighted_gpa)
s += 'Test Scores: {}\n'.format(self.test_scores)
s += 'Majors: {}\n'.format(','.join([m.name for m in self.majors]))
s += 'Colleges: {}\n'.format(','.join([c.name for c in self.colleges]))
s += 'FAFSA Status {}\n'.format(self.fafsa_status)
s += 'Common App Essay: {}\n'.format(self.common_app_essay)
s += 'Essays: {}\n'.format(self.essays)
        s += 'Recommendation Letters: {}\n'.format(self.recommendation_letters)
        s += 'Acceptances: {}'.format(self.acceptances) + '>'
return s
| {
"content_hash": "ae941fc8120b34ca35cd1cc575aa138c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 115,
"avg_line_length": 47.02255639097744,
"alnum_prop": 0.5940198273105213,
"repo_name": "hack4impact/next-gen-scholars",
"id": "23c8337fc6e2ae3f0ee4f7f38e56dfcc208e6a37",
"size": "6254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models/student_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44495"
},
{
"name": "HTML",
"bytes": "191164"
},
{
"name": "JavaScript",
"bytes": "763273"
},
{
"name": "Python",
"bytes": "213104"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from ..client import MultiClient
from ..shell import MultiShell
class MultiCinder(MultiClient):
def __init__(self):
super(MultiCinder, self).__init__()
self.default_executable = 'cinder'
self.prefix_list += ["cinder_", "cinderclient_"]
def main_client():
multistack_shell = MultiShell(MultiCinder)
multistack_shell.run_client()
| {
"content_hash": "475e5cbebce27d2e192c5e8258932d82",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 24.058823529411764,
"alnum_prop": 0.6748166259168704,
"repo_name": "testeddoughnut/multistack",
"id": "808f91c4c021a394fccdc960aca2389425bf7f6b",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multistack/clients/cinder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37138"
}
],
"symlink_target": ""
} |
import sympy
import collections
import stepprinter
from stepprinter import functionnames, replace_u_var
from sympy.core.function import AppliedUndef
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.strategies.core import switch, identity
def Rule(name, props=""):
return collections.namedtuple(name, props + " context symbol")
ConstantRule = Rule("ConstantRule", "number")
ConstantTimesRule = Rule("ConstantTimesRule", "constant other substep")
PowerRule = Rule("PowerRule", "base exp")
AddRule = Rule("AddRule", "substeps")
MulRule = Rule("MulRule", "terms substeps")
DivRule = Rule("DivRule", "numerator denominator numerstep denomstep")
ChainRule = Rule("ChainRule", "substep inner u_var innerstep")
TrigRule = Rule("TrigRule", "f")
ExpRule = Rule("ExpRule", "f base")
LogRule = Rule("LogRule", "arg base")
FunctionRule = Rule("FunctionRule")
AlternativeRule = Rule("AlternativeRule", "alternatives")
DontKnowRule = Rule("DontKnowRule")
RewriteRule = Rule("RewriteRule", "rewritten substep")
DerivativeInfo = collections.namedtuple('DerivativeInfo', 'expr symbol')
evaluators = {}
def evaluates(rule):
def _evaluates(func):
func.rule = rule
evaluators[rule] = func
return func
return _evaluates
def power_rule(derivative):
expr, symbol = derivative.expr, derivative.symbol
base, exp = expr.as_base_exp()
if not base.has(symbol):
if isinstance(exp, sympy.Symbol):
return ExpRule(expr, base, expr, symbol)
else:
u = sympy.Dummy()
f = base ** u
return ChainRule(
ExpRule(f, base, f, u),
exp, u,
diff_steps(exp, symbol),
expr, symbol
)
elif not exp.has(symbol):
if isinstance(base, sympy.Symbol):
return PowerRule(base, exp, expr, symbol)
else:
u = sympy.Dummy()
f = u ** exp
return ChainRule(
PowerRule(u, exp, f, u),
base, u,
diff_steps(base, symbol),
expr, symbol
)
else:
return DontKnowRule(expr, symbol)
def add_rule(derivative):
expr, symbol = derivative.expr, derivative.symbol
return AddRule([diff_steps(arg, symbol) for arg in expr.args],
expr, symbol)
def constant_rule(derivative):
expr, symbol = derivative.expr, derivative.symbol
return ConstantRule(expr, expr, symbol)
def mul_rule(derivative):
expr, symbol = derivative
terms = expr.args
is_div = 1 / sympy.Wild("denominator")
coeff, f = expr.as_independent(symbol)
if coeff != 1:
return ConstantTimesRule(coeff, f, diff_steps(f, symbol), expr, symbol)
numerator, denominator = expr.as_numer_denom()
if denominator != 1:
return DivRule(numerator, denominator,
diff_steps(numerator, symbol),
diff_steps(denominator, symbol), expr, symbol)
return MulRule(terms, [diff_steps(g, symbol) for g in terms], expr, symbol)
def trig_rule(derivative):
expr, symbol = derivative
arg = expr.args[0]
default = TrigRule(expr, expr, symbol)
if not isinstance(arg, sympy.Symbol):
u = sympy.Dummy()
default = ChainRule(
TrigRule(expr.func(u), expr.func(u), u),
arg, u, diff_steps(arg, symbol),
expr, symbol)
if isinstance(expr, (sympy.sin, sympy.cos)):
return default
elif isinstance(expr, sympy.tan):
f_r = sympy.sin(arg) / sympy.cos(arg)
return AlternativeRule([
default,
RewriteRule(f_r, diff_steps(f_r, symbol), expr, symbol)
], expr, symbol)
elif isinstance(expr, sympy.csc):
f_r = 1 / sympy.sin(arg)
return AlternativeRule([
default,
RewriteRule(f_r, diff_steps(f_r, symbol), expr, symbol)
], expr, symbol)
elif isinstance(expr, sympy.sec):
f_r = 1 / sympy.cos(arg)
return AlternativeRule([
default,
RewriteRule(f_r, diff_steps(f_r, symbol), expr, symbol)
], expr, symbol)
elif isinstance(expr, sympy.cot):
f_r_1 = 1 / sympy.tan(arg)
f_r_2 = sympy.cos(arg) / sympy.sin(arg)
return AlternativeRule([
default,
RewriteRule(f_r_1, diff_steps(f_r_1, symbol), expr, symbol),
RewriteRule(f_r_2, diff_steps(f_r_2, symbol), expr, symbol)
], expr, symbol)
else:
        return DontKnowRule(expr, symbol)
def exp_rule(derivative):
expr, symbol = derivative
exp = expr.args[0]
if isinstance(exp, sympy.Symbol):
return ExpRule(expr, sympy.E, expr, symbol)
else:
u = sympy.Dummy()
f = sympy.exp(u)
return ChainRule(ExpRule(f, sympy.E, f, u),
exp, u, diff_steps(exp, symbol), expr, symbol)
def log_rule(derivative):
expr, symbol = derivative
arg = expr.args[0]
if len(expr.args) == 2:
base = expr.args[1]
else:
base = sympy.E
if isinstance(arg, sympy.Symbol):
return LogRule(arg, base, expr, symbol)
else:
u = sympy.Dummy()
return ChainRule(LogRule(u, base, sympy.log(u, base), u),
arg, u, diff_steps(arg, symbol), expr, symbol)
def function_rule(derivative):
return FunctionRule(derivative.expr, derivative.symbol)
@evaluates(ConstantRule)
def eval_constant(*args):
return 0
@evaluates(ConstantTimesRule)
def eval_constanttimes(constant, other, substep, expr, symbol):
return constant * diff(substep)
@evaluates(AddRule)
def eval_add(substeps, expr, symbol):
results = [diff(step) for step in substeps]
return sum(results)
@evaluates(DivRule)
def eval_div(numer, denom, numerstep, denomstep, expr, symbol):
d_numer = diff(numerstep)
d_denom = diff(denomstep)
return (denom * d_numer - numer * d_denom) / (denom **2)
@evaluates(ChainRule)
def eval_chain(substep, inner, u_var, innerstep, expr, symbol):
return diff(substep).subs(u_var, inner) * diff(innerstep)
@evaluates(PowerRule)
@evaluates(ExpRule)
@evaluates(LogRule)
@evaluates(DontKnowRule)
@evaluates(FunctionRule)
def eval_default(*args):
func, symbol = args[-2], args[-1]
if isinstance(func, sympy.Symbol):
func = sympy.Pow(func, 1, evaluate=False)
# Automatically derive and apply the rule (don't use diff() directly as
# chain rule is a separate step)
substitutions = []
mapping = {}
constant_symbol = sympy.Dummy()
for arg in func.args:
if symbol in arg.free_symbols:
mapping[symbol] = arg
substitutions.append(symbol)
else:
mapping[constant_symbol] = arg
substitutions.append(constant_symbol)
rule = func.func(*substitutions).diff(symbol)
return rule.subs(mapping)
@evaluates(MulRule)
def eval_mul(terms, substeps, expr, symbol):
diffs = map(diff, substeps)
result = sympy.S.Zero
for i in range(len(terms)):
subresult = diffs[i]
for index, term in enumerate(terms):
if index != i:
subresult *= term
result += subresult
return result
@evaluates(TrigRule)
def eval_default_trig(*args):
return sympy.trigsimp(eval_default(*args))
@evaluates(RewriteRule)
def eval_rewrite(rewritten, substep, expr, symbol):
return diff(substep)
@evaluates(AlternativeRule)
def eval_alternative(alternatives, expr, symbol):
return diff(alternatives[1])
def diff_steps(expr, symbol):
deriv = DerivativeInfo(expr, symbol)
def key(deriv):
expr = deriv.expr
if isinstance(expr, TrigonometricFunction):
return TrigonometricFunction
elif isinstance(expr, AppliedUndef):
return AppliedUndef
elif not expr.has(symbol):
return 'constant'
else:
return expr.func
return switch(key, {
sympy.Pow: power_rule,
sympy.Symbol: power_rule,
sympy.Dummy: power_rule,
sympy.Add: add_rule,
sympy.Mul: mul_rule,
TrigonometricFunction: trig_rule,
sympy.exp: exp_rule,
sympy.log: log_rule,
AppliedUndef: function_rule,
'constant': constant_rule
})(deriv)
def diff(rule):
try:
return evaluators[rule.__class__](*rule)
except KeyError:
raise ValueError("Cannot evaluate derivative")
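# Illustrative check, not part of the original module: diff_steps() builds a
# nested rule describing how to differentiate, and diff() evaluates that rule
# back to the derivative, so the result should agree with sympy.diff().
if __name__ == '__main__':
    _x = sympy.Symbol('x')
    _rule = diff_steps(sympy.sin(_x) * _x, _x)
    print(type(_rule).__name__)  # MulRule
    print(sympy.simplify(diff(_rule) - sympy.diff(sympy.sin(_x) * _x, _x)))  # 0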
class DiffPrinter(object):
def __init__(self, rule):
self.print_rule(rule)
self.rule = rule
def print_rule(self, rule):
if isinstance(rule, PowerRule):
self.print_Power(rule)
elif isinstance(rule, ChainRule):
self.print_Chain(rule)
elif isinstance(rule, ConstantRule):
self.print_Number(rule)
elif isinstance(rule, ConstantTimesRule):
self.print_ConstantTimes(rule)
elif isinstance(rule, AddRule):
self.print_Add(rule)
elif isinstance(rule, MulRule):
self.print_Mul(rule)
elif isinstance(rule, DivRule):
self.print_Div(rule)
elif isinstance(rule, TrigRule):
self.print_Trig(rule)
elif isinstance(rule, ExpRule):
self.print_Exp(rule)
elif isinstance(rule, LogRule):
self.print_Log(rule)
elif isinstance(rule, DontKnowRule):
self.print_DontKnow(rule)
elif isinstance(rule, AlternativeRule):
self.print_Alternative(rule)
elif isinstance(rule, RewriteRule):
self.print_Rewrite(rule)
elif isinstance(rule, FunctionRule):
self.print_Function(rule)
else:
self.append(repr(rule))
def print_Power(self, rule):
with self.new_step():
self.append("Apply the power rule: {0} goes to {1}".format(
self.format_math(rule.context),
self.format_math(diff(rule))))
def print_Number(self, rule):
with self.new_step():
self.append("The derivative of the constant {} is zero.".format(
self.format_math(rule.number)))
def print_ConstantTimes(self, rule):
with self.new_step():
self.append("The derivative of a constant times a function "
"is the constant times the derivative of the function.")
with self.new_level():
self.print_rule(rule.substep)
self.append("So, the result is: {}".format(
self.format_math(diff(rule))))
def print_Add(self, rule):
with self.new_step():
self.append("Differentiate {} term by term:".format(
self.format_math(rule.context)))
with self.new_level():
for substep in rule.substeps:
self.print_rule(substep)
self.append("The result is: {}".format(
self.format_math(diff(rule))))
def print_Mul(self, rule):
with self.new_step():
self.append("Apply the product rule:".format(
self.format_math(rule.context)))
fnames = map(lambda n: sympy.Function(n)(rule.symbol),
functionnames(len(rule.terms)))
derivatives = map(lambda f: sympy.Derivative(f, rule.symbol), fnames)
ruleform = []
for index in range(len(rule.terms)):
buf = []
for i in range(len(rule.terms)):
if i == index:
buf.append(derivatives[i])
else:
buf.append(fnames[i])
ruleform.append(reduce(lambda a,b: a*b, buf))
self.append(self.format_math_display(
sympy.Eq(sympy.Derivative(reduce(lambda a,b: a*b, fnames),
rule.symbol),
sum(ruleform))))
for fname, deriv, term, substep in zip(fnames, derivatives,
rule.terms, rule.substeps):
self.append("{}; to find {}:".format(
self.format_math(sympy.Eq(fname, term)),
self.format_math(deriv)
))
with self.new_level():
self.print_rule(substep)
self.append("The result is: " + self.format_math(diff(rule)))
def print_Div(self, rule):
with self.new_step():
f, g = rule.numerator, rule.denominator
fp, gp = f.diff(rule.symbol), g.diff(rule.symbol)
x = rule.symbol
ff = sympy.Function("f")(x)
gg = sympy.Function("g")(x)
qrule_left = sympy.Derivative(ff / gg, rule.symbol)
qrule_right = sympy.ratsimp(sympy.diff(sympy.Function("f")(x) /
sympy.Function("g")(x)))
qrule = sympy.Eq(qrule_left, qrule_right)
self.append("Apply the quotient rule, which is:")
self.append(self.format_math_display(qrule))
self.append("{} and {}.".format(self.format_math(sympy.Eq(ff, f)),
self.format_math(sympy.Eq(gg, g))))
self.append("To find {}:".format(self.format_math(ff.diff(rule.symbol))))
with self.new_level():
self.print_rule(rule.numerstep)
self.append("To find {}:".format(self.format_math(gg.diff(rule.symbol))))
with self.new_level():
self.print_rule(rule.denomstep)
self.append("Now plug in to the quotient rule:")
self.append(self.format_math(diff(rule)))
def print_Chain(self, rule):
with self.new_step(), self.new_u_vars() as (u, du):
self.append("Let {}.".format(self.format_math(sympy.Eq(u, rule.inner))))
self.print_rule(replace_u_var(rule.substep, rule.u_var, u))
with self.new_step():
if isinstance(rule.innerstep, FunctionRule):
self.append(
"Then, apply the chain rule. Multiply by {}:".format(
self.format_math(
sympy.Derivative(rule.inner, rule.symbol))))
self.append(self.format_math_display(diff(rule)))
else:
self.append(
"Then, apply the chain rule. Multiply by {}:".format(
self.format_math(
sympy.Derivative(rule.inner, rule.symbol))))
with self.new_level():
self.print_rule(rule.innerstep)
self.append("The result of the chain rule is:")
self.append(self.format_math_display(diff(rule)))
def print_Trig(self, rule):
with self.new_step():
if isinstance(rule.f, sympy.sin):
self.append("The derivative of sine is cosine:")
elif isinstance(rule.f, sympy.cos):
self.append("The derivative of cosine is negative sine:")
elif isinstance(rule.f, sympy.sec):
self.append("The derivative of secant is secant times tangent:")
elif isinstance(rule.f, sympy.csc):
self.append("The derivative of cosecant is negative cosecant times cotangent:")
self.append("{}".format(
self.format_math_display(sympy.Eq(
sympy.Derivative(rule.f, rule.symbol),
diff(rule)))))
def print_Exp(self, rule):
with self.new_step():
if rule.base == sympy.E:
self.append("The derivative of {} is itself.".format(
self.format_math(sympy.exp(rule.symbol))))
else:
self.append(
self.format_math(sympy.Eq(sympy.Derivative(rule.f, rule.symbol),
diff(rule))))
def print_Log(self, rule):
with self.new_step():
if rule.base == sympy.E:
self.append("The derivative of {} is {}.".format(
self.format_math(rule.context),
self.format_math(diff(rule))
))
else:
# This case shouldn't come up often, seeing as SymPy
# automatically applies the change-of-base identity
self.append("The derivative of {} is {}.".format(
self.format_math(sympy.log(rule.symbol, rule.base,
evaluate=False)),
self.format_math(1/(rule.arg * sympy.ln(rule.base)))))
self.append("So {}".format(
self.format_math(sympy.Eq(
sympy.Derivative(rule.context, rule.symbol),
diff(rule)))))
def print_Alternative(self, rule):
with self.new_step():
self.append("There are multiple ways to do this derivative.")
self.append("One way:")
with self.new_level():
self.print_rule(rule.alternatives[0])
def print_Rewrite(self, rule):
with self.new_step():
self.append("Rewrite the function to be differentiated:")
self.append(self.format_math_display(
sympy.Eq(rule.context, rule.rewritten)))
self.print_rule(rule.substep)
def print_Function(self, rule):
with self.new_step():
self.append("Trivial:")
self.append(self.format_math_display(
sympy.Eq(sympy.Derivative(rule.context, rule.symbol),
diff(rule))))
def print_DontKnow(self, rule):
with self.new_step():
self.append("Don't know the steps in finding this derivative.")
self.append("But the derivative is")
self.append(self.format_math_display(diff(rule)))
class HTMLPrinter(DiffPrinter, stepprinter.HTMLPrinter):
def __init__(self, rule):
self.alternative_functions_printed = set()
stepprinter.HTMLPrinter.__init__(self)
DiffPrinter.__init__(self, rule)
def print_Alternative(self, rule):
if rule.context.func in self.alternative_functions_printed:
self.print_rule(rule.alternatives[0])
elif len(rule.alternatives) == 2:
self.alternative_functions_printed.add(rule.context.func)
self.print_rule(rule.alternatives[1])
else:
self.alternative_functions_printed.add(rule.context.func)
with self.new_step():
self.append("There are multiple ways to do this derivative.")
for index, r in enumerate(rule.alternatives[1:]):
with self.new_collapsible():
self.append_header("Method #{}".format(index + 1))
with self.new_level():
self.print_rule(r)
def finalize(self):
answer = diff(self.rule)
if answer:
simp = sympy.simplify(answer)
if simp != answer:
answer = simp
with self.new_step():
self.append("Now simplify:")
self.append(self.format_math_display(simp))
self.lines.append('</ol>')
self.lines.append('<hr/>')
self.level = 0
self.append('The answer is:')
self.append(self.format_math_display(answer))
return '\n'.join(self.lines)
def print_html_steps(function, symbol):
a = HTMLPrinter(diff_steps(function, symbol))
return a.finalize()
| {
"content_hash": "67197d60ceee11db1fa0a54c3097fb94",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 95,
"avg_line_length": 36.92134831460674,
"alnum_prop": 0.5679651044836681,
"repo_name": "iScienceLuvr/sympy_gamma",
"id": "9294ffeac57cfa5444d0260545fe3d987bfaed3c",
"size": "19716",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "app/logic/diffsteps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17636"
},
{
"name": "HTML",
"bytes": "10800"
},
{
"name": "JavaScript",
"bytes": "109690"
},
{
"name": "Python",
"bytes": "128516"
}
],
"symlink_target": ""
} |
"""
Flickrsaver: A screensaver for Flickr enthusiasts
See README for more information.
Copyright (c) 2010, Johannes H. Jensen.
License: BSD, see LICENSE for more details.
"""
import time
import os
import signal
import logging
import urllib2
from random import randint
from threading import Thread, Event, Condition, RLock
import flickrapi
import glib
import gobject
from gtk import gdk
import clutter
import clutter.x11
gobject.threads_init()
clutter.threads_init()
log = logging.getLogger('flickrsaver')
log.setLevel(logging.DEBUG)
API_KEY = "59b92bf5694c292121537c3a754d7b85"
flickr = flickrapi.FlickrAPI(API_KEY)
""" Where we keep photos """
cache_dir = os.path.join(glib.get_user_cache_dir(), 'flickrsaver')
class PhotoSource(object):
def get_photo(self):
""" Return the (filename, fp) of a photo from the source, where
fp is an open file descriptor. """
raise NotImplementedError
class FlickrSource(PhotoSource):
""" Flickr photo source """
common_args = {'extras': 'url_s,url_m,url_z,url_l,url_o',
'per_page': 500}
def __init__(self):
""" Flickr photo source """
PhotoSource.__init__(self)
self.results = None
def get_tree(self):
raise NotImplementedError()
def get_photo(self):
if not self.results:
log.debug("Downloading list...")
tree = self.get_tree()
self.results = tree.find('photos').findall('photo')
url = None
while not url:
r = randint(0, len(self.results) - 1)
p = self.results.pop(r)
if 'url_o' in p.attrib:
url = p.attrib['url_o']
elif 'url_l' in p.attrib:
url = p.attrib['url_l']
elif 'url_z' in p.attrib:
url = p.attrib['url_z']
elif 'url_m' in p.attrib:
url = p.attrib['url_m']
elif 'url_s' in p.attrib:
url = p.attrib['url_s']
else:
log.warn("No suitable URL found for photo #%s", p.attrib['id'])
continue
log.debug("Downloading %s...", url)
fp = urllib2.urlopen(url)
filename = os.path.basename(url)
return filename, fp
class Interestingness(FlickrSource):
def get_tree(self):
return flickr.interestingness_getList(**self.common_args)
def __repr__(self):
return 'Interestingness()'
class Photostream(FlickrSource):
def __init__(self, user_id):
FlickrSource.__init__(self)
self.user_id = user_id
def get_tree(self):
return flickr.people_getPublicPhotos(user_id=self.user_id, **self.common_args)
def __repr__(self):
return 'Photostream(%r)' % (self.user_id)
class Group(FlickrSource):
def __init__(self, group_id):
FlickrSource.__init__(self)
self.group_id = group_id
def get_tree(self):
return flickr.groups_pools_getPhotos(group_id=self.group_id, **self.common_args)
def __repr__(self):
return 'Group(%r)' % (self.group_id)
class Search(FlickrSource):
def __init__(self, text):
FlickrSource.__init__(self)
self.text = text
def get_tree(self):
return flickr.photos_search(text=self.text, sort='relevance', **self.common_args)
def __repr__(self):
return 'Search(%r)' % (self.text)
class PhotoPool(Thread):
""" A pool of photos! """
    def __init__(self, num_photos=10, sources=None, pool_dir=cache_dir):
        Thread.__init__(self)
        self.num_photos = num_photos
        # Avoid sharing one mutable default list between PhotoPool instances
        self.sources = sources if sources is not None else []
        self.dir = pool_dir
# Make sure cache dir exists
if not os.path.exists(self.dir):
os.mkdir(self.dir)
# Clean cache directory
self.clean_cache()
# Load cached photos
self.photos = os.listdir(self.dir)
# Delete queue
self.trash = []
# Condition when a new photo is added
self.added = Condition()
# Condition when a photo is removed
self.removed = Condition()
# Event for stopping the pool
self._stop = Event()
def add_source(self, source):
self.sources.append(source)
def is_empty(self):
return len(self.photos) == 0
def is_full(self):
return len(self.photos) >= self.num_photos
def add(self, filename):
""" Add a photo to the pool """
with self.added:
self.photos.append(filename)
self.added.notifyAll()
def pop(self, filename=None):
""" Pop a photo from the pool
If filename is not set, a random photo will be returned
"""
if not self.photos and self.trash:
# Recycle
log.debug("Recycling...")
self.add(self.trash.pop(0))
while not self.photos and not self._stop.is_set():
with self.added:
# Wait for a photo to be produced
self.added.wait(0.1)
if self._stop.is_set():
return None
# TODO: filename arg?
with self.removed:
r = randint(0, len(self.photos) - 1)
p = self.photos.pop(r)
self.removed.notify()
log.debug("Photo %s consumed", p)
return p
def delete(self, filename):
""" Mark file as deleted """
self.trash.append(filename)
'''
if os.path.isabs(filename):
assert os.path.dirname(filename) == cache_dir
else:
filename = os.path.join(self.dir, filename)
os.remove(filename)
'''
def run(self):
src = 0
while not self._stop.is_set():
if self.is_full():
with self.removed:
self.removed.wait(0.1)
if not self.is_full() and self.sources:
source = self.sources[src]
log.debug("Photo source: %r", source)
try:
# Copy photo to pool
name, fp = source.get_photo()
filename = os.path.join(self.dir, name)
partial_filename = filename + '.part'
f = open(partial_filename, 'wb')
completed = False
while not completed and not self._stop.is_set():
d = fp.read(1024)
if d:
f.write(d)
else:
completed = True
f.close()
if completed:
os.rename(partial_filename, filename)
log.debug("Completed %s", filename)
self.add(filename)
except Exception as e:
log.warning("Source '%s' failed: %s", source, e)
time.sleep(1)
# Next source
src = (src + 1) % len(self.sources)
# Empty trash
while self.trash and len(self.photos) + len(self.trash) > self.num_photos:
f = self.trash.pop()
log.debug("Deleting %s...", f)
os.remove(os.path.join(self.dir, f))
# In case of no sources, don't clog up the CPU
time.sleep(0.1)
log.debug("Pool stopped")
def clean_cache(self):
log.debug("Cleaning cache...")
# Remove partials from cache
for f in os.listdir(self.dir):
if f.endswith('.part'):
log.debug("Deleting partial: %s", f)
os.unlink(os.path.join(self.dir, f))
def stop(self):
log.info("Stopping pool...")
self._stop.set()
class PhotoUpdater(Thread):
def __init__(self, saver, photo_pool, interval=10):
Thread.__init__(self)
self.saver = saver
self.photo_pool = photo_pool
self.interval = interval
self._stop = Event()
def run(self):
ts = 0
while not self._stop.is_set():
if time.time() - ts >= self.interval:
log.debug("Updater: Next!")
p = self.photo_pool.pop()
if p:
filename = os.path.join(self.photo_pool.dir, p)
self.saver.set_photo(filename, None)
ts = time.time()
time.sleep(0.1)
log.debug("Updater stopped")
def stop(self):
log.debug("Stopping updater...")
self._stop.set()
class FlickrSaver(object):
def __init__(self, photo_sources=[]):
# Update queueing
self.update_id = 0
self.filename = None
# Set up Clutter stage and actors
self.stage = clutter.Stage()
self.stage.set_title('Flickrsaver')
self.stage.set_color('#000000')
self.stage.set_size(400, 400)
self.stage.set_user_resizable(True)
self.stage.connect('destroy', self.quit)
self.stage.connect('notify::allocation', self.size_changed)
self.stage.connect('key-press-event', self.key_pressed)
if 'XSCREENSAVER_WINDOW' in os.environ:
xwin = int(os.environ['XSCREENSAVER_WINDOW'], 0)
clutter.x11.set_stage_foreign(self.stage, xwin)
# Allow SIGINT to pass through, allowing the screensaver host
# to properly shut down the screensaver when needed
signal.signal(signal.SIGINT, signal.SIG_DFL)
self.photo1 = clutter.Texture()
self.photo1.set_opacity(0)
self.stage.add(self.photo1)
self.photo2 = clutter.Texture()
self.photo2.set_opacity(0)
self.stage.add(self.photo2)
self.photo = self.photo2
# Animation
self.timeline = clutter.Timeline(duration=2000)
self.alpha = clutter.Alpha(self.timeline, clutter.EASE_IN_CUBIC)
self.fade_in = clutter.BehaviourOpacity(0, 255, self.alpha)
self.fade_out = clutter.BehaviourOpacity(255, 0, self.alpha)
self.stage.show_all()
# Photo pool
self.photo_pool = PhotoPool()
# Photo sources
for ps in photo_sources:
self.photo_pool.add_source(ps)
# Photo updater
self.updater = PhotoUpdater(self, self.photo_pool)
# gobject.timeout_add_seconds(5, self.next_photo)
def update(self):
""" Update actors to new photo
Note: must not be called from other than the main thread!
"""
log.debug("Displaying %s", self.filename)
prev = self.photo
if self.photo == self.photo1:
self.photo = self.photo2
else:
self.photo = self.photo1
try:
self.load_photo()
self.rotate_photo()
self.scale_photo()
self.fade_in.remove_all()
self.fade_out.remove_all()
self.fade_in.apply(self.photo)
self.fade_out.apply(prev)
self.timeline.rewind()
self.timeline.start()
except glib.GError as e:
log.warning("Could not load photo: %s", e)
self.photo = prev
finally:
# Finished, clear update_id
self.update_id = 0
# Mark file for deletion
if self.filename:
self.photo_pool.delete(self.filename)
return False
def queue_update(self):
""" Queue an update of the graph """
if not self.update_id:
# No previous updates pending
self.update_id = gobject.idle_add(self.update)
def set_photo(self, filename, info):
self.filename = filename
self.queue_update()
def load_photo(self):
""" Load and position photo """
self.photo.set_from_file(self.filename)
w, h = self.photo.get_size()
sw, sh = self.stage.get_size()
# Set anchor to center of image
self.photo.set_anchor_point(w/2, h/2)
# Position center of image to center of stage
self.photo.set_position(sw/2, sh/2)
def rotate_photo(self):
""" Rotate photo based on orientation info """
# Clear rotation
self.photo.set_rotation(clutter.X_AXIS, 0, 0, 0, 0)
self.photo.set_rotation(clutter.Y_AXIS, 0, 0, 0, 0)
self.photo.set_rotation(clutter.Z_AXIS, 0, 0, 0, 0)
# Read metadata
log.debug("rotate_photo: Reading metadata... %s", self.filename)
pixbuf = gdk.pixbuf_new_from_file(self.filename)
orientation = pixbuf.get_option('orientation')
if not orientation:
return
orientation = int(orientation)
log.debug("rotate_photo: Orientation = %d", orientation)
if orientation == 1:
# (row #0 - col #0)
# top - left: No rotation necessary
log.debug("rotate_photo: No rotation")
elif orientation == 2:
# top - right: Flip horizontal
log.debug("rotate_photo: Flip horizontal")
self.photo.set_rotation(clutter.Y_AXIS, 180, 0, 0, 0)
elif orientation == 3:
# bottom - right: Rotate 180
log.debug("rotate_photo: Rotate 180")
self.photo.set_rotation(clutter.Z_AXIS, 180, 0, 0, 0)
elif orientation == 4:
# bottom - left: Flip vertical
log.debug("rotate_photo: Flip vertical")
self.photo.set_rotation(clutter.X_AXIS, 180, 0, 0, 0)
elif orientation == 5:
# left - top: Transpose
log.debug("rotate_photo: Transpose")
self.photo.set_rotation(clutter.Y_AXIS, 180, 0, 0, 0)
self.photo.set_rotation(clutter.Z_AXIS, -90, 0, 0, 0)
elif orientation == 6:
# right - top: Rotate 90
log.debug("rotate_photo: Rotate 90")
self.photo.set_rotation(clutter.Z_AXIS, 90, 0, 0, 0)
elif orientation == 7:
# right - bottom: Transverse
            log.debug("rotate_photo: Transverse")
self.photo.set_rotation(clutter.Y_AXIS, 180, 0, 0, 0)
self.photo.set_rotation(clutter.Z_AXIS, 90, 0, 0, 0)
elif orientation == 8:
# left - bottom: Rotate -90
log.debug("rotate_photo: Rotate -90")
self.photo.set_rotation(clutter.Z_AXIS, -90, 0, 0, 0)
def scale_photo(self):
""" Scale photo to fit stage size """
# Clear scale
self.photo.set_scale(1, 1)
width, height = self.stage.get_size()
ow, oh = self.photo.get_transformed_size()
w = ow
h = oh
log.debug("scale_photo: Stage: %sx%s, Photo: %sx%s", width, height, ow, oh)
if ow > width or oh > height:
scale = width / ow
h = oh * scale
if h > height:
scale = height / oh
self.photo.set_scale(scale, scale)
log.debug("Downscaling photo by %s%%", scale * 100)
def size_changed(self, *args):
""" Stage size changed """
width, height = self.stage.get_size()
log.debug("Stage size: %dx%d", width, height)
# Update photo position and scale
if self.filename:
self.load_photo()
self.scale_photo()
def key_pressed(self, stage, event):
if event.keyval == clutter.keysyms.space:
log.debug("NEXT PHOTO!")
self.next_photo()
def main(self):
self.photo_pool.start()
self.updater.start()
clutter.main()
def quit(self, *args):
log.info("Exiting...")
self.updater.stop()
self.photo_pool.stop()
self.updater.join()
self.photo_pool.join()
clutter.main_quit()
if __name__ == '__main__':
import argparse
'''
if 'XSCREENSAVER_WINDOW' in os.environ:
f = open('/tmp/foo', 'w')
f.write('XSCREENSAVER_WINDOW=' + os.environ['XSCREENSAVER_WINDOW'] + '\n')
f.close()
'''
# Parse command-line arguments
#Photostream('7353466@N08')
parser = argparse.ArgumentParser(description='A screensaver for Flickr enthusiasts')
sg = parser.add_argument_group('Photo sources')
sg.add_argument('-u', '--user', action='append', default=[], metavar='USER_ID',
help="Show photos from user's Photostream")
sg.add_argument('-g', '--group', action='append', default=[], metavar='GROUP_ID',
help="Show photos from group's Photostream")
sg.add_argument('-i', '--interesting', action='store_true',
help="Show interesting photos")
sg.add_argument('-s', '--search', action='append', default=[], metavar='TEXT',
help="Show photos matching text")
#parser.add_argument('-d', '--days', type=int,
# help="Only show photos newer than the specified number of days")
args = parser.parse_args()
photo_sources = []
# User's photostream
for user_id in args.user:
source = Photostream(user_id)
photo_sources.append(source)
# Group's photostream
for group_id in args.group:
source = Group(group_id)
photo_sources.append(source)
# Search text
for text in args.search:
source = Search(text)
photo_sources.append(source)
# Default: Interestingness
if args.interesting or not photo_sources:
source = Interestingness()
photo_sources.append(source)
# Fire up the screensaver
fs = FlickrSaver(photo_sources)
fs.main()
| {
"content_hash": "def4762ca05eb19a14b2d304d205bdef",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 89,
"avg_line_length": 31.160202360876898,
"alnum_prop": 0.5224050221885486,
"repo_name": "joh/Flickrsaver",
"id": "9eb3e52319107c27b81665686897d7335aedbf65",
"size": "18500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flickrsaver.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19231"
}
],
"symlink_target": ""
} |
import sys
from .testconfig import dsn
from .testutils import unittest
from . import test_async
from . import test_bugX000
from . import test_bug_gc
from . import test_cancel
from . import test_connection
from . import test_copy
from . import test_cursor
from . import test_dates
from . import test_extras_dictcursor
from . import test_green
from . import test_lobject
from . import test_module
from . import test_notify
from . import test_psycopg2_dbapi20
from . import test_quote
from . import test_transaction
from . import test_types_basic
from . import test_types_extras
if sys.version_info[:2] >= (2, 5):
from . import test_with
else:
test_with = None
def test_suite():
# If connection to test db fails, bail out early.
import psycopg2
try:
cnn = psycopg2.connect(dsn)
except Exception as e:
print("Failed connection to test db:", e.__class__.__name__, e)
print("Please set env vars 'PSYCOPG2_TESTDB*' to valid values.")
sys.exit(1)
else:
cnn.close()
suite = unittest.TestSuite()
suite.addTest(test_async.test_suite())
suite.addTest(test_bugX000.test_suite())
suite.addTest(test_bug_gc.test_suite())
suite.addTest(test_cancel.test_suite())
suite.addTest(test_connection.test_suite())
suite.addTest(test_copy.test_suite())
suite.addTest(test_cursor.test_suite())
suite.addTest(test_dates.test_suite())
suite.addTest(test_extras_dictcursor.test_suite())
suite.addTest(test_green.test_suite())
suite.addTest(test_lobject.test_suite())
suite.addTest(test_module.test_suite())
suite.addTest(test_notify.test_suite())
suite.addTest(test_psycopg2_dbapi20.test_suite())
suite.addTest(test_quote.test_suite())
suite.addTest(test_transaction.test_suite())
suite.addTest(test_types_basic.test_suite())
suite.addTest(test_types_extras.test_suite())
if test_with:
suite.addTest(test_with.test_suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| {
"content_hash": "5950e119813d753c01e14e9d44d6a1af",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 72,
"avg_line_length": 31.446153846153845,
"alnum_prop": 0.6952054794520548,
"repo_name": "pabulumm/neighbors",
"id": "ae32ac216498cb1741b9e34aed342284517a70dd",
"size": "3038",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/python3.4/site-packages/psycopg2/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "167622"
},
{
"name": "HTML",
"bytes": "221496"
},
{
"name": "JavaScript",
"bytes": "325471"
},
{
"name": "Python",
"bytes": "7896264"
},
{
"name": "Shell",
"bytes": "12645"
},
{
"name": "Smarty",
"bytes": "789"
}
],
"symlink_target": ""
} |
import argparse
import re
import requests
import urlparse
import os
import json
from keystoneclient.v2_0 import Client as keystoneclient
class NailgunClient(object):
def __init__(self, admin_node_ip, **kwargs):
self.url = "http://{0}:8000".format(admin_node_ip)
keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)
ksclient = keystoneclient(auth_url=keystone_url, **kwargs)
self.headers = {"X-Auth-Token": ksclient.auth_token,
"Content-Type": "application/json"}
def _get_cluster_list(self):
endpoint = urlparse.urljoin(self.url, "api/clusters")
return requests.get(endpoint, headers=self.headers).json()
def _get_cluster(self, cluster_id):
endpoint = urlparse.urljoin(self.url,
"api/clusters/{0}".format(cluster_id))
return requests.get(endpoint, headers=self.headers).json()
def _get_cluster_attributes(self, cluster_id):
endpoint = urlparse.urljoin(self.url,
"api/clusters/{0}/attributes".format(
cluster_id))
return requests.get(endpoint, headers=self.headers).json()
def _get_list_nodes(self):
endpoint = urlparse.urljoin(self.url, "api/nodes")
return requests.get(endpoint, headers=self.headers).json()
def _get_list_networks(self, cluster_id):
net_provider = self._get_cluster(cluster_id)["net_provider"]
endpoint = urlparse.urljoin(self.url,
"/api/clusters/{0}"
"/network_configuration/{1}".format(
cluster_id, net_provider))
return requests.get(endpoint, headers=self.headers).json()
def _create_cluster(self, data):
endpoint = urlparse.urljoin(self.url, "api/clusters")
return requests.post(endpoint, headers=self.headers,
data=json.dumps(data))
def list_cluster_nodes(self, cluster_id):
endpoint = urlparse.urljoin(
self.url, "api/nodes/?cluster_id={}".format(cluster_id))
return requests.get(endpoint, headers=self.headers).json()
def update_cluster_attributes(self, cluster_id, attrs):
endpoint = urlparse.urljoin(
self.url, "api/clusters/{}/attributes".format(cluster_id))
return requests.put(endpoint, headers=self.headers,
data=json.dumps(attrs))
def update_node(self, node_id, data):
endpoint = urlparse.urljoin(self.url, "api/nodes/{}".format(node_id))
return requests.put(endpoint, headers=self.headers,
data=json.dumps(data))
def get_node_interfaces(self, node_id):
endpoint = urlparse.urljoin(self.url,
"api/nodes/{}/interfaces".format(node_id))
return requests.get(endpoint, headers=self.headers).json()
def put_node_interfaces(self, data):
"""
:param data: [{'id': node_id, 'interfaces': interfaces}]
:return: response
"""
endpoint = urlparse.urljoin(self.url, "api/nodes/interfaces")
return requests.put(endpoint, headers=self.headers,
data=json.dumps(data))
def update_cluster_networks(self, cluster_id, data):
net_provider = self._get_cluster(cluster_id)["net_provider"]
endpoint = urlparse.urljoin(
self.url,
"api/clusters/{}/network_configuration/{}".format(cluster_id,
net_provider))
return requests.put(endpoint, headers=self.headers,
data=json.dumps(data))
def get_node_disks(self, node_id):
endpoint = urlparse.urljoin(self.url,
"api/nodes/{}/disks".format(node_id))
return requests.get(endpoint, headers=self.headers).json()
def put_node_disks(self, node_id, data):
endpoint = urlparse.urljoin(self.url,
"api/nodes/{}/disks".format(node_id))
return requests.put(endpoint, headers=self.headers,
data=json.dumps(data))
def get_cluster_id(cluster_name):
for cluster in client._get_cluster_list():
if cluster["name"] == cluster_name:
return cluster["id"]
else:
raise NameError("Can not find cluster with specified name")
parser = argparse.ArgumentParser(
description="Script for dump/restore cluster config")
parser.add_argument('admin_node_ip', metavar='10.20.0.2', type=str,
help='IP of fuel master node')
parser.add_argument("-fuel_user", dest='fuel_user', type=str,
help="Fuel username", default='admin')
parser.add_argument("-fuel_password", dest='fuel_password',
type=str, help="Fuel password",
default='admin')
parser.add_argument("-fuel_tenant", dest='fuel_tenant', type=str,
help="Fuel tenant", default='admin')
parser.add_argument('-dump_cluster', dest='dump_cluster', type=str,
default="",
                    help='Name of the cluster whose configuration should be dumped')
parser.add_argument("-dump_folder", dest="dump_folder", type=str,
default="",
                    help="Folder where the cluster config will be stored")
parser.add_argument("-restore_cluster", dest="restore_cluster", type=str,
default="",
help="Folder which contains cluster config")
args = parser.parse_args()
client = NailgunClient(args.admin_node_ip, username=args.fuel_user,
password=args.fuel_password,
tenant_name=args.fuel_tenant)
if args.dump_cluster:
cluster_id = get_cluster_id(args.dump_cluster)
if args.dump_folder:
if not os.path.exists(args.dump_folder):
os.makedirs(args.dump_folder)
folder = args.dump_folder
else:
os.makedirs(args.dump_cluster)
folder = args.dump_cluster
with open("{}/cluster.json".format(folder), "w") as cluster:
json.dump(client._get_cluster(cluster_id), cluster, sort_keys=False,
indent=4)
with open("{}/cluster_attributes.json".format(folder),
"w") as cluster_attrs:
json.dump(client._get_cluster_attributes(cluster_id), cluster_attrs,
sort_keys=False, indent=4)
with open("{}/cluster_networks.json".format(folder), "w") as cluster_net:
json.dump(client._get_list_networks(cluster_id), cluster_net,
sort_keys=False, indent=4)
for node in client.list_cluster_nodes(cluster_id):
with open("{}/node-{}.json".format(folder, node["id"]),
"w") as node_cfg:
json.dump(node, node_cfg, sort_keys=False, indent=4)
with open(
"{}/node-{}-networks.json".format(folder,
node["id"]),
"w") as node_net:
json.dump(client.get_node_interfaces(node["id"]), node_net,
sort_keys=False, indent=4)
with open(
"{}/node-{}-disks.json".format(folder,
node["id"]),
"w") as node_disks:
json.dump(client.get_node_disks(node["id"]), node_disks,
sort_keys=False, indent=4)
if args.restore_cluster:
if not os.path.exists(args.restore_cluster):
raise NameError("This folder does not exist")
folder = args.restore_cluster
if os.path.isfile("{}/cluster.json".format(folder)):
with open("{}/cluster.json".format(folder)) as cluster:
cluster_data = json.load(cluster)
new_cluster_data = {
"name": cluster_data["name"],
"release": cluster_data["release_id"],
"mode": cluster_data["mode"],
"net_provider": cluster_data["net_provider"]
}
if cluster_data.get("net_segment_type"):
            new_cluster_data["net_segment_type"] = cluster_data[
                "net_segment_type"]
elif os.path.isfile("{}/cluster_networks.json".format(folder)):
with open(
"{}/cluster_networks.json".format(folder)) as cluster_nets:
cluster_nets_data = json.load(cluster_nets)
if cluster_data["net_provider"] == "neutron":
new_cluster_data["net_segment_type"] = \
cluster_nets_data["networking_parameters"][
"segmentation_type"]
else:
new_cluster_data["net_manager"] = \
cluster_nets_data["networking_parameters"][
"net_manager"]
new_clust = client._create_cluster(new_cluster_data).json()
else:
raise NameError("Can not find cluster.json")
if os.path.isfile("{}/cluster_attributes.json".format(folder)):
with open(
"{}/cluster_attributes.json".format(folder)) as cluster_attrs:
cluster_attrs_data = json.load(cluster_attrs)
new_cluster_attrs = client.update_cluster_attributes(
new_clust["id"], cluster_attrs_data)
if os.path.isfile("{}/cluster_networks.json".format(folder)):
with open("{}/cluster_networks.json".format(folder)) as cluster_nets:
cluster_nets_data = json.load(cluster_nets)
restore_cluster_nets_data = client._get_list_networks(new_clust["id"])
for key, value in cluster_nets_data["networking_parameters"].items():
if key == "base_mac":
continue
restore_cluster_nets_data["networking_parameters"][key] = value
for net in cluster_nets_data["networks"]:
if net["name"] == "fuelweb_admin":
continue
for new_net in restore_cluster_nets_data["networks"]:
if net["name"] == new_net["name"]:
for key, value in net.items():
if key in ["cluster_id", "id"]:
continue
new_net[key] = value
client.update_cluster_networks(new_clust["id"],
restore_cluster_nets_data)
_nodes = re.compile(r'-(\d+)\.json$')
nodes = [
node.split('.')[0]
for node in os.listdir(folder) if _nodes.search(node)]
for node in nodes:
with open("{}/{}.json".format(folder, node)) as node_base:
node_base_cfg = json.load(node_base)
available_nodes = [nod for nod in client._get_list_nodes()
if not nod["cluster"] and nod["online"]]
for available_node in available_nodes:
if (node_base_cfg["manufacturer"] !=
available_node["manufacturer"]):
continue
if os.path.isfile("{}/{}-networks.json".format(folder, node)):
with open("{}/{}-networks.json".format(
folder, node)) as node_net:
node_net_cfg = json.load(node_net)
new_interfaces = client.get_node_interfaces(
available_node["id"])
if len(node_net_cfg) != len(new_interfaces):
continue
if os.path.isfile("{}/{}-disks.json".format(folder, node)):
with open("{}/{}-disks.json".format(
folder, node)) as node_disks:
node_disk_cfg = json.load(node_disks)
new_disks = client.get_node_disks(available_node["id"])
if len(node_disk_cfg) != len(new_disks):
continue
good_disks = []
for disk in sorted(node_disk_cfg,
key=lambda k: k['size'], reverse=True):
needed_size = 0
for volume in disk["volumes"]:
needed_size += volume["size"]
for new_disk in new_disks:
if needed_size <= new_disk["size"]:
new_disk["volumes"] = disk["volumes"]
appr_disk = new_disk
break
else:
                    raise Exception("All disks are too small")
good_disks.append(new_disks.pop(
new_disks.index(appr_disk)))
good_node = available_node
break
else:
raise Exception("Can not find appropriate node")
data = {
"cluster_id": new_clust["id"],
"pending_roles": node_base_cfg["pending_roles"],
"pending_addition": True,
}
client.update_node(good_node["id"], data)
if os.path.isfile("{}/{}-networks.json".format(folder, node)):
all_nets = {}
new_interfaces = client.get_node_interfaces(good_node["id"])
for netw in new_interfaces:
all_nets.update(
{net["name"]: net for net in netw["assigned_networks"]})
for interface in node_net_cfg:
for new_interface in new_interfaces:
if interface["name"] == new_interface["name"]:
ass_interfaces = [
all_nets[i["name"]] for i in interface[
"assigned_networks"]]
new_interface["assigned_networks"] = ass_interfaces
resp = client.put_node_interfaces(
[{"id": good_node["id"], "interfaces": new_interfaces}]
)
print resp
print resp.content
if os.path.isfile("{}/{}-disks.json".format(folder, node)):
resp = client.put_node_disks(good_node["id"], good_disks)
| {
"content_hash": "d262c18cb03463f4d5a1aba5b351bbd0",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 79,
"avg_line_length": 41.14076246334311,
"alnum_prop": 0.5372442797063226,
"repo_name": "smurashov/test-infra",
"id": "586f34465679c15dff383eccaf2bb579d86d9616",
"size": "14029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel/nailgun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45339"
},
{
"name": "Shell",
"bytes": "3893"
}
],
"symlink_target": ""
} |
from domain.container import apply_to_container
| {
"content_hash": "2ee8366dc0acc93ee8815aa8aacb6458",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.8541666666666666,
"repo_name": "mcuadros/python-solid-example",
"id": "b43500b277593d1394278c9ebd1345f8822ced28",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domain/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8928"
}
],
"symlink_target": ""
} |
import datetime
from os import path
import urllib
import numpy as np
import ocw.data_source.local as local
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# Two Local Model Files
FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "wrf_bias_compared_to_knmi"
FILE_1_PATH = path.join('/tmp', FILE_1)
FILE_2_PATH = path.join('/tmp', FILE_2)
if not path.exists(FILE_1_PATH):
urllib.urlretrieve(FILE_LEADER + FILE_1, FILE_1_PATH)
if not path.exists(FILE_2_PATH):
urllib.urlretrieve(FILE_LEADER + FILE_2, FILE_2_PATH)
""" Step 1: Load Local NetCDF Files into OCW Dataset Objects """
print("Loading %s into an OCW Dataset Object" % (FILE_1_PATH,))
knmi_dataset = local.load_file(FILE_1_PATH, "tasmax")
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" %
(knmi_dataset.values.shape,))
print("Loading %s into an OCW Dataset Object" % (FILE_2_PATH,))
wrf_dataset = local.load_file(FILE_2_PATH, "tasmax")
print("WRF_Dataset.values shape: (times, lats, lons) - %s \n" %
(wrf_dataset.values.shape,))
""" Step 2: Temporally Rebin the Data into an Annual Timestep """
print("Temporally Rebinning the Datasets to an Annual Timestep")
knmi_dataset = dsp.temporal_rebin(knmi_dataset, temporal_resolution='annual')
wrf_dataset = dsp.temporal_rebin(wrf_dataset, temporal_resolution='annual')
print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
print("WRF_Dataset.values shape: %s \n\n" % (wrf_dataset.values.shape,))
""" Step 3: Spatially Regrid the Dataset Objects to a 1 degree grid """
# The spatial_boundaries() function returns the spatial extent of the dataset
print("The KNMI_Dataset spatial bounds (min_lat, max_lat, min_lon, max_lon) are: \n"
"%s\n" % (knmi_dataset.spatial_boundaries(), ))
print("The KNMI_Dataset spatial resolution (lat_resolution, lon_resolution) is: \n"
"%s\n\n" % (knmi_dataset.spatial_resolution(), ))
min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
# Using the bounds we will create a new set of lats and lons on 1 degree step
new_lons = np.arange(min_lon, max_lon, 1)
new_lats = np.arange(min_lat, max_lat, 1)
# Spatially regrid datasets using the new_lats, new_lons numpy arrays
print("Spatially Regridding the KNMI_Dataset...")
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
print("Final shape of the KNMI_Dataset: \n"
"%s\n" % (knmi_dataset.values.shape, ))
print("Spatially Regridding the WRF_Dataset...")
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)
print("Final shape of the WRF_Dataset: \n"
"%s\n" % (wrf_dataset.values.shape, ))
""" Step 4: Build a Metric to use for Evaluation - Bias for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()
""" Step 5: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists. Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()
""" Step 6: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
#  The shape of results is (num_metrics, num_target_datasets) if no subregions are used
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0]
print("The results are of type: %s" % type(results))
# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")
lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT
gridshape = (4, 5)  # 20 years worth of plots arranged in a 4 x 5 grid
plot_title = "TASMAX Bias of WRF Compared to KNMI (1989 - 2008)"
sub_titles = range(1989, 2009, 1)
plotter.draw_contour_map(results, lats, lons, fname,
gridshape=gridshape, ptitle=plot_title,
subtitles=sub_titles)
| {
"content_hash": "1a7ef6cf1d3fbcaba00b3fa17f2a22a9",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 84,
"avg_line_length": 43.37614678899082,
"alnum_prop": 0.7235617597292724,
"repo_name": "jarifibrahim/climate",
"id": "ffa5cda9173abef6e170b651a5382583e45b4d5b",
"size": "5515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/simple_model_to_model_bias.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "CSS",
"bytes": "2587"
},
{
"name": "HTML",
"bytes": "38243"
},
{
"name": "JavaScript",
"bytes": "124509"
},
{
"name": "OpenEdge ABL",
"bytes": "14713"
},
{
"name": "Python",
"bytes": "901332"
},
{
"name": "Ruby",
"bytes": "537"
},
{
"name": "Shell",
"bytes": "4808"
}
],
"symlink_target": ""
} |
"""Compatibility tests for dulwich repositories."""
from cStringIO import StringIO
import itertools
import os
from dulwich.objects import (
hex_to_sha,
)
from dulwich.repo import (
check_ref_format,
)
from dulwich.tests.utils import (
tear_down_repo,
)
from dulwich.tests.compat.utils import (
run_git_or_fail,
import_repo,
CompatTestCase,
)
class ObjectStoreTestCase(CompatTestCase):
"""Tests for git repository compatibility."""
def setUp(self):
super(ObjectStoreTestCase, self).setUp()
self._repo = import_repo('server_new.export')
self.addCleanup(tear_down_repo, self._repo)
def _run_git(self, args):
return run_git_or_fail(args, cwd=self._repo.path)
def _parse_refs(self, output):
refs = {}
for line in StringIO(output):
fields = line.rstrip('\n').split(' ')
self.assertEqual(3, len(fields))
refname, type_name, sha = fields
check_ref_format(refname[5:])
hex_to_sha(sha)
refs[refname] = (type_name, sha)
return refs
def _parse_objects(self, output):
return set(s.rstrip('\n').split(' ')[0] for s in StringIO(output))
def test_bare(self):
self.assertTrue(self._repo.bare)
self.assertFalse(os.path.exists(os.path.join(self._repo.path, '.git')))
def test_head(self):
output = self._run_git(['rev-parse', 'HEAD'])
head_sha = output.rstrip('\n')
hex_to_sha(head_sha)
self.assertEqual(head_sha, self._repo.refs['HEAD'])
def test_refs(self):
output = self._run_git(
['for-each-ref', '--format=%(refname) %(objecttype) %(objectname)'])
expected_refs = self._parse_refs(output)
actual_refs = {}
for refname, sha in self._repo.refs.as_dict().iteritems():
if refname == 'HEAD':
continue # handled in test_head
obj = self._repo[sha]
self.assertEqual(sha, obj.id)
actual_refs[refname] = (obj.type_name, obj.id)
self.assertEqual(expected_refs, actual_refs)
# TODO(dborowitz): peeled ref tests
def _get_loose_shas(self):
output = self._run_git(['rev-list', '--all', '--objects', '--unpacked'])
return self._parse_objects(output)
def _get_all_shas(self):
output = self._run_git(['rev-list', '--all', '--objects'])
return self._parse_objects(output)
def assertShasMatch(self, expected_shas, actual_shas_iter):
actual_shas = set()
for sha in actual_shas_iter:
obj = self._repo[sha]
self.assertEqual(sha, obj.id)
actual_shas.add(sha)
self.assertEqual(expected_shas, actual_shas)
def test_loose_objects(self):
# TODO(dborowitz): This is currently not very useful since fast-imported
# repos only contained packed objects.
expected_shas = self._get_loose_shas()
self.assertShasMatch(expected_shas,
self._repo.object_store._iter_loose_objects())
def test_packed_objects(self):
expected_shas = self._get_all_shas() - self._get_loose_shas()
self.assertShasMatch(expected_shas,
itertools.chain(*self._repo.object_store.packs))
def test_all_objects(self):
expected_shas = self._get_all_shas()
self.assertShasMatch(expected_shas, iter(self._repo.object_store))
| {
"content_hash": "350b5d42790cbda7f5514567d65e565d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 80,
"avg_line_length": 32.764150943396224,
"alnum_prop": 0.5974661675784624,
"repo_name": "johndbritton/gitviz",
"id": "ceebf51408dcaba26a530869242efc3e76d0c28f",
"size": "4274",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dulwich/dulwich/tests/compat/test_repository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1916"
},
{
"name": "Batchfile",
"bytes": "3268"
},
{
"name": "C",
"bytes": "48666"
},
{
"name": "CSS",
"bytes": "5054"
},
{
"name": "Groff",
"bytes": "82"
},
{
"name": "HTML",
"bytes": "4369"
},
{
"name": "Handlebars",
"bytes": "2883"
},
{
"name": "JavaScript",
"bytes": "168931"
},
{
"name": "Makefile",
"bytes": "8587"
},
{
"name": "PHP",
"bytes": "4954"
},
{
"name": "Python",
"bytes": "1603823"
},
{
"name": "Shell",
"bytes": "1455"
}
],
"symlink_target": ""
} |
import logging
from urllib.parse import urljoin, urlencode
from werkzeug.local import LocalProxy
from werkzeug.middleware.profiler import ProfilerMiddleware
from flask import Flask, request
from flask import url_for as flask_url_for
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from flask_cors import CORS
from flask_babel import Babel
from flask_talisman import Talisman
from followthemoney import set_model_locale
from elasticsearch import Elasticsearch, TransportError
from servicelayer.cache import get_redis
from servicelayer.archive import init_archive
from servicelayer.extensions import get_extensions
from servicelayer.util import service_retries, backoff
from servicelayer.logs import configure_logging, LOG_FORMAT_JSON
from servicelayer import settings as sls
from aleph import settings
from aleph.cache import Cache
from aleph.oauth import configure_oauth
from aleph.util import LoggingTransport
NONE = "'none'"
log = logging.getLogger(__name__)
db = SQLAlchemy()
migrate = Migrate()
mail = Mail()
babel = Babel()
talisman = Talisman()
def create_app(config={}):
configure_logging(level=logging.DEBUG)
app = Flask("aleph")
app.config.from_object(settings)
app.config.update(config)
if "postgres" not in settings.DATABASE_URI:
raise RuntimeError("aleph database must be PostgreSQL!")
app.config.update(
{
"SQLALCHEMY_DATABASE_URI": settings.DATABASE_URI,
"FLASK_SKIP_DOTENV": True,
"FLASK_DEBUG": settings.DEBUG,
"BABEL_DOMAIN": "aleph",
"PROFILE": settings.PROFILE,
}
)
if settings.PROFILE:
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
migrate.init_app(app, db, directory=settings.ALEMBIC_DIR)
configure_oauth(app, cache=get_cache())
mail.init_app(app)
db.init_app(app)
babel.init_app(app)
CORS(
app,
resources=r"/api/*",
origins=settings.CORS_ORIGINS,
supports_credentials=True,
)
feature_policy = {
"accelerometer": NONE,
"camera": NONE,
"geolocation": NONE,
"gyroscope": NONE,
"magnetometer": NONE,
"microphone": NONE,
"payment": NONE,
"usb": NONE,
}
talisman.init_app(
app,
force_https=settings.FORCE_HTTPS,
strict_transport_security=settings.FORCE_HTTPS,
feature_policy=feature_policy,
content_security_policy=settings.CONTENT_POLICY,
)
from aleph.views import mount_app_blueprints
mount_app_blueprints(app)
# This executes all registered init-time plugins so that other
# applications can register their behaviour.
for plugin in get_extensions("aleph.init"):
plugin(app=app)
return app
@babel.localeselector
def determine_locale():
try:
options = settings.UI_LANGUAGES
locale = request.accept_languages.best_match(options)
locale = locale or str(babel.default_locale)
except RuntimeError:
locale = str(babel.default_locale)
set_model_locale(locale)
return locale
@migrate.configure
def configure_alembic(config):
config.set_main_option("sqlalchemy.url", settings.DATABASE_URI)
return config
def get_es():
url = settings.ELASTICSEARCH_URL
timeout = settings.ELASTICSEARCH_TIMEOUT
for attempt in service_retries():
try:
if not hasattr(settings, "_es_instance"):
# When logging structured logs, use a custom transport to log
# all es queries and their response time
if sls.LOG_FORMAT == LOG_FORMAT_JSON:
es = Elasticsearch(
url, transport_class=LoggingTransport, timeout=timeout
)
else:
es = Elasticsearch(url, timeout=timeout)
es.info()
settings._es_instance = es
return settings._es_instance
except TransportError as exc:
log.exception("ElasticSearch error: %s", exc.error)
backoff(failures=attempt)
raise RuntimeError("Could not connect to ElasticSearch")
def get_archive():
if not hasattr(settings, "_archive"):
settings._archive = init_archive()
return settings._archive
def get_cache():
if not hasattr(settings, "_cache") or settings._cache is None:
settings._cache = Cache(get_redis(), prefix=settings.APP_NAME)
return settings._cache
es = LocalProxy(get_es)
kv = LocalProxy(get_redis)
cache = LocalProxy(get_cache)
archive = LocalProxy(get_archive)
def url_for(*a, **kw):
"""Overwrite Flask url_for to force external paths."""
try:
kw["_external"] = False
query = kw.pop("_query", None)
relative = kw.pop("_relative", False)
path = flask_url_for(*a, **kw)
return url_external(path, query, relative=relative)
except RuntimeError:
return None
def url_external(path, query, relative=False):
"""Generate external URLs with HTTPS (if configured)."""
if query is not None:
path = "%s?%s" % (path, urlencode(query))
if relative:
return path
return urljoin(settings.APP_UI_URL, path)
| {
"content_hash": "c8151fa690aabbe1bb4dca07d01c608d",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 78,
"avg_line_length": 29.847457627118644,
"alnum_prop": 0.6596630702252508,
"repo_name": "pudo/aleph",
"id": "ade523cbc86005cab4548b4a687a0722132fa21d",
"size": "5283",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/develop/jsonschema-4.1.2",
"path": "aleph/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15949"
},
{
"name": "HTML",
"bytes": "170476"
},
{
"name": "JavaScript",
"bytes": "111287"
},
{
"name": "Makefile",
"bytes": "1319"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "492593"
}
],
"symlink_target": ""
} |
"""
This module contains a collection of models that implement a simple function:
:func:`~revscoring.ScorerModel.score`. Currently, all models are
a subclass of :class:`revscoring.scorer_models.MLScorerModel`
which means that they also implement
:meth:`~revscoring.scorer_models.MLScorerModel.train` and
:meth:`~revscoring.scorer_models.MLScorerModel.test` methods. See
:mod:`revscoring.scorer_models.statistics` for stats that can be applied to
models.
Support Vector Classifiers
++++++++++++++++++++++++++
.. automodule:: revscoring.scorer_models.svc
Naive Bayes Classifiers
+++++++++++++++++++++++
.. automodule:: revscoring.scorer_models.nb
Random Forest
+++++++++++++
.. automodule:: revscoring.scorer_models.rf
Gradient Boosting
+++++++++++++++++
.. automodule:: revscoring.scorer_models.gradient_boosting
Abstract classes
++++++++++++++++
.. automodule:: revscoring.scorer_models.scorer_model
"""
from .svc import SVC, LinearSVC, RBFSVC
from .gradient_boosting import GradientBoosting
from .nb import NB, GaussianNB, MultinomialNB, BernoulliNB
from .scorer_model import ScorerModel, MLScorerModel
from .sklearn_classifier import ScikitLearnClassifier
from .rf import RF
__all__ = [
SVC, LinearSVC, RBFSVC, NB, GaussianNB, MultinomialNB, BernoulliNB,
ScorerModel, MLScorerModel, ScikitLearnClassifier, RF, GradientBoosting
]
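# Illustrative sketch (not part of the original module), based on the docstring
# above: every model exposes train(), test() and score(). Constructor arguments,
# variable names and data shapes below are assumptions for illustration only.
#
#   from revscoring.scorer_models import RF
#   model = RF(features)                         # assumed: a list of Feature objects
#   model.train(train_values_labels)             # assumed: (feature values, label) pairs
#   statistics = model.test(test_values_labels)
#   score = model.score(feature_values)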
| {
"content_hash": "9a9d0ac51945089a85a2e2aae472757c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 30.681818181818183,
"alnum_prop": 0.737037037037037,
"repo_name": "yafeunteun/wikipedia-spam-classifier",
"id": "31e30f44d75b818ffb8e7d992df84584408d8dc3",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "revscoring/revscoring/scorer_models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7262"
},
{
"name": "Jupyter Notebook",
"bytes": "971575"
},
{
"name": "Makefile",
"bytes": "7446"
},
{
"name": "Python",
"bytes": "796831"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
} |
from urllib import request
response = request.urlopen('http://localhost:8080/')
print('RESPONSE:', response)
print('URL :', response.geturl())
headers = response.info()
print('DATE :', headers['date'])
print('HEADERS :')
print('---------')
print(headers)
data = response.read().decode('utf-8')
print('LENGTH :', len(data))
print('DATA :')
print('---------')
print(data)
| {
"content_hash": "439290451826b1e7afec22f762861934",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 22.58823529411765,
"alnum_prop": 0.625,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "6920c2d964798c6a32666f3e9cfdd7420ed6238f",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_the_internet/urllib_request_urlopen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
""" Registro de las clases en el admin de django """
from django.contrib import admin
from .models import SliderImage, SliderAlbum
class SliderImageAdmin(admin.ModelAdmin):
    list_display = ('thumb', 'name', 'description', 'order')
admin.site.register(SliderImage, SliderImageAdmin)
class SliderAlbumAdmin(admin.ModelAdmin):
filter_horizontal = 'images',
admin.site.register(SliderAlbum, SliderAlbumAdmin)
| {
"content_hash": "0e82bffcf7d9056598dda39e2a67ee17",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 32.38461538461539,
"alnum_prop": 0.7600950118764845,
"repo_name": "zakwilson/cmsplugin_jcarousellite",
"id": "4570b545c509b9db480bd463add2dee25fbec86e",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24939"
}
],
"symlink_target": ""
} |
from .geoid import tract_geoid, tract_census_geoid
__all__ = ["tract_geoid", "tract_census_geoid"]
| {
"content_hash": "ac2d495cea50cd51071dc8a7b7dc801c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 50,
"avg_line_length": 33,
"alnum_prop": 0.7070707070707071,
"repo_name": "CivicKnowledge/metatab-packages",
"id": "d63f62f23145c67426f56d5138068344cd201c3d",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "broken/sangis.org-census_regions/lib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "163951"
},
{
"name": "Jupyter Notebook",
"bytes": "760451"
},
{
"name": "Makefile",
"bytes": "2603"
},
{
"name": "Python",
"bytes": "34795"
}
],
"symlink_target": ""
} |
import sys
import os
import time
speed = sys.argv[1]  # motor speed
t = sys.argv[2]  # how long to drive, in seconds
# Run motor 1 in reverse and motor 3 forward at the given speed,
# wait for the requested time, then stop both motors.
os.system('python driveM1.py -%s' % speed)
os.system('python driveM3.py %s' % speed)
time.sleep(float(t))
os.system('python driveM1.py 0')
os.system('python driveM3.py 0')
| {
"content_hash": "c44ebbe9ad34b7d8a3e71e8b2cc1e512",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 41,
"avg_line_length": 19.076923076923077,
"alnum_prop": 0.7016129032258065,
"repo_name": "cletusw/goal-e",
"id": "8fa8ac7c8dc3eb5bddaafb963b2077885413e46a",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/drive_straight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "62145"
},
{
"name": "Python",
"bytes": "69360"
}
],
"symlink_target": ""
} |
__author__ = 'q00222219@huawei'
import time
from heat.openstack.common import log as logging
import commonutils
import constant
import exception
LOG = logging.getLogger(__name__)
class CascadingConfiger(object):
def __init__(self, cascading_ip, user, password, cascaded_domain,
cascaded_api_ip, v2v_gw):
self.cascading_ip = cascading_ip
self.user = user
self.password = password
self.cascaded_domain = cascaded_domain
self.cascaded_ip = cascaded_api_ip
self.v2v_gw = v2v_gw
def do_config(self):
start_time = time.time()
LOG.info("start config cascading, cascading: %s" % self.cascading_ip)
# modify dns server address
address = "/%(cascaded_domain)s/%(cascaded_ip)s" \
% {"cascaded_domain": self.cascaded_domain,
"cascaded_ip": self.cascaded_ip}
for i in range(3):
try:
commonutils.execute_cmd_without_stdout(
host=self.cascading_ip,
user=self.user,
password=self.password,
cmd='cd %(dir)s; sh %(script)s add %(address)s'
% {"dir": constant.Cascading.REMOTE_SCRIPTS_DIR,
"script": constant.PublicConstant.
MODIFY_DNS_SERVER_ADDRESS,
"address": address})
break
except exception.SSHCommandFailure as e:
LOG.error("modify cascading dns address error, cascaded: "
"%s, error: %s"
% (self.cascaded_domain, e.format_message()))
time.sleep(1)
LOG.info(
"config cascading dns address success, cascading: %s"
% self.cascading_ip)
# config keystone
for i in range(3):
try:
commonutils.execute_cmd_without_stdout(
host=self.cascading_ip,
user=self.user,
password=self.password,
cmd='cd %(dir)s;'
'sh %(script)s %(cascaded_domain)s'
% {"dir": constant.RemoveConstant.REMOTE_SCRIPTS_DIR,
"script":
constant.RemoveConstant.REMOVE_KEYSTONE_SCRIPT,
"cascaded_domain": self.cascaded_domain})
commonutils.execute_cmd_without_stdout(
host=self.cascading_ip,
user=self.user,
password=self.password,
cmd='cd %(dir)s;'
'sh %(script)s %(cascaded_domain)s %(v2v_gw)s'
% {"dir": constant.Cascading.REMOTE_SCRIPTS_DIR,
"script":
constant.Cascading.KEYSTONE_ENDPOINT_SCRIPT,
"cascaded_domain": self.cascaded_domain,
"v2v_gw": self.v2v_gw})
break
except exception.SSHCommandFailure as e:
LOG.error(
"create keystone endpoint error, cascaded: %s, error: %s"
% (self.cascaded_domain, e.format_message()))
time.sleep(1)
LOG.info("config cascading keystone success, cascading: %s"
% self.cascading_ip)
for i in range(3):
try:
commonutils.execute_cmd_without_stdout(
host=self.cascading_ip,
user=self.user,
password=self.password,
cmd='cd %(dir)s; sh %(script)s %(cascaded_domain)s'
% {"dir": constant.Cascading.REMOTE_SCRIPTS_DIR,
"script":
constant.Cascading.ENABLE_OPENSTACK_SERVICE,
"cascaded_domain": self.cascaded_domain})
break
except exception.SSHCommandFailure as e:
LOG.error(
"enable openstack service error, cascaded: %s, error: %s"
% (self.cascaded_domain, e.format_message()))
time.sleep(1)
LOG.info("enable openstack service success, cascading: %s"
% self.cascading_ip)
cost_time = time.time() - start_time
LOG.info("config cascading success, cascading: %s, cost time: %d"
% (self.cascading_ip, cost_time))
| {
"content_hash": "a779d5549c329061da8ca25e953612bb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 41.74311926605505,
"alnum_prop": 0.49164835164835163,
"repo_name": "Hybrid-Cloud/orchard",
"id": "88ee9f64656062720879c717b7f921eb8dc15ef2",
"size": "4573",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "code/cloudmanager/cascading_configer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1067396"
},
{
"name": "Shell",
"bytes": "49859"
}
],
"symlink_target": ""
} |
"""Adds config flow for Mill integration."""
import logging
from mill import Mill
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
class MillConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Mill integration."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors={},
)
username = user_input[CONF_USERNAME].replace(" ", "")
password = user_input[CONF_PASSWORD].replace(" ", "")
mill_data_connection = Mill(
username, password, websession=async_get_clientsession(self.hass),
)
errors = {}
if not await mill_data_connection.connect():
errors["connection_error"] = "connection_error"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors,
)
unique_id = username
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=unique_id, data={CONF_USERNAME: username, CONF_PASSWORD: password},
)
| {
"content_hash": "d659e7ae8c3362c7f5ad87dfa873374a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 31.01818181818182,
"alnum_prop": 0.64947245017585,
"repo_name": "titilambert/home-assistant",
"id": "08eb0f5c5361367af8208be3873eff560634019c",
"size": "1706",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mill/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |