'''
Script to generate:
a. measurement units, their max, min and avg values from the data/metadata generated in previous steps.
b. clustering of units based on domain, e.g. metre, centimeter -> LENGTH, joule -> ENERGY, etc.
c. clustering of units based on MIME type
Requirements:
i. A map of units and domains - available in unitMap.json
ii. A map of filename to MIME type - available in fileMap.json (built on my system on our data;
please see the steps below to understand how you can build it for your use)
Flow of this program (IMPORTANT that you read this in order to build all the requirements):
------------------------------------------------------------------------------------------
i. Use ./mappingUnitToDomain.py to generate mapping from UNIT->DOMAIN (already created)
This requires a file unit.txt which has a dictionary of Domain name and related units.
Creates a file unitMap.json which will be used in this program.
Note: No need to do anything unless you wish to add a unit not present here!
ii. Use ./mappingFileNameToMimeType.py to generate the mapping from FILE_NAME->MIME type.
This requires all the file paths and MIME types present in the fulldump-path-all-json folder.
Note1: These files containing MIME types and file names were created using Apache Tika (see https://github.com/harshfatepuria/data-analysis-test)
Creates a file fileMap.json which will be used in this program.
Note2: The folder contains a fileMap.json that can be used as a baseline built from the TREC Polar Dynamic Domain Dataset Fulldump (~1.7 M files).
So, if you are using the same dataset, there is no need to change anything.
Note3: If you are using the Polar fulldump dataset, you can use the same fileMap. It is ~111 MB and can be downloaded from:
https://drive.google.com/file/d/0ByYnDjKhosqbNWhEVVFnVmJXYWs/view?usp=sharing
iii. The program requires the extracted measurement metadata present in the folder 'measurement/'.
This metadata was generated using a hybrid measurement parser (wrapped as a Tika parser) which leverages Tika's text extraction capabilities
and a new measurement extractor model developed as part of the previous project - Scientific Content Enrichment.
(See https://github.com/harshfatepuria/Scientific-Content-Enrichment-in-the-Text-Retrieval-Conference-TREC-Polar-Dynamic-Domain-Dataset/tree/master/cs599-content-enrichment/src/main/java/measurement)
iv. This program prepares the files for the D3 Circle Packing visualization (Mike Bostock's library; see http://bl.ocks.org/mbostock/4063530 )
For queries/ comments/ suggestions/ doubts, contact the collaborators of the project:
Rahul Agrawal(rahulagr@usc.edu), Harsh Fatepuria(fatepuri@usc.edu), Warut Roadrungwasinkul(roadrung@usc.edu)
Grad Students, Dept. of CSE, University of Southern California, CA 90089
'''
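# Illustrative sketch, not part of the original repo: based on how unitMap and filemap
# are used below, the two JSON files are assumed to look roughly like
#   unitMap.json: {"metre": "LENGTH", "centimeter": "LENGTH", "joule": "ENERGY", ...}
#   fileMap.json: {"<file name without extension>": "application/pdf", ...}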
import sys
import time
from glob import iglob
import json
import os
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
print time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()), " script started... loading unitMap... "
f= open("unitMap.json")
unitMap= json.loads(f.read())
f.close()
print time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()), " unitMap loaded... loading fileMap... "
f= open("fileMap.json")
filemap=eval(f.read())
f.close()
print time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()), " fileMap loaded... started parsing the files...\n"
meaDomain={}
meaMime={}
mea={}
cnt=0
fileR="result/"
for path, subdirs, files in os.walk(fileR):
for FP in files:
filepath= path+'/'+FP
#print filepath
f = open(filepath,"r")
data= eval(f.read())
#print data
for idx, i in enumerate(data["metadata"]["measurement_unit"]):
#print i
############################################################################################
#Finding measurement values from the files in 'measurement/' folder and adding them to a dictionary
#(enumerate is used instead of list.index so repeated units are paired with their own values)
############################################################################################
if i in mea:
mea[i].append(float(data["metadata"]["measurement_value"][idx]))
else:
mea[i]=[float(data["metadata"]["measurement_value"][idx])]
################################
#Clustering based on Domain Type
################################
if i in unitMap:
if unitMap[i] in meaDomain:
if i in meaDomain[unitMap[i]]:
meaDomain[unitMap[i]][i]= meaDomain[unitMap[i]][i] +1
else:
meaDomain[unitMap[i]][i]=1
else:
meaDomain[unitMap[i]]={}
meaDomain[unitMap[i]][i]=1
#############################
#Clustering based on MIME type
#############################
fileType= filemap[FP.split('.')[0]]
if fileType in meaMime:
if i in meaMime[fileType]:
meaMime[fileType][i]=meaMime[fileType][i]+1
else:
meaMime[fileType][i]=1
else:
meaMime[fileType]={}
meaMime[fileType][i]=1
cnt=cnt+1
#print(CURSOR_UP_ONE + ERASE_LINE+ CURSOR_UP_ONE)
print "Parsing file ", cnt
#break
print time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()), " ",cnt," files parsed... writing logs...\n"
# i. Creating a dictionary of min, max and average values for each unit and dumping results in a file
maxmin={}
op= open("meaMaxMinOutputJSON.json","w")
for k,v in mea.items():
maxmin[k]=[0,0,0]
maxmin[k][0]=min(v)
maxmin[k][1]=max(v)
maxmin[k][2]=sum(v)/float(len(v))
keys=json.dumps(maxmin, sort_keys=True)
op.write(keys)
op.close()
#print "MEA Data:\n--------\n",maxmin
# ii. Writing Measurement count data clustered based on Domain of units
'''
op= open("meaDomainOutputJSON.json","w")
keys=json.dumps(meaDomain, sort_keys=True)
op.write(keys)
op.close()
'''
flare={}
flare["name"]="Measurement Domain Cluster"
flare["children"]=[]
for k,v in meaDomain.items():
x={}
x["name"]=k
x["children"]=[]
for a,b in v.items():
y={}
y["name"]=a
y["size"]=b
x["children"].append(y)
flare["children"].append(x)
#print "\n",flare,"\n"
op= open("meaDomainOutputFlareJSONForD3.json","w")
keys=json.dumps(flare, sort_keys=True)
op.write(keys)
op.close()
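# For reference (hedged, values illustrative only): the flare JSON written above follows the
# nested name/children/size shape that D3's circle-packing layout expects, e.g.
# {"name": "Measurement Domain Cluster",
#  "children": [{"name": "LENGTH", "children": [{"name": "metre", "size": 42}, ...]}, ...]}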
# iii. Writing Measurement count data clustered based on MIME type of files
'''
op= open("meaMimeOutputJSON.json","w")
keys=json.dumps(meaMime, sort_keys=True)
op.write(str(keys))
op.close()
'''
flare={}
flare["name"]="Measurement MIME Cluster"
flare["children"]=[]
for k,v in meaMime.items():
x={}
x["name"]=k
x["children"]=[]
for a,b in v.items():
y={}
y["name"]=a
y["size"]=b
x["children"].append(y)
flare["children"].append(x)
#print "\n",flare,"\n"
op= open("meaMimeOutputFlareJSONForD3.json","w")
keys=json.dumps(flare, sort_keys=True)
op.write(keys)
op.close()
print time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()), "ending script...\n"
| {
"content_hash": "d55d2d4cd4e5b74bcb4a629ccb7ec3f8",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 201,
"avg_line_length": 34.86082474226804,
"alnum_prop": 0.6608014194883928,
"repo_name": "harshfatepuria/Evaluation-of-Content-Analysis-on-TREC-Polat-DD-Dataset",
"id": "65c27c924e4dc7aded54efe2dec81cd7cd564a8b",
"size": "6763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "measurement clustering/measurementParser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "77419"
},
{
"name": "Java",
"bytes": "73356"
},
{
"name": "JavaScript",
"bytes": "21685"
},
{
"name": "Python",
"bytes": "26740"
}
],
"symlink_target": ""
} |
from django.core.checks import Error, register
from feder.records.models import Record, AbstractRecord
from feder.records.registry import record_type_registry
@register()
def record_type_registry_fill_check(app_configs, **kwargs):
errors = []
for field in Record._meta.related_objects:
if (
issubclass(field.related_model, AbstractRecord)
and field.related_model not in record_type_registry
):
errors.append(
Error(
"Missing required record type definition.",
hint="Add missing required data type and load in AppConfig.",
obj=field.related_model,
id="records.E001",
)
)
return errors
| {
"content_hash": "a3568d19d4bd58c2f0e6cbe66732c356",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.5927835051546392,
"repo_name": "watchdogpolska/feder",
"id": "7241ee45c7f869f4f13792ffaa2f0a0bd11e3f49",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feder/records/checks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "183421"
},
{
"name": "JavaScript",
"bytes": "6245"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "574027"
},
{
"name": "SCSS",
"bytes": "40546"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
from nepal.resources import views
urlpatterns = patterns('',
url('^profile/edit/(\d+)/$', views.profile_edit, name='nepal-resources-profile-edit'),
url('^pack/create/$', views.pack_create, name='nepal-resources-pack-create'),
url('^pack/edit/(\d+)/$', views.pack_edit, name='nepal-resources-pack-edit'),
url('^pack/delete/(\d+)/$', views.pack_delete, name='nepal-resources-pack-delete'),
url('^pack/list/$', views.pack_list, name='nepal-resources-pack-list'),
)
| {
"content_hash": "e08de62d65ec3e9eb1198ee90576ac0b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 90,
"avg_line_length": 56.3,
"alnum_prop": 0.6483126110124334,
"repo_name": "hollow/nepal",
"id": "53a88f5d533da7b442b2d016d60ddb17e9b3dbbb",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nepal/resources/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "30932"
},
{
"name": "HTML",
"bytes": "62216"
},
{
"name": "JavaScript",
"bytes": "33285"
},
{
"name": "Python",
"bytes": "364392"
},
{
"name": "Shell",
"bytes": "2317"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from itertools import combinations, product
import kevlar
from kevlar.sequence import Record, KmerOfInterest
from kevlar.sequence import write_record, parse_augmented_fastx
from khmer import Counttable
from networkx import Graph, connected_components
from re import search
class KevlarPartitionLabelError(ValueError):
pass
def parse_fasta(data):
"""Load sequences in Fasta format.
This generator function yields a tuple containing a defline and a sequence
for each record in the Fasta data. Stolen shamelessly from
http://stackoverflow.com/a/7655072/459780.
"""
name, seq = None, []
for line in data:
line = line.rstrip()
if line.startswith('>'):
if name:
yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: # pragma: no cover
yield (name, ''.join(seq))
def parse_seq_dict(data):
"""Load sequences from a Fasta file into a dictionary."""
seqs = dict()
for defline, sequence in parse_fasta(data):
seqid = defline[1:].replace('\t', ' ').split(' ')[0]
assert seqid not in seqs, seqid
seqs[seqid] = sequence
return seqs
def afxstream(filelist):
for infile in filelist:
fh = kevlar.open(infile, 'r')
for record in parse_augmented_fastx(fh):
yield record
def partition_id(readname):
partmatch = search(r'kvcc=(\d+)', readname)
if not partmatch:
return None
return partmatch.group(1)
def parse_partitioned_reads(readstream):
current_part = None
reads = list()
for read in readstream:
name = read.name if hasattr(read, 'name') else read.defline
part = partition_id(name)
if part is None:
reads.append(read)
current_part = False
continue
if current_part is False:
message = 'reads with and without partition labels (kvcc=#)'
raise KevlarPartitionLabelError(message)
if part != current_part:
if current_part:
yield current_part, reads
reads = list()
current_part = part
reads.append(read)
if current_part is False:
current_part = None
yield current_part, reads
def parse_single_partition(readstream, partid):
"""
Retrieve a single partition (by label) from a stream of partitioned reads.
"""
for pid, partition in parse_partitioned_reads(readstream):
if pid == partid:
yield pid, partition
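# Hedged usage sketch (not part of the original module): given augmented reads whose
# deflines carry partition labels such as "read1 kvcc=7", parse_partitioned_reads()
# yields ('7', [reads ...]) groups, while a stream with no labels yields a single
# (None, [reads ...]) group; the KevlarPartitionLabelError above guards against streams
# that mix labeled and unlabeled reads.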
| {
"content_hash": "735d0a81fcebaecea4e0fc375914a3b0",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 28.293478260869566,
"alnum_prop": 0.6258163657318478,
"repo_name": "dib-lab/kevlar",
"id": "57df89d11023d9d6291a27c233e65b05cb1c8356",
"size": "2974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kevlar/seqio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3342"
},
{
"name": "C++",
"bytes": "16738"
},
{
"name": "Dockerfile",
"bytes": "1538"
},
{
"name": "Makefile",
"bytes": "2648"
},
{
"name": "Python",
"bytes": "488299"
},
{
"name": "Shell",
"bytes": "4576"
}
],
"symlink_target": ""
} |
"""A collection of data structures that are particularly
useful for developing and improving a classifier
"""
import numpy
import json
class ConfusionMatrix(object):
"""Confusion matrix for evaluating a classifier
For more information on confusion matrix en.wikipedia.org/wiki/Confusion_matrix
"""
INIT_NUM_CLASSES = 100
NEGATIVE_CLASS = '__NEGATIVE_CLASS__'
def __init__(self, alphabet=None):
if alphabet is None:
self.alphabet = Alphabet()
self.matrix = numpy.zeros((self.INIT_NUM_CLASSES, self.INIT_NUM_CLASSES))
else:
self.alphabet = alphabet
num_classes = alphabet.size()
self.matrix = numpy.zeros((num_classes, num_classes))
def __iadd__(self, other):
self.matrix += other.matrix
return self
def add(self, prediction, true_answer):
"""Add one data point to the confusion matrix
If prediction is an integer, we assume that it's a legitimate index
on the confusion matrix.
If prediction is a string, then we will do the look up to
map to the integer index for the confusion matrix.
"""
if type(prediction) == int and type(true_answer) == int:
self.matrix[prediction, true_answer] += 1
else:
self.alphabet.add(prediction)
self.alphabet.add(true_answer)
prediction_index = self.alphabet.get_index(prediction)
true_answer_index = self.alphabet.get_index(true_answer)
self.matrix[prediction_index, true_answer_index] += 1
# XXX: this will fail if the prediction_index is greater than
# the initial capacity. I should grow the matrix if this crashes
def add_list(self, predictions, true_answers):
"""Add a list of data point to the confusion matrix
A list can be a list of integers.
If prediction is an integer, we assume that it's a legitimate index
on the confusion matrix.
A list can be a list of strings.
If prediction is a string, then we will do the look up to
map to the integer index for the confusion matrix.
"""
for p, t in zip(predictions, true_answers):
self.add(p, t)
def get_prf_for_i(self, i):
"""Compute precision, recall, and f1 score for a given index."""
if sum(self.matrix[i, :]) == 0:
precision = 1.0
else:
precision = self.matrix[i, i] / sum(self.matrix[i, :])
if sum(self.matrix[:, i]) == 0:
recall = 1.0
else:
recall = self.matrix[i, i] / sum(self.matrix[:, i])
if precision + recall != 0.0:
f1 = 2.0 * precision * recall / (precision + recall)
else:
f1 = 0.0
return (precision, recall, f1)
def get_prf_for_all(self):
"""Compute precision, recall, and f1 score for all indexes."""
precision = numpy.zeros(self.alphabet.size())
recall = numpy.zeros(self.alphabet.size())
f1 = numpy.zeros(self.alphabet.size())
# compute precision, recall, and f1
for i in xrange(self.alphabet.size()):
precision[i], recall[i], f1[i] = self.get_prf_for_i(i)
return (precision, recall, f1)
def get_prf(self, class_name):
"""Compute precision, recall, and f1 score for a given class. """
i = self.alphabet.get_index(class_name)
return self.get_prf_for_i(i)
def compute_micro_average_f1(self):
total_correct = 0.0
for i in xrange(self.alphabet.size()):
total_correct += self.matrix[i, i]
negative_index = self.alphabet.get_index(self.NEGATIVE_CLASS)
total_predicted = numpy.sum([x for i, x in enumerate(self.matrix.sum(1)) \
if negative_index == -1 or i != negative_index])
total_gold = numpy.sum([x for i, x in enumerate(self.matrix.sum(0)) \
if negative_index == -1 or i != negative_index])
if total_predicted == 0:
precision = 1.0
else:
precision = total_correct / total_predicted
if total_gold == 0:
recall = 1.0
else:
recall = total_correct / total_gold
if precision + recall != 0.0:
f1_score = 2.0 * (precision * recall) / (precision + recall)
else:
f1_score = 0.0
return (round(precision, 4), round(recall, 4), round(f1_score, 4))
def compute_average_f1(self):
precision, recall, f1 = self.get_prf_for_all()
return numpy.mean(f1)
def compute_average_prf(self):
precision, recall, f1 = self.get_prf_for_all()
return (round(numpy.mean(precision), 4),
round(numpy.mean(recall), 4),
round(numpy.mean(f1), 4))
def print_matrix(self):
num_classes = self.alphabet.size()
# header for the confusion matrix
header = [' '] + [self.alphabet.get_label(i) for i in xrange(num_classes)]
rows = []
# putting labels in the first column of the matrix
for i in xrange(num_classes):
row = [self.alphabet.get_label(i)] + [str(self.matrix[i, j]) for j in xrange(num_classes)]
rows.append(row)
print "row = predicted, column = truth"
print matrix_to_string(rows, header)
def print_summary(self):
precision = numpy.zeros(self.alphabet.size())
recall = numpy.zeros(self.alphabet.size())
f1 = numpy.zeros(self.alphabet.size())
max_len = 0
for i in xrange(self.alphabet.size()):
label = self.alphabet.get_label(i)
if label != self.NEGATIVE_CLASS and len(label) > max_len:
max_len = len(label)
lines = []
correct = 0.0
# compute precision, recall, and f1
for i in xrange(self.alphabet.size()):
precision[i], recall[i], f1[i] = self.get_prf_for_i(i)
correct += self.matrix[i, i]
label = self.alphabet.get_label(i)
if label != self.NEGATIVE_CLASS:
space = ' ' * (max_len - len(label) + 1)
lines.append('%s%s precision %1.4f\trecall %1.4f\tF1 %1.4f' % \
(label, space, precision[i], recall[i], f1[i]))
precision, recall, f1 = self.compute_micro_average_f1()
space = ' ' * (max_len - 14 + 1)
lines.append('*Micro-Average%s precision %1.4f\trecall %1.4f\tF1 %1.4f' % \
(space, numpy.mean(precision), numpy.mean(recall), numpy.mean(f1)))
lines.sort()
print '\n'.join(lines)
def print_out(self):
"""Printing out confusion matrix along with Macro-F1 score"""
self.print_matrix()
self.print_summary()
def matrix_to_string(matrix, header=None):
"""
Return a pretty, aligned string representation of a nxm matrix.
This representation can be used to print any tabular data, such as
database results. It works by scanning the lengths of each element
in each column, and determining the format string dynamically.
the implementation is adapted from here
mybravenewworld.wordpress.com/2010/09/19/print-tabular-data-nicely-using-python/
Args:
matrix - Matrix representation (list with n rows of m elements).
header - Optional tuple or list with header elements to be displayed.
Returns:
nicely formatted matrix string
"""
if isinstance(header, list):
header = tuple(header)
lengths = []
if header:
lengths = [len(column) for column in header]
# finding the max length of each column
for row in matrix:
# enumerate avoids list.index, which misidentifies columns when a row contains duplicate values
for i, column in enumerate(row):
column = str(column)
column_length = len(column)
try:
max_length = lengths[i]
if column_length > max_length:
lengths[i] = column_length
except IndexError:
lengths.append(column_length)
# use the lengths to derive a formatting string
lengths = tuple(lengths)
format_string = ""
for length in lengths:
format_string += "%-" + str(length) + "s "
format_string += "\n"
# applying formatting string to get matrix string
matrix_str = ""
if header:
matrix_str += format_string % header
for row in matrix:
matrix_str += format_string % tuple(row)
return matrix_str
class Alphabet(object):
"""Two way map for label and label index
It is an essentially a code book for labels or features
This class makes it convenient for us to use numpy.array
instead of dictionary because it allows us to use index instead of
label string. The implemention of classifiers uses label index space
instead of label string space.
"""
def __init__(self):
self._index_to_label = {}
self._label_to_index = {}
self.num_labels = 0
self.growing = True
def __len__(self):
return self.size()
def __eq__(self, other):
return self._index_to_label == other._index_to_label and \
self._label_to_index == other._label_to_index and \
self.num_labels == other.num_labels
def size(self):
return self.num_labels
def has_label(self, label):
return label in self._label_to_index
def get_label(self, index):
"""Get label from index"""
if index >= self.num_labels:
raise KeyError("There are %d labels but the index is %d" % (self.num_labels, index))
return self._index_to_label[index]
def get_index(self, label):
"""Get index from label"""
if not self.has_label(label):
if self.growing:
self.add(label)
else:
return -1
return self._label_to_index[label]
def add(self, label):
"""Add an index for the label if it's a new label"""
if label not in self._label_to_index:
if not self.growing:
raise ValueError(
'Alphabet is not set to grow i.e. accepting new labels')
self._label_to_index[label] = self.num_labels
self._index_to_label[self.num_labels] = label
self.num_labels += 1
def json_dumps(self):
return json.dumps(self.to_dict())
@classmethod
def json_loads(cls, json_string):
json_dict = json.loads(json_string)
return Alphabet.from_dict(json_dict)
def to_dict(self):
return {
'_label_to_index': self._label_to_index
}
@classmethod
def from_dict(cls, alphabet_dictionary):
"""Create an Alphabet from dictionary
alphabet_dictionary is a dictionary with only one field
_label_to_index which is a map from label to index
and should be created with to_dict method above.
"""
alphabet = cls()
alphabet._label_to_index = alphabet_dictionary['_label_to_index']
alphabet._index_to_label = {}
for label, index in alphabet._label_to_index.items():
alphabet._index_to_label[index] = label
# making sure that the dimension agrees
assert (len(alphabet._index_to_label) == len(alphabet._label_to_index))
alphabet.num_labels = len(alphabet._index_to_label)
return alphabet
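# Hedged usage sketch (illustrative, not from the original file):
#   cm = ConfusionMatrix()
#   cm.add_list(['pos', 'neg', 'pos'], ['pos', 'pos', 'neg'])
#   precision, recall, f1 = cm.get_prf('pos')
#   cm.print_out()   # confusion matrix plus per-class and micro-averaged P/R/F1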
| {
"content_hash": "576937b040a7e1d77b6067bc9384c280",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 102,
"avg_line_length": 35.55727554179567,
"alnum_prop": 0.5856334349151067,
"repo_name": "jimmycallin/master-thesis",
"id": "a428921bafe206eeda19909a682e4310bfa68378",
"size": "11509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "architectures/conll16st-hd-sdp/confusion_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "565920"
},
{
"name": "Python",
"bytes": "1220524"
},
{
"name": "Shell",
"bytes": "86524"
},
{
"name": "TeX",
"bytes": "75591"
}
],
"symlink_target": ""
} |
import getopt
import sys
from coapthon.server.coap import CoAP
from exampleresources import BasicResource,BasicResource1,BasicResource2,BasicResource3,BasicResource4,BasicResource5,BasicResource6,BasicResource7 , Long, Separate, Storage, Big, voidResource, XMLResource, ETAGResource, \
Child, \
MultipleEncodingResource, AdvancedResource, AdvancedResourceSeparate
__author__ = 'Giacomo Tanganelli'
class CoAPServer(CoAP):
def __init__(self, host, port, multicast=False):
CoAP.__init__(self, (host, port), multicast)
self.add_resource('basic/', BasicResource())
self.add_resource('basic1/', BasicResource1())
self.add_resource('basic2/', BasicResource2())
self.add_resource('basic3/', BasicResource3())
self.add_resource('basic4/', BasicResource4())
self.add_resource('basic5/', BasicResource5())
self.add_resource('basic6/', BasicResource6())
self.add_resource('basic7/', BasicResource7())
self.add_resource('storage/', Storage())
self.add_resource('separate/', Separate())
self.add_resource('long/', Long())
self.add_resource('big/', Big())
self.add_resource('void/', voidResource())
self.add_resource('xml/', XMLResource())
self.add_resource('encoding/', MultipleEncodingResource())
self.add_resource('etag/', ETAGResource())
self.add_resource('child/', Child())
self.add_resource('advanced/', AdvancedResource())
self.add_resource('advancedSeparate/', AdvancedResourceSeparate())
print "CoAP Server start on " + host + ":" + str(port)
print self.root.dump()
def usage(): # pragma: no cover
print "coapserver.py -i <ip address> -p <port>"
def main(argv): # pragma: no cover
ip = "0.0.0.0"
port = 5683
multicast = False
try:
opts, args = getopt.getopt(argv, "hi:p:m", ["ip=", "port=", "multicast"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-i", "--ip"):
ip = arg
elif opt in ("-p", "--port"):
port = int(arg)
elif opt in ("-m", "--multicast"):
multicast = True
server = CoAPServer(ip, port, multicast)
try:
server.listen(10)
except KeyboardInterrupt:
print "Server Shutdown"
server.close()
print "Exiting..."
if __name__ == "__main__": # pragma: no cover
main(sys.argv[1:])
| {
"content_hash": "1330c85efce37ec652db44f1b923b1b0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 222,
"avg_line_length": 35.361111111111114,
"alnum_prop": 0.6087981146897093,
"repo_name": "thejdeep/CoAPthon",
"id": "c9d61ba3d701b63ab5d81e1a0e5667961e505aaa",
"size": "2569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coapserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "597346"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name='sogemood-backend',
version='1.0',
packages=[''],
url='sogemood.appspot.com',
license='MIT',
author='mwerlen',
author_email='maxime@werlen.fr',
description='SogeMood\'s Backend'
)
| {
"content_hash": "62b5a026f7c713c0434c52019e7d5268",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 37,
"avg_line_length": 21.5,
"alnum_prop": 0.6395348837209303,
"repo_name": "SogeMood/sogemood-backend",
"id": "90d8d1638a20b26c58ede927d8edd43e9e26ad10",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15418"
}
],
"symlink_target": ""
} |
"""Implements the majority of smart_open's top-level API.
The main functions are:
* ``parse_uri()``
* ``open()``
"""
import collections
import io
import locale
import logging
import os
import os.path as P
import pathlib
import urllib.parse
import warnings
#
# This module defines a function called smart_open so we cannot use
# smart_open.submodule to reference to the submodules.
#
import smart_open.local_file as so_file
import smart_open.compression as so_compression
from smart_open import doctools
from smart_open import transport
#
# For backwards compatibility and keeping old unit tests happy.
#
from smart_open.compression import register_compressor # noqa: F401
from smart_open.utils import check_kwargs as _check_kwargs # noqa: F401
from smart_open.utils import inspect_kwargs as _inspect_kwargs # noqa: F401
logger = logging.getLogger(__name__)
DEFAULT_ENCODING = locale.getpreferredencoding(do_setlocale=False)
def _sniff_scheme(uri_as_string):
"""Returns the scheme of the URL only, as a string."""
#
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
# no protocol given => assume a local file
#
if os.name == 'nt' and '://' not in uri_as_string:
uri_as_string = 'file://' + uri_as_string
return urllib.parse.urlsplit(uri_as_string).scheme
def parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Parameters
----------
uri_as_string: str
The URI to parse.
Returns
-------
collections.namedtuple
The parsed URI.
Notes
-----
smart_open/doctools.py magic goes here
"""
scheme = _sniff_scheme(uri_as_string)
submodule = transport.get_transport(scheme)
as_dict = submodule.parse_uri(uri_as_string)
#
# The conversion to a namedtuple is just to keep the old tests happy while
# I'm still refactoring.
#
Uri = collections.namedtuple('Uri', sorted(as_dict.keys()))
return Uri(**as_dict)
#
# To keep old unit tests happy while I'm refactoring.
#
_parse_uri = parse_uri
_builtin_open = open
def open(
uri,
mode='r',
buffering=-1,
encoding=None,
errors=None,
newline=None,
closefd=True,
opener=None,
compression=so_compression.INFER_FROM_EXTENSION,
transport_params=None,
):
r"""Open the URI object, returning a file-like object.
The URI is usually a string in a variety of formats.
For a full list of examples, see the :func:`parse_uri` function.
The URI may also be one of:
- an instance of the pathlib.Path class
- a stream (anything that implements io.IOBase-like functionality)
Parameters
----------
uri: str or object
The object to open.
mode: str, optional
Mimics built-in open parameter of the same name.
buffering: int, optional
Mimics built-in open parameter of the same name.
encoding: str, optional
Mimics built-in open parameter of the same name.
errors: str, optional
Mimics built-in open parameter of the same name.
newline: str, optional
Mimics built-in open parameter of the same name.
closefd: boolean, optional
Mimics built-in open parameter of the same name. Ignored.
opener: object, optional
Mimics built-in open parameter of the same name. Ignored.
compression: str, optional (see smart_open.compression.get_supported_compression_types)
Explicitly specify the compression/decompression behavior.
transport_params: dict, optional
Additional parameters for the transport layer (see notes below).
Returns
-------
A file-like object.
Notes
-----
smart_open has several implementations for its transport layer (e.g. S3, HTTP).
Each transport layer has a different set of keyword arguments for overriding
default behavior. If you specify a keyword argument that is *not* supported
by the transport layer being used, smart_open will ignore that argument and
log a warning message.
smart_open/doctools.py magic goes here
See Also
--------
- `Standard library reference <https://docs.python.org/3.7/library/functions.html#open>`__
- `smart_open README.rst
<https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst>`__
"""
logger.debug('%r', locals())
if not isinstance(mode, str):
raise TypeError('mode should be a string')
if compression not in so_compression.get_supported_compression_types():
raise ValueError(f'invalid compression type: {compression}')
if transport_params is None:
transport_params = {}
fobj = _shortcut_open(
uri,
mode,
compression=compression,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
if fobj is not None:
return fobj
#
# This is a work-around for the problem described in Issue #144.
# If the user has explicitly specified an encoding, then assume they want
# us to open the destination in text mode, instead of the default binary.
#
# If we change the default mode to be text, and match the normal behavior
# of Py2 and 3, then the above assumption will be unnecessary.
#
if encoding is not None and 'b' in mode:
mode = mode.replace('b', '')
if isinstance(uri, pathlib.Path):
uri = str(uri)
explicit_encoding = encoding
encoding = explicit_encoding if explicit_encoding else DEFAULT_ENCODING
#
# This is how we get from the filename to the end result. Decompression is
# optional, but it always accepts bytes and returns bytes.
#
# Decoding is also optional, accepts bytes and returns text. The diagram
# below is for reading, for writing, the flow is from right to left, but
# the code is identical.
#
# open as binary decompress? decode?
# filename ---------------> bytes -------------> bytes ---------> text
# binary decompressed decode
#
try:
binary_mode = _get_binary_mode(mode)
except ValueError as ve:
raise NotImplementedError(ve.args[0])
binary = _open_binary_stream(uri, binary_mode, transport_params)
decompressed = so_compression.compression_wrapper(binary, binary_mode, compression)
if 'b' not in mode or explicit_encoding is not None:
decoded = _encoding_wrapper(
decompressed,
mode,
encoding=encoding,
errors=errors,
newline=newline,
)
else:
decoded = decompressed
#
# There are some useful methods in the binary readers, e.g. to_boto3, that get
# hidden by the multiple layers of wrapping we just performed. Promote
# them so they are visible to the user.
#
if decoded != binary:
promoted_attrs = ['to_boto3']
for attr in promoted_attrs:
try:
setattr(decoded, attr, getattr(binary, attr))
except AttributeError:
pass
return decoded
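#
# Hedged usage sketch (illustrative only, not part of the library source); the exact
# transport_params depend on which transport layer handles the URI scheme:
#
#   with open('example.txt.gz', 'r', encoding='utf-8') as fin:
#       text = fin.read()        # local file, decompressed and decoded transparently
#
#   with open('s3://bucket/key.bin', 'rb') as fin:
#       payload = fin.read()     # binary stream served by the registered S3 transport
#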
def _get_binary_mode(mode_str):
#
# https://docs.python.org/3/library/functions.html#open
#
# The order of characters in the mode parameter appears to be unspecified.
# The implementation follows the examples, just to be safe.
#
mode = list(mode_str)
binmode = []
if 't' in mode and 'b' in mode:
raise ValueError("can't have text and binary mode at once")
counts = [mode.count(x) for x in 'rwa']
if sum(counts) > 1:
raise ValueError("must have exactly one of create/read/write/append mode")
def transfer(char):
binmode.append(mode.pop(mode.index(char)))
if 'a' in mode:
transfer('a')
elif 'w' in mode:
transfer('w')
elif 'r' in mode:
transfer('r')
else:
raise ValueError(
"Must have exactly one of create/read/write/append "
"mode and at most one plus"
)
if 'b' in mode:
transfer('b')
elif 't' in mode:
mode.pop(mode.index('t'))
binmode.append('b')
else:
binmode.append('b')
if '+' in mode:
transfer('+')
#
# There shouldn't be anything left in the mode list at this stage.
# If there is, then either we've missed something and the implementation
# of this function is broken, or the original input mode is invalid.
#
if mode:
raise ValueError('invalid mode: %r' % mode_str)
return ''.join(binmode)
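#
# For reference, derived from the logic above (descriptive note, not in the original source):
# _get_binary_mode('r') -> 'rb', _get_binary_mode('rt') -> 'rb', _get_binary_mode('w+') -> 'wb+'.
#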
def _shortcut_open(
uri,
mode,
compression,
buffering=-1,
encoding=None,
errors=None,
newline=None,
):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file; and
2. Compression is disabled
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param str compression: The compression type selected.
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, str):
return None
scheme = _sniff_scheme(uri)
if scheme not in (transport.NO_SCHEME, so_file.SCHEME):
return None
local_path = so_file.extract_local_path(uri)
if compression == so_compression.INFER_FROM_EXTENSION:
_, extension = P.splitext(local_path)
if extension in so_compression.get_supported_extensions():
return None
elif compression != so_compression.NO_COMPRESSION:
return None
open_kwargs = {}
if encoding is not None:
open_kwargs['encoding'] = encoding
mode = mode.replace('b', '')
if newline is not None:
open_kwargs['newline'] = newline
#
# binary mode of the builtin/stdlib open function doesn't take an errors argument
#
if errors and 'b' not in mode:
open_kwargs['errors'] = errors
return _builtin_open(local_path, mode, buffering=buffering, **open_kwargs)
def _open_binary_stream(uri, mode, transport_params):
"""Open an arbitrary URI in the specified binary mode.
Not all modes are supported for all protocols.
:arg uri: The URI to open. May be a string, or something else.
:arg str mode: The mode to open with. Must be rb, wb or ab.
:arg transport_params: Keyword arguments for the transport layer.
:returns: A named file object
:rtype: file-like object with a .name attribute
"""
if mode not in ('rb', 'rb+', 'wb', 'wb+', 'ab', 'ab+'):
#
# This should really be a ValueError, but for the sake of compatibility
# with older versions, which raise NotImplementedError, we do the same.
#
raise NotImplementedError('unsupported mode: %r' % mode)
if isinstance(uri, int):
#
# We're working with a file descriptor. If we open it, its name is
# just the integer value, which isn't helpful. Unfortunately, there's
# no easy cross-platform way to go from a file descriptor to the filename,
# so we just give up here. The user will have to handle their own
# compression, etc. explicitly.
#
fobj = _builtin_open(uri, mode, closefd=False)
return fobj
if not isinstance(uri, str):
raise TypeError("don't know how to handle uri %s" % repr(uri))
scheme = _sniff_scheme(uri)
submodule = transport.get_transport(scheme)
fobj = submodule.open_uri(uri, mode, transport_params)
if not hasattr(fobj, 'name'):
fobj.name = uri
return fobj
def _encoding_wrapper(fileobj, mode, encoding=None, errors=None, newline=None):
"""Decode bytes into text, if necessary.
If mode specifies binary access, does nothing, unless the encoding is
specified. A non-null encoding implies text mode.
:arg fileobj: must quack like a filehandle object.
:arg str mode: is the mode which was originally requested by the user.
:arg str encoding: The text encoding to use. If mode is binary, overrides mode.
:arg str errors: The method to use when handling encoding/decoding errors.
:returns: a file object
"""
logger.debug('encoding_wrapper: %r', locals())
#
# If the mode is binary, but the user specified an encoding, assume they
# want text. If we don't make this assumption, ignore the encoding and
# return bytes, smart_open behavior will diverge from the built-in open:
#
# open(filename, encoding='utf-8') returns a text stream in Py3
# smart_open(filename, encoding='utf-8') would return a byte stream
# without our assumption, because the default mode is rb.
#
if 'b' in mode and encoding is None:
return fileobj
if encoding is None:
encoding = DEFAULT_ENCODING
fileobj = io.TextIOWrapper(
fileobj,
encoding=encoding,
errors=errors,
newline=newline,
write_through=True,
)
return fileobj
class patch_pathlib(object):
"""Replace `Path.open` with `smart_open.open`"""
def __init__(self):
self.old_impl = _patch_pathlib(open)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
_patch_pathlib(self.old_impl)
def _patch_pathlib(func):
"""Replace `Path.open` with `func`"""
old_impl = pathlib.Path.open
pathlib.Path.open = func
return old_impl
def smart_open(
uri,
mode='rb',
buffering=-1,
encoding=None,
errors=None,
newline=None,
closefd=True,
opener=None,
ignore_extension=False,
**kwargs
):
#
# This is a thin wrapper of smart_open.open. It's here for backward
# compatibility. It works exactly like smart_open.open when the passed
# parameters are identical. Otherwise, it raises a DeprecationWarning.
#
# For completeness, the main differences of the old smart_open function:
#
# 1. Default mode was read binary (mode='rb')
# 2. compression parameter was called ignore_extension
# 3. Transport parameters were passed directly as kwargs
#
url = 'https://github.com/RaRe-Technologies/smart_open/blob/develop/MIGRATING_FROM_OLDER_VERSIONS.rst'
if kwargs:
raise DeprecationWarning(
'The following keyword parameters are not supported: %r. '
'See %s for more information.' % (sorted(kwargs), url)
)
message = 'This function is deprecated. See %s for more information' % url
warnings.warn(message, category=DeprecationWarning)
if ignore_extension:
compression = so_compression.NO_COMPRESSION
else:
compression = so_compression.INFER_FROM_EXTENSION
del kwargs, url, message, ignore_extension
return open(**locals())
#
# Prevent failures with doctools from messing up the entire library. We don't
# expect such failures, but contributed modules (e.g. new transport mechanisms)
# may not be as polished.
#
try:
doctools.tweak_open_docstring(open)
doctools.tweak_parse_uri_docstring(parse_uri)
except Exception as ex:
logger.error(
'Encountered a non-fatal error while building docstrings (see below). '
'help(smart_open) will provide incomplete information as a result. '
'For full help text, see '
'<https://github.com/RaRe-Technologies/smart_open/blob/master/help.txt>.'
)
logger.exception(ex)
| {
"content_hash": "85ea9de4834c0352cda1a2ee1cc366ff",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 106,
"avg_line_length": 30.857699805068226,
"alnum_prop": 0.6435881238155401,
"repo_name": "RaRe-Technologies/smart_open",
"id": "b827a2210194e86f4cf35a3dbe1c5905f344271b",
"size": "16006",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "smart_open/smart_open_lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "414510"
},
{
"name": "Shell",
"bytes": "4358"
}
],
"symlink_target": ""
} |
from typing import MutableMapping, MutableSequence
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import postal_address_pb2 # type: ignore
import proto # type: ignore
from google.cloud.channel_v1.types import common
__protobuf__ = proto.module(
package="google.cloud.channel.v1",
manifest={
"Customer",
"ContactInfo",
},
)
class Customer(proto.Message):
r"""Entity representing a customer of a reseller or distributor.
Attributes:
name (str):
Output only. Resource name of the customer. Format:
accounts/{account_id}/customers/{customer_id}
org_display_name (str):
Required. Name of the organization that the
customer entity represents.
org_postal_address (google.type.postal_address_pb2.PostalAddress):
Required. The organization address for the
customer. To enforce US laws and embargoes, we
require a region and zip code. You must provide
valid addresses for every customer. To set the
customer's language, use the Customer-level
language code.
primary_contact_info (google.cloud.channel_v1.types.ContactInfo):
Primary contact info.
alternate_email (str):
Secondary contact email. You need to provide
an alternate email to create different domains
if a primary contact email already exists. Users
will receive a notification with credentials
when you create an admin.google.com account.
Secondary emails are also recovery email
addresses. Alternate emails are optional when
you create Team customers.
domain (str):
Required. The customer's primary domain. Must
match the primary contact email's domain.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the customer was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the customer was
updated.
cloud_identity_id (str):
Output only. The customer's Cloud Identity ID
if the customer has a Cloud Identity resource.
language_code (str):
Optional. The BCP-47 language code, such as "en-US" or
"sr-Latn". For more information, see
https://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
cloud_identity_info (google.cloud.channel_v1.types.CloudIdentityInfo):
Output only. Cloud Identity information for
the customer. Populated only if a Cloud Identity
account exists for this customer.
channel_partner_id (str):
Cloud Identity ID of the customer's channel
partner. Populated only if a channel partner
exists for this customer.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
org_display_name: str = proto.Field(
proto.STRING,
number=2,
)
org_postal_address: postal_address_pb2.PostalAddress = proto.Field(
proto.MESSAGE,
number=3,
message=postal_address_pb2.PostalAddress,
)
primary_contact_info: "ContactInfo" = proto.Field(
proto.MESSAGE,
number=4,
message="ContactInfo",
)
alternate_email: str = proto.Field(
proto.STRING,
number=5,
)
domain: str = proto.Field(
proto.STRING,
number=6,
)
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=8,
message=timestamp_pb2.Timestamp,
)
cloud_identity_id: str = proto.Field(
proto.STRING,
number=9,
)
language_code: str = proto.Field(
proto.STRING,
number=10,
)
cloud_identity_info: common.CloudIdentityInfo = proto.Field(
proto.MESSAGE,
number=12,
message=common.CloudIdentityInfo,
)
channel_partner_id: str = proto.Field(
proto.STRING,
number=13,
)
class ContactInfo(proto.Message):
r"""Contact information for a customer account.
Attributes:
first_name (str):
The customer account contact's first name.
Optional for Team customers.
last_name (str):
The customer account contact's last name.
Optional for Team customers.
display_name (str):
Output only. The customer account contact's
display name, formatted as a combination of the
customer's first and last name.
email (str):
The customer account's contact email.
Required for entitlements that create
admin.google.com accounts, and serves as the
customer's username for those accounts. Use this
email to invite Team customers.
title (str):
Optional. The customer account contact's job
title.
phone (str):
The customer account's contact phone number.
"""
first_name: str = proto.Field(
proto.STRING,
number=1,
)
last_name: str = proto.Field(
proto.STRING,
number=2,
)
display_name: str = proto.Field(
proto.STRING,
number=4,
)
email: str = proto.Field(
proto.STRING,
number=5,
)
title: str = proto.Field(
proto.STRING,
number=6,
)
phone: str = proto.Field(
proto.STRING,
number=7,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "c2be0d5dfac97b1128b1326c837e841c",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 78,
"avg_line_length": 32.3,
"alnum_prop": 0.6056071551427589,
"repo_name": "googleapis/python-channel",
"id": "3d6ecc6cca4a6f8935be0f8fab75166010518942",
"size": "6414",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/channel_v1/types/customers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1755858"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
import rospy
import thread
import threading
import time
import mavros
import actionlib
import smach
import smach_ros
from math import *
from mavros.utils import *
from mavros import setpoint as SP
from tf.transformations import quaternion_from_euler
from uav_explorer_states import *
from util_states import *
from uav_worker_states import *
from kuri_msgs.msg import GenerateExplorationWaypointsAction, Task
from smach_ros import SimpleActionState
def main_cc_term_cb(outcome_map):
if(outcome_map['STATUS_CHECKING']=='theEnd'):
return True
else :
return False
def main_cc_cb(outcome_map):
if(outcome_map['STATUS_CHECKING']=='theEnd'):
return 'comp_done'
else:
return 'working'
def main(testing_mode):
rospy.init_node('kuri_mbzirc_challenge3_state_machine')
start= float(str(time.time()))#rospy.get_time() # stamp should update
rospy.loginfo(">>>>>>>>>>>>>>>>>>>>>>>>>>>start time : %f" , (start))
### MAIN
main_sm = smach.StateMachine(outcomes=['mission_accomplished',
'mission_incomplete',
'test_succeeded',
'test_failed']
)
with main_sm:
"""
#TODO testing modes are disabled (only the normal run is available) --> we will add them back later
********************************************************************
* choose one of these modes: *
* 1- normalRun : runs the normal scenario *
* 2- testExplorer : test explorer states *
* 3- tastTaskAllocator : test task allocator *
* 4- testUAVWorkers : test the uav workers concurrent *
********************************************************************
"""
main_sm.userdata.testing_type = testing_mode
### exploration
exploration_sm = smach.StateMachine(outcomes=['exp_failed','exp_preempted'],
input_keys=['exploration_sm_in']
)
### detection and tracking
detection_sm = smach.StateMachine(outcomes=['tracker_failed','tracker_preempted'] )
### mapping
mapping_sm = smach.StateMachine(outcomes=['mapping_failed','mapping_preempted'] )
### task allocator
task_allocator_sm = smach.StateMachine(outcomes=['tasksReady','allocationPreempted','allocationFailed'],
output_keys=['task_allocator_out1','task_allocator_out2']
)
### Worker1 UAV
uav_worker1_sm = smach.StateMachine(outcomes=['workerDone','objectFellFailure','uav1NavigationPreempted','uav1NavigationFailed']
)
### Worker2 UAV
uav_worker2_sm = smach.StateMachine(outcomes=['workerDone','objectFellFailure','uav2NavigationPreempted','uav2NavigationFailed']
)
###state 1 in the main sm --> initialization
#TODO return back the testing modes ( it was removed since everything is running parallel)
smach.StateMachine.add('INITIALIZATION', InitTestingMode(),
transitions={
'normalRun' : 'STARTING'
#'testExplorer' : 'TEST_GENERATING_WAYPOINTS',
#'tastTaskAllocator' : 'TEST_TASK_ALLOCATOR',
#'testUAVWorkers' : 'TEST_UAV_WORKERS'
},
remapping={
'testing_type_in' : 'testing_type',
'waypoint_generator_test_in' : 'generate_waypoints_in',
'task_allocator_test_in':'task_allocator_in',
'path_generator_test_in':'tasks'
#'uav_worker1_test_in':'generating_navpaths_uav1_out',
#'uav_worker2_test_in':'generating_navpaths_uav2_out'
}
)
###state 2 in the main sm --> starting
smach.StateMachine.add('STARTING', Starting(1),
transitions={
'waitingforGPS':'STARTING',
'GPSFixed':'GENERATING_WAYPOINTS'
},
remapping={'starting_out':'uav_gps_loc'}
)
###state 3 in the main sm --> generating waypoints
#smach.StateMachine.add('GENERATING_WAYPOINTS', GenerateWaypoints(),
#transitions={
#'succeeded':'All_CC', #UAV_EXPLORER
#'aborted':'mission_incomplete',
#'preempted':'mission_incomplete'
#},
#remapping={
#'generate_waypoints_in':'uav_gps_loc',
#'generate_waypoints_out':'waypoints'
#}
#)
smach.StateMachine.add('GENERATING_WAYPOINTS', ReadWaypoints(),
transitions={
'succeeded':'All_CC', #UAV_EXPLORER
'aborted':'mission_incomplete'
},
remapping={
'generate_waypoints_in':'uav_gps_loc',
'generate_waypoints_out':'waypoints'
}
)
###state 4 in the main sm --> concurrency
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
explorer_allocator_workers_cc = smach.Concurrence(outcomes=['comp_done','working'],
default_outcome='working',
child_termination_cb = main_cc_term_cb,
outcome_cb = main_cc_cb,
input_keys=['eaw_cc_in']
)
with explorer_allocator_workers_cc:
smach.Concurrence.add('STATUS_CHECKING', StatusChecking(rospy.get_time(),start) )
smach.Concurrence.add('EXPLORE', exploration_sm,
remapping={
'exploration_sm_in':'eaw_cc_in',
}
)
smach.Concurrence.add('DETECT', detection_sm
)
smach.Concurrence.add('MAP', mapping_sm
)
smach.Concurrence.add('TASKALLOCATION', task_allocator_sm
)
smach.Concurrence.add('WORKER2', uav_worker1_sm
)
smach.Concurrence.add('WORKER3', uav_worker2_sm
)
#THE BIG CONCURRENCY THAT YOU WILL EVER SEE IN YOUR LIFE >_<
smach.StateMachine.add('All_CC', explorer_allocator_workers_cc,
transitions={
'comp_done':'mission_accomplished',
'working':'All_CC',
},
remapping={
'eaw_cc_in':'waypoints'
}
)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Define the exploreration machine
#TODO: assign namespaces for each physical drone
#TODO: confirm that after gps homming the local position is set to 0,0,0 (in real experiments)
with exploration_sm:
smach.StateMachine.add('EXPLORING', ExploringNew(1),
transitions={
'succeeded':'EXPLORING',
'aborted':'exp_failed',
'preempted':'exp_preempted'
},
remapping={
'navigation_task':'exploration_sm_in',
}
)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Define the detection machine
#TODO: add latch after the publish
#TODO: objects detector should return a list of objects and return an objects list even if it was one
with detection_sm:
smach.StateMachine.add('TRACKING', DetectingObjects(),
transitions={
'succeeded':'TRACKING',
'aborted':'tracker_failed',
'preempted':'tracker_preempted'
}
)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Define the mapping machine
with mapping_sm:
smach.StateMachine.add('MAPPING', Mapping(),
transitions={
'succeeded':'MAPPING',
'aborted':'mapping_failed',
'preempted':'mapping_preempted'
}
)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Define the task_allocator state machine
#TODO: it should genarate new tasks only if the workers are not performing a task
#TODO: it should be deployed in one system (centralized system, ground station) --> task allocator, path generator, and statemachine
with task_allocator_sm:
smach.StateMachine.add('ALLOCATING_TASKS', AllocatingTasks(),
transitions={
'succeeded':'GENERATING_NAVPATHS',
'aborted' : 'allocationFailed',
'preempted':'allocationPreempted'
},
remapping={
'allocating_tasks_out':'uavs_tasks'
}
)
smach.StateMachine.add('GENERATING_NAVPATHS', GeneratePaths(),
transitions={
'succeeded':'tasksReady',
'preempted':'allocationFailed',
'aborted' : 'allocationPreempted'
},
remapping={
'tasks':'uavs_tasks',
'generating_navpaths_uav1_out':'task_allocator_out1',
'generating_navpaths_uav2_out':'task_allocator_out2'
}
)
# End task_allocator state machine
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Define the uav_worker1 state machine
#TODO: landing : develop object picking controller (probably use butti object tracking)
#TODO: service call to check if the object is attached or not
#TODO: dropping : developing object dropping controller (butti drop box tracker)
### wORKER 1 :UAV2
with uav_worker1_sm:
smach.StateMachine.add('NAVIGATING_2_OBJECT', Navigating2ObjectNew(2),
transitions={
'succeeded':'PICKING_OBJECT',
'preempted':'uav1NavigationPreempted',
'aborted' : 'uav1NavigationFailed'
}
)
#TODO link the picking object to aerial manipulation action server [DONE]
smach.StateMachine.add('PICKING_OBJECT', PickingObject(),
transitions={
'succeeded':'NAVIGATING_2_DROPZONE',
'preempted':'OBJECT_FELL',
'aborted':'OBJECT_FELL'
}
)
smach.StateMachine.add('NAVIGATING_2_DROPZONE', Navigating2DropZone(0.5),
transitions={
'navigating':'NAVIGATING_2_DROPZONE',
'hovering':'DROPPING_OBJECT'
}
)
smach.StateMachine.add('DROPPING_OBJECT', DroppingObject(0.5),
transitions={
'dropping':'DROPPING_OBJECT',
'dropped':'workerDone',
'droppingFail':'OBJECT_FELL'
}
)
smach.StateMachine.add('OBJECT_FELL', ObjectFell(0.5),
transitions={
'canSee':'PICKING_OBJECT',
'cannotSee':'objectFellFailure'
}
)
# End uav_worker1 state machine
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Define the uav_worker2 state machine
#TODO: landing : develop object picking controller (probably use butti object tracking)
#TODO: service call to check if the object is attached or not
#TODO: dropping : developing object dropping controller (butti drop box tracker)
### wORKER 1 :UAV2
with uav_worker2_sm:
smach.StateMachine.add('NAVIGATING_2_OBJECT', Navigating2ObjectNew(3),
transitions={
'succeeded':'PICKING_OBJECT',
'preempted':'uav2NavigationPreempted',
'aborted' : 'uav2NavigationFailed'
}
)
#TODO link the picking object to aerial manipulation action server [DONE]
smach.StateMachine.add('PICKING_OBJECT', PickingObject(),
transitions={
'succeeded':'NAVIGATING_2_DROPZONE',
'preempted':'OBJECT_FELL',
'aborted':'OBJECT_FELL'
}
)
smach.StateMachine.add('NAVIGATING_2_DROPZONE', Navigating2DropZone(0.5),
transitions={
'navigating':'NAVIGATING_2_DROPZONE',
'hovering':'DROPPING_OBJECT'
}
)
smach.StateMachine.add('DROPPING_OBJECT', DroppingObject(0.5),
transitions={
'dropping':'DROPPING_OBJECT',
'dropped':'workerDone',
'droppingFail':'OBJECT_FELL'
}
)
smach.StateMachine.add('OBJECT_FELL', ObjectFell(0.5),
transitions={
'canSee':'PICKING_OBJECT',
'cannotSee':'objectFellFailure'
}
)
# End uav_worker2 state machine
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Create the introspection server
sis = smach_ros.IntrospectionServer('kuri_mbzirc_challenge3_state_machine_viewer', main_sm, '/kuri_mbzirc_challenge3_state_machine_root')
sis.start()
# Execute the main state machine
outcome = main_sm.execute()
rospy.spin()
sis.stop()
if __name__ == '__main__':
if len(sys.argv) < 2:
print(" Not enough arguments, please choose from the below testing modes: \n - normalRun \n - testExplorer \n - tastTaskAllocator \n - testUAVWorkers")
print('Defaulting to normal run mode.')
main('normalRun')
else:
main(sys.argv[1])
| {
"content_hash": "8169712c6dd3c8e9d52f504ba300f606",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 155,
"avg_line_length": 35.16353887399464,
"alnum_prop": 0.5417047880451357,
"repo_name": "kuri-kustar/kuri_mbzirc_challenge_3",
"id": "7a0501ab05ae91079393369e25d6ab3f1e0a3e41",
"size": "14402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kuri_system_coordinator/scripts/main_system_coordinator.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "25642"
},
{
"name": "C++",
"bytes": "239272"
},
{
"name": "CMake",
"bytes": "29034"
},
{
"name": "Python",
"bytes": "281386"
},
{
"name": "Shell",
"bytes": "692"
}
],
"symlink_target": ""
} |
"""Tests for baremetal utils."""
import mox
from nova import exception
from nova import test
from nova.virt.baremetal import utils
class BareMetalUtilsTestCase(test.TestCase):
def test_random_alnum(self):
s = utils.random_alnum(10)
self.assertEqual(len(s), 10)
s = utils.random_alnum(100)
self.assertEqual(len(s), 100)
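    def test_random_alnum_is_alphanumeric(self):
        # Hypothetical extra check, not part of the original test case: given the
        # helper's name, the result is assumed to contain only alphanumeric
        # characters, which str.isalnum() can verify.
        s = utils.random_alnum(50)
        self.assertTrue(s.isalnum())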
| {
"content_hash": "a8b483140e964a08b7c8e4ea4330ed2e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 44,
"avg_line_length": 22.5,
"alnum_prop": 0.6861111111111111,
"repo_name": "fajoy/nova",
"id": "827b1fcaf0b1b4ef78a61bd1b8916fa67f3858cb",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/grizzly-2",
"path": "nova/tests/baremetal/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import os
import pickle
import threading
import time
import urllib
import uuid
from functools import wraps
import google.appengine.api.users
import webapp2
from google.appengine.api import taskqueue
from google.appengine.api.datastore_errors import TransactionFailedError
from google.appengine.api.images.images_stub import ImagesServiceStub
from google.appengine.datastore.datastore_rpc import TransactionOptions, ConfigOption
from google.appengine.ext import db, deferred
import influxdb
from influxdb.resultset import ResultSet
SERVER_SOFTWARE = os.environ.get("SERVER_SOFTWARE", "Development")
DEBUG = SERVER_SOFTWARE.startswith('Development')
# THIS PIECE OF CODE MUST BE ON TOP BECAUSE IT MONKEY PATCHES THE BUILTIN USER CLASS
# START MONKEY PATCH
def email_lower(email):
if email is None:
return None
email = email.email() if isinstance(email, google.appengine.api.users.User) else email
email = unicode(email) if not isinstance(email, unicode) else email
return email.lower()
original_constructor = google.appengine.api.users.User.__init__
def __init__(self, *args, **kwargs):
if args:
email = args[0]
if email:
lower_email = email_lower(email)
if lower_email != email:
args = list(args)
args[0] = lower_email
else:
email = kwargs.get('email', None)
if email != None:
lower_email = email_lower(email)
if lower_email != email:
kwargs['email'] = lower_email
original_constructor(self, *args, **kwargs)
google.appengine.api.users.User.__init__ = __init__
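# Illustrative sketch (not part of the original module, never called): with the
# patch above, constructing a User from a mixed-case address should yield a
# lower-cased email.
def _email_lowering_example():
    u = google.appengine.api.users.User('John.Doe@Example.COM')
    return u.email()  # expected: 'john.doe@example.com'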
# END MONKEY PATCH
# MONKEY PATCH logging
# Add possibility to bring down error levels for deferreds
class _TLocal(threading.local):
def __init__(self):
self.suppress = 0
_tlocal = _TLocal()
del _TLocal
def start_suppressing():
_tlocal.suppress += 1
def stop_suppressing():
if _tlocal.suppress > 0:
_tlocal.suppress -= 1
def reset_suppressing():
_tlocal.suppress = 0
class suppressing(object):
def __enter__(self):
start_suppressing()
return self
def __exit__(self, *args, **kwargs):
stop_suppressing()
_orig_error = logging.error
_orig_critical = logging.critical
def _new_error(msg, *args, **kwargs):
suppress = kwargs.pop('_suppress', True)
if _tlocal.suppress > 0 and suppress:
logging.warning(msg, *args, **kwargs)
else:
_orig_error(msg, *args, **kwargs)
def _new_critical(msg, *args, **kwargs):
suppress = kwargs.pop('_suppress', True)
if _tlocal.suppress > 0 and suppress:
logging.warning(msg, *args, **kwargs)
else:
_orig_critical(msg, *args, **kwargs)
def _new_exception(msg, *args, **kwargs):
suppress = kwargs.pop('_suppress', True)
if _tlocal.suppress > 0 and suppress:
logging.warning(msg, *args, exc_info=1, **kwargs)
else:
_orig_error(msg, *args, exc_info=1, **kwargs)
logging.error = _new_error
logging.critical = _new_critical
logging.exception = _new_exception
class StubsFilter(logging.Filter):
def filter(self, record):
# Get rid of the annoying 'Sandbox prevented access to file' warnings
return 'stubs.py' != record.filename
logging.root.addFilter(StubsFilter())
# MONKEY PATCH db
# Add possibility to run post-transaction actions
class __TLocal(threading.local):
def __init__(self):
self.propagation = False
_temp_transaction_options = __TLocal()
del __TLocal
_orig_run_in_transaction_options = db.run_in_transaction_options
_options = [attr for attr in dir(TransactionOptions) if isinstance(getattr(TransactionOptions, attr), ConfigOption)]
_clone_options = lambda o: {attr: getattr(o, attr) for attr in _options}
_default_options = _clone_options(db.create_transaction_options())
def _wrap_run_in_transaction_func(is_retries, is_options):
@wraps(_orig_run_in_transaction_options)
def wrapped(*args, **kwargs):
if is_options:
options = _clone_options(args[0])
args = args[1:]
else:
options = dict(_default_options)
if is_retries:
retries = args[0]
args = args[1:]
else:
retries = options['retries']
options['retries'] = 0
if options.get('propagation') is None and _temp_transaction_options.propagation:
options['propagation'] = db.ALLOWED
options = db.create_transaction_options(**options)
if db.is_in_transaction():
return _orig_run_in_transaction_options(options, *args, **kwargs)
if not retries:
retries = 3
def run(transaction_guid):
max_tries = retries + 1
count = 0
while count < max_tries:
count += 1
start = time.time()
try:
return _orig_run_in_transaction_options(options, *args, **kwargs)
except (TransactionFailedError, db.Timeout) as e:
if isinstance(e, db.Timeout) and type(e) != db.Timeout:
raise e # only retrying in case of db.Timeout exceptions, not subclasses
if count == max_tries:
raise e
transactions.post_transaction_actions.reset(transaction_guid)
logging.info("%s: %s. Retrying... (%s)", e.__class__.__name__, e.message, count)
sleep_time = 1.1 - (time.time() - start)
if sleep_time > 0:
logging.info("Sleeping %s seconds ....", sleep_time)
time.sleep(sleep_time)
from rogerthat.utils import transactions
if db.is_in_transaction():
transaction_guid = transactions.post_transaction_actions.get_current_transaction_guid()
else:
transaction_guid = str(uuid.uuid4())
transactions.post_transaction_actions.set_current_transaction_guid(transaction_guid)
try:
r = run(transaction_guid)
except:
transactions.post_transaction_actions.finalize(success=False, transaction_guid=transaction_guid)
raise
try:
transactions.post_transaction_actions.finalize(success=True, transaction_guid=transaction_guid)
except:
logging.error("Caught exception in rpc.transaction_done", exc_info=1, _suppress=False)
return r
return wrapped
db.run_in_transaction = _wrap_run_in_transaction_func(is_retries=False, is_options=False)
db.run_in_transaction_custom_retries = _wrap_run_in_transaction_func(is_retries=True, is_options=False)
db.run_in_transaction_options = _wrap_run_in_transaction_func(is_retries=False, is_options=True)
# END MONKEY PATCH
def _allow_transaction_propagation(func, *args, **kwargs):
original_propagation_value = _temp_transaction_options.propagation
_temp_transaction_options.propagation = True
try:
return func(*args, **kwargs)
finally:
_temp_transaction_options.propagation = original_propagation_value
db.allow_transaction_propagation = _allow_transaction_propagation
del _allow_transaction_propagation
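# Illustrative sketch (not part of the original module, never called): how the
# patched transaction helpers above are intended to be used. Retrying only
# applies outside an existing transaction; allow_transaction_propagation lets a
# nested transaction join the surrounding one instead of failing.
def _transaction_usage_example():
    def _work():
        return 42
    db.run_in_transaction(_work)                    # default: up to 3 retries
    db.run_in_transaction_custom_retries(5, _work)  # explicit retry count
    return db.allow_transaction_propagation(db.run_in_transaction, _work)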
# MONKEY PATCH json.dump & json.dumps to eliminate useless white space
_orig_json_dumps = json.dumps
def _new_json_dumps(*args, **kwargs):
if len(args) < 8:
kwargs.setdefault("separators", (',', ':'))
try:
return _orig_json_dumps(*args, **kwargs)
except Exception as e:
logging.debug(args)
raise
json.dumps = _new_json_dumps
_orig_json_dump = json.dump
def _new_json_dump(*args, **kwargs):
if len(args) < 8:
kwargs.setdefault("separators", (',', ':'))
return _orig_json_dump(*args, **kwargs)
json.dump = _new_json_dump
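# Illustrative sketch (not part of the original module, never called): after the
# patch above, json.dumps defaults to compact separators, so ',' and ':' are not
# followed by spaces unless separators is passed explicitly.
def _json_compact_example():
    return json.dumps({'a': [1, 2]})  # expected: '{"a":[1,2]}' rather than '{"a": [1, 2]}'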
# END MONKEY PATCH
# MONKEY PATCH os.path.expanduser & os.path.expanduser to avoid using
# unspported os.path.getuserid
_orig_os_path_expanduser = os.path.expanduser
def _new_os_path_expanduser(path):
return path
os.path.expanduser = _new_os_path_expanduser
# END MONKEY PATCH
# MONKEY PATCH deferred.defer
_old_deferred_defer = deferred.defer
def _new_deferred_defer(obj, *args, **kwargs):
# Sets current user and fixes an issue where the transactional argument wasn't supplied when the task is too large
from rogerthat.rpc import users
from rogerthat.utils import get_backend_service
from mcfw.consts import MISSING
if users.get_current_deferred_user() == MISSING:
kwargs['__user'] = users.get_current_user()
else:
kwargs['__user'] = users.get_current_deferred_user()
taskargs = dict((x, kwargs.pop(("_%s" % x), None))
for x in ("countdown", "eta", "name", "target",
"retry_options"))
taskargs["url"] = kwargs.pop("_url", deferred.deferred._DEFAULT_URL)
transactional = kwargs.pop("_transactional", False)
taskargs["headers"] = dict(deferred.deferred._TASKQUEUE_HEADERS)
taskargs["headers"].update(kwargs.pop("_headers", {}))
queue = kwargs.pop("_queue", deferred.deferred._DEFAULT_QUEUE)
pickled = deferred.serialize(obj, *args, **kwargs)
if not taskargs["target"] and taskargs["countdown"] is None: # Don't increase too high otherwise keepalive_task will break
taskargs["target"] = get_backend_service()
try:
if DEBUG:
logging.debug('Scheduling task on queue %s: %s.%s\n%s\n%s',
queue,
obj.__module__, obj.__name__,
''.join((', %s' % repr(a) for a in args)),
''.join((', %s=%s' % (k, repr(v)) for k, v in kwargs.iteritems())))
task = taskqueue.Task(payload=pickled, **taskargs)
return task.add(queue, transactional=transactional)
except taskqueue.TaskTooLargeError:
key = deferred.deferred._DeferredTaskEntity(data=pickled).put()
pickled = deferred.deferred.serialize(deferred.deferred.run_from_datastore, str(key))
task = taskqueue.Task(payload=pickled, **taskargs)
# this is the patched line (transactional=transactional)
return task.add(queue, transactional=transactional)
def _new_deferred_run(data):
try:
func, args, kwds = pickle.loads(data)
except Exception, e:
raise deferred.PermanentTaskFailure(e)
else:
from rogerthat.rpc import users
current_user = kwds.pop('__user', None)
if current_user:
users.set_deferred_user(current_user)
try:
from rogerthat.utils import get_current_queue, get_current_version
if DEBUG:
prefix = u'%s -> ' % get_current_version()
else:
prefix = u''
logging.debug('%sQueue: %s\ndeferred.run(%s.%s%s%s)',
prefix,
get_current_queue(),
func.__module__, func.__name__,
"".join((",\n %s" % repr(a) for a in args)),
"".join((",\n %s=%s" % (k, repr(v)) for k, v in kwds.iteritems())))
except:
logging.exception('Failed to log the info of this defer (%s)', func)
try:
return func(*args, **kwds)
except deferred.PermanentTaskFailure:
stop_suppressing()
raise
except:
request = webapp2.get_request()
if request:
execution_count_triggering_error_log = 9
execution_count = request.headers.get('X-Appengine-Taskexecutioncount', None)
if execution_count and int(execution_count) == execution_count_triggering_error_log:
logging.error('This deferred.run already failed %s times!', execution_count, _suppress=False)
raise
finally:
if current_user:
users.clear_user()
deferred.defer = deferred.deferred.defer = _new_deferred_defer
deferred.run = deferred.deferred.run = _new_deferred_run
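# Illustrative sketch (not part of the original module, never called): the patched
# deferred.defer above keeps the usual underscore-prefixed task options and also
# records the calling user in the pickled kwargs under '__user'.
def _example_deferred_task(a, b=None):
    logging.info('deferred task ran with %s, %s', a, b)
def _deferred_defer_example():
    deferred.defer(_example_deferred_task, 1, b=2,
                   _queue='default', _countdown=30, _transactional=False)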
# END MONKEY PATCH
# MONKEY PATCH expando unindexed properties
_orginal_expando_getattr = db.Expando.__getattribute__
def _new_expando_getattr(self, key):
if key == '_unindexed_properties':
return self.__class__._unindexed_properties.union(self.dynamic_properties())
return _orginal_expando_getattr(self, key)
db.Expando.__getattribute__ = _new_expando_getattr
# END MONKEY PATCH
try:
# disable the annoying AppenginePlatformWarning's
from requests.packages import urllib3
urllib3.disable_warnings()
except ImportError:
pass
try:
import requests # @UnusedImport
try:
import requests_toolbelt.adapters.appengine
requests_toolbelt.adapters.appengine.monkeypatch()
except ImportError:
logging.error('You must include `requests-toolbelt` in requirements.txt when using the `requests` library')
except ImportError:
pass
dummy2 = lambda: None
def _Dynamic_Composite(self, request, response):
"""Implementation of ImagesService::Composite.
Based off documentation of the PIL library at
http://www.pythonware.com/library/pil/handbook/index.htm
Args:
request: ImagesCompositeRequest - Contains image request info.
response: ImagesCompositeResponse - Contains transformed image.
Raises:
ApplicationError: Bad data was provided, likely data about the dimensions.
"""
from PIL import Image
from google.appengine.api import images
from google.appengine.api.images import images_service_pb
from google.appengine.api.images.images_stub import _BackendPremultiplication, _ArgbToRgbaTuple, RGBA
from google.appengine.runtime import apiproxy_errors
if (not request.canvas().width() or not request.canvas().height() or
not request.image_size() or not request.options_size()):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if (request.canvas().width() > 4000 or
request.canvas().height() > 4000 or
request.options_size() > images.MAX_COMPOSITES_PER_REQUEST):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
width = request.canvas().width()
height = request.canvas().height()
color = _ArgbToRgbaTuple(request.canvas().color())
color = _BackendPremultiplication(color)
canvas = Image.new(RGBA, (width, height), color)
sources = []
for image in request.image_list():
sources.append(self._OpenImageData(image))
for options in request.options_list():
if (options.anchor() < images.TOP_LEFT or
options.anchor() > images.BOTTOM_RIGHT):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if options.source_index() >= len(sources) or options.source_index() < 0:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if options.opacity() < 0 or options.opacity() > 1:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
source = sources[options.source_index()]
x_anchor = (options.anchor() % 3) * 0.5
y_anchor = (options.anchor() / 3) * 0.5
x_offset = int(options.x_offset() + x_anchor * (width - source.size[0]))
y_offset = int(options.y_offset() + y_anchor * (height - source.size[1]))
if source.mode == RGBA:
canvas.paste(source, (x_offset, y_offset), source)
else:
# Fix here: alpha must be an integer (and not a float)
alpha = int(options.opacity() * 255)
mask = Image.new('L', source.size, alpha)
canvas.paste(source, (x_offset, y_offset), mask)
response_value = self._EncodeImage(canvas, request.canvas().output())
response.mutable_image().set_content(response_value)
def new_query(self,
query,
params=None,
epoch=None,
expected_response_code=200,
database=None,
raise_errors=True,
chunked=False,
chunk_size=0):
"""Send a query to InfluxDB.
:param query: the actual query string
:type query: str
:param params: additional parameters for the request,
defaults to {}
:type params: dict
:param epoch: response timestamps to be in epoch format either 'h',
            'm', 's', 'ms', 'u', or 'ns', defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:type epoch: str
:param expected_response_code: the expected status code of response,
defaults to 200
:type expected_response_code: int
:param database: database to query, defaults to None
:type database: str
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:type raise_errors: bool
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:type chunked: bool
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:type chunk_size: int
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
if params is None:
params = {}
params['q'] = query
params['db'] = database or self._database
if epoch is not None:
params['epoch'] = epoch
if chunked:
params['chunked'] = 'true'
if chunk_size > 0:
params['chunk_size'] = chunk_size
# START PATCH - use POST request instead of GET
# Needed because url length is max. 2KB on appengine
response = self.request(
url="query",
method='POST',
params=None,
data=urllib.urlencode(params),
headers={'Content-Type': 'application/x-www-form-urlencoded'},
expected_response_code=expected_response_code
)
# END PATCH
if chunked:
return self._read_chunked_response(response)
data = response.json()
results = [
ResultSet(result, raise_errors=raise_errors)
for result
in data.get('results', [])
]
# TODO(aviau): Always return a list. (This would be a breaking change)
if len(results) == 1:
return results[0]
return results
influxdb.InfluxDBClient.query = new_query
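# Illustrative sketch (not part of the original module, never called): with the
# patch above, a query is sent as a form-encoded POST body, which sidesteps App
# Engine's ~2KB URL limit. 'client' is assumed to be an influxdb.InfluxDBClient.
def _influxdb_query_example(client):
    rs = client.query('SELECT * FROM "requests" LIMIT 10', database='stats')
    return list(rs.get_points()) if isinstance(rs, ResultSet) else rs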
if DEBUG:
# Fixes "System error: new style getargs format but argument is not a tuple" on devserver for composite images
ImagesServiceStub._Dynamic_Composite = _Dynamic_Composite
# Log which datastore models are being read / written on the devserver.
from db_hooks import add_before_put_hook, add_after_get_hook
from db_hooks.hooks import put_hook, after_get_hook
add_before_put_hook(put_hook)
add_after_get_hook(after_get_hook)
| {
"content_hash": "fc8310609ab9a9927afc273f8d6a10a3",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 126,
"avg_line_length": 33.06390328151986,
"alnum_prop": 0.6334621813623067,
"repo_name": "our-city-app/oca-backend",
"id": "883b13afc6873bdfa9372bb528df87dfc55eb77d",
"size": "19784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/add_1_monkey_patches.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "CSS",
"bytes": "62142"
},
{
"name": "HTML",
"bytes": "697349"
},
{
"name": "JavaScript",
"bytes": "1023951"
},
{
"name": "PostScript",
"bytes": "4694678"
},
{
"name": "Python",
"bytes": "3149982"
},
{
"name": "Shell",
"bytes": "5839"
},
{
"name": "TypeScript",
"bytes": "690248"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`airflow.sensors.base`."""
import warnings
# pylint: disable=unused-import
from airflow.sensors.base import BaseSensorOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.sensors.base`.", DeprecationWarning, stacklevel=2
)
| {
"content_hash": "dc049d89f755c25ae300d5dae6f50f46",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 101,
"avg_line_length": 30.3,
"alnum_prop": 0.7623762376237624,
"repo_name": "sekikn/incubator-airflow",
"id": "cfcc6842f0f74b8b5c0aa3e53e1b74ceb66c14f0",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/sensors/base_sensor_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from boxlet.game_engine import GameEngine
import pyglet
from pyglet.gl import *
class MyWindow(pyglet.window.Window):
def __init__(self, **kwargs):
super(MyWindow, self).__init__(**kwargs)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.time = 0.0
self.dt = 1.0 / 60.0
self.game_engine = GameEngine()
self.clock_display = pyglet.clock.ClockDisplay()
pyglet.clock.schedule_interval(self.step, self.dt)
def close(self):
pyglet.clock.unschedule(self.step)
self.game_engine.delete()
super(MyWindow, self).close()
def on_draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.game_engine.draw(self.width, self.height)
self.clock_display.draw()
def step(self, dt):
self.time += dt
while self.game_engine.time + self.dt <= self.time:
self.game_engine.step(self.dt)
def on_key_press(self, key, modifiers):
if key == pyglet.window.key.ESCAPE:
self.close()
else:
self.game_engine.on_key_press(key, modifiers)
def on_key_release(self, key, modifiers):
self.game_engine.on_key_release(key, modifiers)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self.game_engine.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
def on_mouse_enter(self, x, y):
self.game_engine.on_mouse_enter(x, y)
def on_mouse_leave(self, x, y):
self.game_engine.on_mouse_leave(x, y)
def on_mouse_motion(self, x, y, dx, dy):
self.game_engine.on_mouse_motion(x, y, dx, dy)
def on_mouse_press(self, x, y, button, modifiers):
self.game_engine.on_mouse_press(x, y, button, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
self.game_engine.on_mouse_release(x, y, button, modifiers)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self.game_engine.on_mouse_scroll(x, y, scroll_x, scroll_y)
def main():
config = pyglet.gl.Config(double_buffer=True, sample_buffers=1, samples=4,
depth_size=8)
window = MyWindow(fullscreen=True, config=config)
pyglet.app.run()
if __name__ == '__main__':
main()
| {
"content_hash": "b1f69e6d79334a8cd869f08c3393a7b9",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 32.927536231884055,
"alnum_prop": 0.6139964788732394,
"repo_name": "elemel/boxlet",
"id": "23bf69f6f76bce2b0c770d70637bd09c30479b3b",
"size": "2272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/boxlet/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44339"
}
],
"symlink_target": ""
} |
"""Kerberos command"""
from __future__ import annotations
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.security import kerberos as krb
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations
@cli_utils.action_cli
def kerberos(args):
"""Start a kerberos ticket renewer"""
print(settings.HEADER)
if args.daemon:
pid, stdout, stderr, _ = setup_locations(
"kerberos", args.pid, args.stdout, args.stderr, args.log_file
)
with open(stdout, 'a') as stdout_handle, open(stderr, 'a') as stderr_handle:
stdout_handle.truncate(0)
stderr_handle.truncate(0)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
stdout=stdout_handle,
stderr=stderr_handle,
umask=int(settings.DAEMON_UMASK, 8),
)
with ctx:
krb.run(principal=args.principal, keytab=args.keytab)
else:
krb.run(principal=args.principal, keytab=args.keytab)
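# Illustrative sketch (not part of the original module, never called): the
# attributes the kerberos() command above reads from its parsed-args namespace.
# The principal and keytab values are placeholders.
def _example_kerberos_args():
    import argparse
    return argparse.Namespace(
        principal='airflow', keytab='airflow.keytab', daemon=False,
        pid=None, stdout=None, stderr=None, log_file=None,
    )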
| {
"content_hash": "337ada52f03e06171bced692d12e53b5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 84,
"avg_line_length": 30.944444444444443,
"alnum_prop": 0.6346499102333932,
"repo_name": "nathanielvarona/airflow",
"id": "eec77f6d6f745718fbaa54ccda7c42c1d7856b0c",
"size": "1899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/cli/commands/kerberos_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('deploys', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='deploy',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 5, 10, 20, 59, 25, 105470, tzinfo=utc), verbose_name='created'),
preserve_default=False,
),
migrations.AddField(
model_name='deploy',
name='modified',
field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 5, 10, 20, 59, 33, 121283, tzinfo=utc), verbose_name='modified'),
preserve_default=False,
),
]
| {
"content_hash": "72c6e09b1c81e6c529d39eca44843c68",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 154,
"avg_line_length": 31.48148148148148,
"alnum_prop": 0.6176470588235294,
"repo_name": "mQuadrics/videplo",
"id": "500dec6e30256d4cfcdf8b2700f0005cea35a378",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deploys/migrations/0002_auto_20160510_2300.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47595"
},
{
"name": "HTML",
"bytes": "45684"
},
{
"name": "JavaScript",
"bytes": "90937"
},
{
"name": "Python",
"bytes": "27108"
}
],
"symlink_target": ""
} |
import uuid
import mock
import tempfile
from pyethereum.utils import sha3
class ConfigHook(object):
def before_feature(self, context, feature):
'''
.. note::
`context.conf` is used instead of `context.config` because `config`
is used internally in `context` by *behave*
'''
context.conf = conf = mock.MagicMock()
node_id = sha3(str(uuid.uuid1())).encode('hex')
tempdir = tempfile.mkdtemp()
def get_side_effect(section, option):
if section == 'network' and option == 'client_id':
return 'client id'
if section == 'network' and option == 'node_id':
return node_id
if section == 'wallet' and option == 'coinbase':
return '0'*40
if section == 'misc' and option == 'data_dir':
return tempdir
def getint_side_effect(section, option):
if section == 'network' and option == 'listen_port':
return 1234
if section == 'network' and option == 'num_peers':
return 10
conf.get.side_effect = get_side_effect
conf.getint.side_effect = getint_side_effect
hook = ConfigHook()
| {
"content_hash": "26bc16e487ce5f4d90eecff1052fe597",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 28.34090909090909,
"alnum_prop": 0.5517241379310345,
"repo_name": "mrmayfield/pyethereum",
"id": "2ee81283714174a1abb263fbe81e25120862dae4",
"size": "1247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "features/hooks/config.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from mrq.job import Job
import pytest
from mrq.queue import Queue, send_task
import time
from mrq.context import set_current_config, get_config
def test_pause_resume(worker):
worker.start(flags="--paused_queues_refresh_interval=0.1")
Queue("high").pause()
assert Queue("high").is_paused()
# wait for the paused_queues list to be refreshed
time.sleep(2)
job_id1 = send_task(
"tests.tasks.general.MongoInsert", {"a": 41},
queue="high")
job_id2 = send_task(
"tests.tasks.general.MongoInsert", {"a": 43},
queue="low")
time.sleep(5)
job1 = Job(job_id1).fetch().data
job2 = Job(job_id2).fetch().data
assert job1["status"] == "queued"
assert job2["status"] == "success"
assert job2["result"] == {"a": 43}
assert worker.mongodb_jobs.tests_inserts.count() == 1
Queue("high").resume()
Job(job_id1).wait(poll_interval=0.01)
job1 = Job(job_id1).fetch().data
assert job1["status"] == "success"
assert job1["result"] == {"a": 41}
assert worker.mongodb_jobs.tests_inserts.count() == 2
def test_pause_refresh_interval(worker):
""" Tests that a refresh interval of 0 disables the pause functionnality """
worker.start(flags="--paused_queues_refresh_interval=0")
Queue("high").pause()
assert Queue("high").is_paused()
# wait for the paused_queues list to be refreshed
time.sleep(2)
job_id1 = send_task(
"tests.tasks.general.MongoInsert", {"a": 41},
queue="high")
time.sleep(5)
job1 = Job(job_id1).fetch().data
assert job1["status"] == "success"
assert job1["result"] == {"a": 41}
def test_pause_subqueue(worker):
# set config in current context in order to have a subqueue delimiter
set_current_config(get_config(config_type="worker"))
worker.start(queues="high high/", flags="--subqueues_refresh_interval=1 --paused_queues_refresh_interval=1")
Queue("high").pause()
assert Queue("high/").is_paused()
# wait for the paused_queues list to be refreshed
time.sleep(2)
job_id1 = send_task(
"tests.tasks.general.MongoInsert", {"a": 41},
queue="high")
job_id2 = send_task(
"tests.tasks.general.MongoInsert", {"a": 43},
queue="high/subqueue")
# wait a bit to make sure the jobs status will still be queued
time.sleep(5)
job1 = Job(job_id1).fetch().data
job2 = Job(job_id2).fetch().data
assert job1["status"] == "queued"
assert job2["status"] == "queued"
assert worker.mongodb_jobs.tests_inserts.count() == 0
Queue("high/").resume()
Job(job_id1).wait(poll_interval=0.01)
Job(job_id2).wait(poll_interval=0.01)
job1 = Job(job_id1).fetch().data
job2 = Job(job_id2).fetch().data
assert job1["status"] == "success"
assert job1["result"] == {"a": 41}
assert job2["status"] == "success"
assert job2["result"] == {"a": 43}
assert worker.mongodb_jobs.tests_inserts.count() == 2
| {
"content_hash": "7736fd488cd47b2a6b13bdaf7a9c4752",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 112,
"avg_line_length": 24.193548387096776,
"alnum_prop": 0.6233333333333333,
"repo_name": "pricingassistant/mrq",
"id": "42f5b33b9a0d4bf2f8d81dc65ec7480873ad770d",
"size": "3000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pause.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5514"
},
{
"name": "Dockerfile",
"bytes": "2722"
},
{
"name": "HTML",
"bytes": "60608"
},
{
"name": "JavaScript",
"bytes": "78540"
},
{
"name": "Makefile",
"bytes": "2765"
},
{
"name": "Perl",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "931744"
}
],
"symlink_target": ""
} |
import sys
import os
from rstem import led_matrix, button
import random
import time
# notify of progress
print("P50")
sys.stdout.flush()
# initialize led matrix
#led_matrix.init_grid(2,2)
led_matrix.init_matrices([(0,8),(8,8),(8,0),(0,0)])
# set up buttons
A = 4
B = 17
UP = 25
DOWN = 24
LEFT = 23
RIGHT = 18
START = 27
SELECT = 22
# setup exit and restart button
exit_button = button.Button(START)
restart_button = button.Button(A)
# notify of progress
print("P60")
sys.stdout.flush()
# initialize variables
num_rows, num_cols, curr_gen, next_gen = (None, None, None, None)
def get_num_neighbors(curr_gen, x, y):
"""Returns the number of (alive) neighbors of given pixel"""
count = 0
for j in range(y-1, y+2):
for i in range(x-1, x+2):
if not(i == x and j == y): # don't count itself
if i >= 0 and i < led_matrix.width() and j >= 0 and j < led_matrix.height():
if curr_gen[j][i] == 0xF:
count += 1
return count
def next_generation():
"""Creates next generation using Conway's Game of Life rules:
http://en.wikipedia.org/wiki/Conway's_Game_of_Life
"""
global next_gen
global curr_gen
for y in range(0,num_rows):
for x in range(0,num_cols):
num_neighbors = get_num_neighbors(curr_gen, x, y)
if curr_gen[y][x] == 0xF and num_neighbors < 2:
next_gen[y][x] = 0 # pixel died off, not enough neighbors
elif curr_gen[y][x] == 0xF and num_neighbors > 3:
next_gen[y][x] = 0 # pixel died off, too many neighbors
elif curr_gen[y][x] == 0 and num_neighbors == 3:
next_gen[y][x] = 0xF # birth of a new pixel
else:
next_gen[y][x] = curr_gen[y][x]
curr_gen, next_gen = next_gen, curr_gen # swap lists
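# Illustrative sketch (not part of the original script, never called): a
# self-contained check of the Conway rules used above on a plain 5x5 list grid,
# so it can be reasoned about without the LED matrix. A horizontal "blinker" of
# three live cells should become vertical after one generation.
def _blinker_example():
    def neighbors(grid, x, y):
        count = 0
        for j in range(y - 1, y + 2):
            for i in range(x - 1, x + 2):
                if (i, j) != (x, y) and 0 <= i < 5 and 0 <= j < 5 and grid[j][i] == 0xF:
                    count += 1
        return count
    grid = [[0] * 5 for _ in range(5)]
    for x in (1, 2, 3):
        grid[2][x] = 0xF
    new = [[0] * 5 for _ in range(5)]
    for y in range(5):
        for x in range(5):
            n = neighbors(grid, x, y)
            if grid[y][x] == 0xF:
                new[y][x] = 0xF if n in (2, 3) else 0   # survival needs 2 or 3 neighbors
            else:
                new[y][x] = 0xF if n == 3 else 0        # birth needs exactly 3 neighbors
    return [new[j][2] for j in range(5)]  # expected: [0, 0xF, 0xF, 0xF, 0]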
def random_grid(width, height):
"""Creates a grid of random dead and alive pixels."""
grid = []
for y in range(height):
row = []
for x in range(width):
random_num = random.randint(0,3)
if random_num == 0: # make alive pixels less common
row.append(0xF) # add an alive pixel
else:
row.append(0x0) # add a dead pixel
grid.append(row)
return grid
def draw_grid():
"""Draws the current generation to led_matrix."""
for y in range(num_rows):
for x in range(num_cols):
led_matrix.point(x, y, curr_gen[y][x])
# whole game loop
while True:
# variables
num_rows = led_matrix.height()
num_cols = led_matrix.width()
# notify of progress
print("P80")
sys.stdout.flush()
curr_gen = random_grid(num_cols, num_rows)
# notify of progress
print("P90")
sys.stdout.flush()
next_gen = [[0 for i in range(num_cols)] for j in range(num_rows)]
# TODO allow sprite input instead of random grid?
# notify menu we are ready for the led matrix
print("READY")
sys.stdout.flush()
# single game loop
while True:
if exit_button.is_pressed():
# clean up stuff and exit the program
button.cleanup()
led_matrix.cleanup()
sys.exit(0)
elif restart_button.is_pressed():
break # break out of this inner loop (lets us restart generations)
else:
led_matrix.erase() # clear the display
draw_grid() # draw the current generation
led_matrix.show() # show on display
next_generation() # update generation to next generation
| {
"content_hash": "b1fe534b603f4ff96a3c53036f943774",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 92,
"avg_line_length": 29.70731707317073,
"alnum_prop": 0.5686918445539135,
"repo_name": "scottsilverlabs/raspberrystem",
"id": "a2418d2c8614872277d199fd4bd4e9d828ac01e1",
"size": "4277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rstem/projects/led_matrix_games/game_of_life.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "22961"
},
{
"name": "HTML",
"bytes": "231804"
},
{
"name": "Makefile",
"bytes": "10187"
},
{
"name": "Python",
"bytes": "327178"
},
{
"name": "Shell",
"bytes": "8375"
}
],
"symlink_target": ""
} |
from pickle_this import get_data | {
"content_hash": "e7f5c4bd39df5ed614e3e93b7fd9a58a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 32,
"alnum_prop": 0.84375,
"repo_name": "chrisjdavie/Cookit",
"id": "44eefc70d1ea63459dd78cca1f0da8104a1682f7",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "5-a-day-thing/data_out/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44601"
}
],
"symlink_target": ""
} |
import pytest
from test.conftest import got_postgresql
@pytest.mark.skipif(not got_postgresql(), reason='Needs postgresql')
class TestSqlitePostgresSchemaEquivalence(object):
def test_equality(self, schema_sl, schema_pg):
sl_table_names = set([t.name for t in schema_sl.tables])
pg_table_names = set([t.name for t in schema_pg.tables])
assert sl_table_names == pg_table_names
for table_name in pg_table_names:
sl_table = schema_sl.tables_by_name[table_name]
pg_table = schema_pg.tables_by_name[table_name]
sl_col_names = set([c.name for c in sl_table.cols])
pg_col_names = set([c.name for c in pg_table.cols])
assert sl_col_names == pg_col_names
        def remove_name_from_relations(relations):
            for r in relations:
                del r['name']
            return relations
sl_relations = remove_name_from_relations(schema_sl.relations())
pg_relations = remove_name_from_relations(schema_pg.relations())
assert sl_relations == pg_relations
| {
"content_hash": "6a39d92f751ba0ea2a854cc3b0b0a779",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 38.464285714285715,
"alnum_prop": 0.6248839368616528,
"repo_name": "freewilll/abridger",
"id": "784ca88da4959d82031d8db166504dcee2042417",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/postgresql/test_sqlite_postgres_schema_equivalence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7736"
},
{
"name": "Makefile",
"bytes": "1356"
},
{
"name": "Python",
"bytes": "218290"
}
],
"symlink_target": ""
} |
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgText
from osgpypp import osgViewer
# Translated from file 'osgtexture2D.cpp'
# OpenSceneGraph example, osgtexture2D.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/Node>
#include <osg/Geometry>
#include <osg/Notify>
#include <osg/MatrixTransform>
#include <osg/Texture2D>
#include <osg/DrawPixels>
#include <osg/PolygonOffset>
#include <osg/Geode>
#include <osgDB/Registry>
#include <osgDB/ReadFile>
#include <osgText/Text>
#include <osgViewer/Viewer>
#####################################################################/
#
# filter wall and animation callback.
#
class FilterCallback (osg.NodeCallback) :
FilterCallback(osg.Texture2D* texture,osgText.Text* text,double delay=1.0):
_texture(texture),
_text(text),
_delay(delay),
_currPos(0),
_prevTime(0.0)
# start with a mip mapped mode to ensure that
_minFilterList.push_back(osg.Texture2D.LINEAR_MIPMAP_LINEAR)
_magFilterList.push_back(osg.Texture2D.LINEAR)
_textList.push_back("Tri-linear mip mapping (default filtering)\nsetFilter(MIN_FILTER,LINEAR_MIP_LINEAR)\nsetFilter(MAG_FILTER,LINEAR)")
_minFilterList.push_back(osg.Texture2D.NEAREST)
_magFilterList.push_back(osg.Texture2D.NEAREST)
_textList.push_back("Nearest filtering\nsetFilter(MIN_FILTER,NEAREST)\nsetFilter(MAG_FILTER,NEAREST)")
_minFilterList.push_back(osg.Texture2D.LINEAR)
_magFilterList.push_back(osg.Texture2D.LINEAR)
_textList.push_back("Linear filtering\nsetFilter(MIN_FILTER,LINEAR)\nsetFilter(MAG_FILTER,LINEAR)")
_minFilterList.push_back(osg.Texture2D.NEAREST_MIPMAP_NEAREST)
_magFilterList.push_back(osg.Texture2D.LINEAR)
_textList.push_back("nearest mip mapping (default filtering)\nsetFilter(MIN_FILTER,)\nsetFilter(MAG_FILTER,LINEAR)")
_minFilterList.push_back(osg.Texture2D.LINEAR_MIPMAP_NEAREST)
_magFilterList.push_back(osg.Texture2D.LINEAR)
_textList.push_back("bi-linear mip mapping\nsetFilter(MIN_FILTER,LINEAR_MIPMAP_NEAREST)\nsetFilter(MAG_FILTER,LINEAR)")
_minFilterList.push_back(osg.Texture2D.NEAREST_MIPMAP_LINEAR)
_magFilterList.push_back(osg.Texture2D.LINEAR)
_textList.push_back("bi-linear mip mapping\nsetFilter(MIN_FILTER,NEAREST_MIPMAP_LINEAR)\nsetFilter(MAG_FILTER,LINEAR)")
setValues()
virtual void operator()(osg.Node*, osg.NodeVisitor* nv)
if nv.getFrameStamp() :
currTime = nv.getFrameStamp().getSimulationTime()
if currTime-_prevTime>_delay :
# update filter modes and text.
setValues()
                # advance the current position, wrap round if required.
_currPos++
if _currPos>=_minFilterList.size() : _currPos=0
# record time
_prevTime = currTime
def setValues():
_texture.setFilter(osg.Texture2D.MIN_FILTER,_minFilterList[_currPos])
_texture.setFilter(osg.Texture2D.MAG_FILTER,_magFilterList[_currPos])
_text.setText(_textList[_currPos])
typedef std.vector<osg.Texture2D.FilterMode> FilterList
typedef std.vector<str> TextList
_texture = osg.Texture2D()
_text = osgText.Text()
_delay = double()
_minFilterList = FilterList()
_magFilterList = FilterList()
_textList = TextList()
_currPos = unsigned int()
_prevTime = double()
def createFilterWall(bb, filename):
group = osg.Group()
# left hand side of bounding box.
top_left = osg.Vec3(bb.xMin(),bb.yMin(),bb.zMax())
bottom_left = osg.Vec3(bb.xMin(),bb.yMin(),bb.zMin())
bottom_right = osg.Vec3(bb.xMin(),bb.yMax(),bb.zMin())
top_right = osg.Vec3(bb.xMin(),bb.yMax(),bb.zMax())
center = osg.Vec3(bb.xMin(),(bb.yMin()+bb.yMax())*0.5,(bb.zMin()+bb.zMax())*0.5)
height = bb.zMax()-bb.zMin()
# create the geometry for the wall.
geom = osg.Geometry()
vertices = osg.Vec3Array(4)
(*vertices)[0] = top_left
(*vertices)[1] = bottom_left
(*vertices)[2] = bottom_right
(*vertices)[3] = top_right
geom.setVertexArray(vertices)
texcoords = osg.Vec2Array(4)
(*texcoords)[0].set(0.0,1.0)
(*texcoords)[1].set(0.0,0.0)
(*texcoords)[2].set(1.0,0.0)
(*texcoords)[3].set(1.0,1.0)
geom.setTexCoordArray(0,texcoords)
normals = osg.Vec3Array(1)
(*normals)[0].set(1.0,0.0,0.0)
geom.setNormalArray(normals, osg.Array.BIND_OVERALL)
colors = osg.Vec4Array(1)
(*colors)[0].set(1.0,1.0,1.0,1.0)
geom.setColorArray(colors, osg.Array.BIND_OVERALL)
geom.addPrimitiveSet(osg.DrawArrays(GL_QUADS,0,4))
geom_geode = osg.Geode()
geom_geode.addDrawable(geom)
group.addChild(geom_geode)
# set up the texture state.
texture = osg.Texture2D()
texture.setDataVariance(osg.Object.DYNAMIC) # protect from being optimized away as static state.
texture.setImage(osgDB.readImageFile(filename))
stateset = geom.getOrCreateStateSet()
stateset.setTextureAttributeAndModes(0,texture,osg.StateAttribute.ON)
# create the text label.
text = osgText.Text()
text.setDataVariance(osg.Object.DYNAMIC)
text.setFont("fonts/arial.ttf")
text.setPosition(center)
text.setCharacterSize(height*0.03)
text.setAlignment(osgText.Text.CENTER_CENTER)
text.setAxisAlignment(osgText.Text.YZ_PLANE)
text_geode = osg.Geode()
text_geode.addDrawable(text)
text_stateset = text_geode.getOrCreateStateSet()
text_stateset.setAttributeAndModes(osg.PolygonOffset(-1.0,-1.0),osg.StateAttribute.ON)
group.addChild(text_geode)
# set the update callback to cycle through the various min and mag filter modes.
group.setUpdateCallback(FilterCallback(texture,text))
return group
#####################################################################/
#
# anisotropic wall and animation callback.
#
class AnisotropicCallback (osg.NodeCallback) :
AnisotropicCallback(osg.Texture2D* texture,osgText.Text* text,double delay=1.0):
_texture(texture),
_text(text),
_delay(delay),
_currPos(0),
_prevTime(0.0)
_maxAnisotropyList.push_back(1.0)
_textList.push_back("No anisotropic filtering (default)\nsetMaxAnisotropy(1.0)")
_maxAnisotropyList.push_back(2.0)
_textList.push_back("Anisotropic filtering\nsetMaxAnisotropy(2.0)")
_maxAnisotropyList.push_back(4.0)
_textList.push_back("Anisotropic filtering\nsetMaxAnisotropy(4.0)")
_maxAnisotropyList.push_back(8.0)
_textList.push_back("Anisotropic filtering\nsetMaxAnisotropy(8.0)")
_maxAnisotropyList.push_back(16.0)
_textList.push_back("Higest quality anisotropic filtering\nsetMaxAnisotropy(16.0)")
setValues()
virtual void operator()(osg.Node*, osg.NodeVisitor* nv)
if nv.getFrameStamp() :
currTime = nv.getFrameStamp().getSimulationTime()
if currTime-_prevTime>_delay :
# update filter modes and text.
setValues()
                # advance the current position, wrap round if required.
_currPos++
if _currPos>=_maxAnisotropyList.size() : _currPos=0
# record time
_prevTime = currTime
def setValues():
_texture.setMaxAnisotropy(_maxAnisotropyList[_currPos])
_text.setText(_textList[_currPos])
typedef std.vector<float> AnisotropyList
typedef std.vector<str> TextList
_texture = osg.Texture2D()
_text = osgText.Text()
_delay = double()
_maxAnisotropyList = AnisotropyList()
_textList = TextList()
_currPos = unsigned int()
_prevTime = double()
def createAnisotripicWall(bb, filename):
group = osg.Group()
# left hand side of bounding box.
top_left = osg.Vec3(bb.xMin(),bb.yMax(),bb.zMin())
bottom_left = osg.Vec3(bb.xMin(),bb.yMin(),bb.zMin())
bottom_right = osg.Vec3(bb.xMax(),bb.yMin(),bb.zMin())
top_right = osg.Vec3(bb.xMax(),bb.yMax(),bb.zMin())
center = osg.Vec3((bb.xMin()+bb.xMax())*0.5,(bb.yMin()+bb.yMax())*0.5,bb.zMin())
height = bb.yMax()-bb.yMin()
# create the geometry for the wall.
geom = osg.Geometry()
vertices = osg.Vec3Array(4)
(*vertices)[0] = top_left
(*vertices)[1] = bottom_left
(*vertices)[2] = bottom_right
(*vertices)[3] = top_right
geom.setVertexArray(vertices)
texcoords = osg.Vec2Array(4)
(*texcoords)[0].set(0.0,1.0)
(*texcoords)[1].set(0.0,0.0)
(*texcoords)[2].set(1.0,0.0)
(*texcoords)[3].set(1.0,1.0)
geom.setTexCoordArray(0,texcoords)
normals = osg.Vec3Array(1)
(*normals)[0].set(0.0,0.0,1.0)
geom.setNormalArray(normals, osg.Array.BIND_OVERALL)
colors = osg.Vec4Array(1)
(*colors)[0].set(1.0,1.0,1.0,1.0)
geom.setColorArray(colors, osg.Array.BIND_OVERALL)
geom.addPrimitiveSet(osg.DrawArrays(GL_QUADS,0,4))
geom_geode = osg.Geode()
geom_geode.addDrawable(geom)
group.addChild(geom_geode)
# set up the texture state.
texture = osg.Texture2D()
texture.setDataVariance(osg.Object.DYNAMIC) # protect from being optimized away as static state.
texture.setImage(osgDB.readImageFile(filename))
stateset = geom.getOrCreateStateSet()
stateset.setTextureAttributeAndModes(0,texture,osg.StateAttribute.ON)
# create the text label.
text = osgText.Text()
text.setDataVariance(osg.Object.DYNAMIC)
text.setFont("fonts/arial.ttf")
text.setPosition(center)
text.setCharacterSize(height*0.03)
text.setColor(osg.Vec4(1.0,0.0,1.0,1.0))
text.setAlignment(osgText.Text.CENTER_CENTER)
text.setAxisAlignment(osgText.Text.XY_PLANE)
text_geode = osg.Geode()
text_geode.addDrawable(text)
text_stateset = text_geode.getOrCreateStateSet()
text_stateset.setAttributeAndModes(osg.PolygonOffset(-1.0,-1.0),osg.StateAttribute.ON)
group.addChild(text_geode)
# set the update callback to cycle through the various min and mag filter modes.
group.setUpdateCallback(AnisotropicCallback(texture,text))
return group
#####################################################################/
#
# wrap wall and animation callback.
#
class WrapCallback (osg.NodeCallback) :
WrapCallback(osg.Texture2D* texture,osgText.Text* text,double delay=1.0):
_texture(texture),
_text(text),
_delay(delay),
_currPos(0),
_prevTime(0.0)
_wrapList.push_back(osg.Texture2D.CLAMP)
_textList.push_back("Default tex coord clamp\nsetWrap(WRAP_S,CLAMP)")
_wrapList.push_back(osg.Texture2D.CLAMP_TO_EDGE)
_textList.push_back("Clamp to edge extension\nsetWrap(WRAP_S,CLAMP_TO_EDGE)")
_wrapList.push_back(osg.Texture2D.CLAMP_TO_BORDER)
_textList.push_back("Clamp to border color extension\nsetWrap(WRAP_S,CLAMP_TO_BORDER)")
_wrapList.push_back(osg.Texture2D.REPEAT)
_textList.push_back("Repeat wrap\nsetWrap(WRAP_S,REPEAT)")
_wrapList.push_back(osg.Texture2D.MIRROR)
_textList.push_back("Mirror wrap extension\nsetWrap(WRAP_S,MIRROR)")
setValues()
virtual void operator()(osg.Node*, osg.NodeVisitor* nv)
if nv.getFrameStamp() :
currTime = nv.getFrameStamp().getSimulationTime()
if currTime-_prevTime>_delay :
# update filter modes and text.
setValues()
                # advance the current position, wrap round if required.
_currPos++
if _currPos>=_wrapList.size() : _currPos=0
# record time
_prevTime = currTime
def setValues():
_texture.setWrap(osg.Texture2D.WRAP_S,_wrapList[_currPos])
_texture.setWrap(osg.Texture2D.WRAP_T,_wrapList[_currPos])
_text.setText(_textList[_currPos])
typedef std.vector<osg.Texture2D.WrapMode> WrapList
typedef std.vector<str> TextList
_texture = osg.Texture2D()
_text = osgText.Text()
_delay = double()
_wrapList = WrapList()
_textList = TextList()
_currPos = unsigned int()
_prevTime = double()
def createWrapWall(bb, filename):
group = osg.Group()
# left hand side of bounding box.
top_left = osg.Vec3(bb.xMax(),bb.yMax(),bb.zMax())
bottom_left = osg.Vec3(bb.xMax(),bb.yMax(),bb.zMin())
bottom_right = osg.Vec3(bb.xMax(),bb.yMin(),bb.zMin())
top_right = osg.Vec3(bb.xMax(),bb.yMin(),bb.zMax())
center = osg.Vec3(bb.xMax(),(bb.yMin()+bb.yMax())*0.5,(bb.zMin()+bb.zMax())*0.5)
height = bb.zMax()-bb.zMin()
# create the geometry for the wall.
geom = osg.Geometry()
vertices = osg.Vec3Array(4)
(*vertices)[0] = top_left
(*vertices)[1] = bottom_left
(*vertices)[2] = bottom_right
(*vertices)[3] = top_right
geom.setVertexArray(vertices)
texcoords = osg.Vec2Array(4)
(*texcoords)[0].set(-1.0,2.0)
(*texcoords)[1].set(-1.0,-1.0)
(*texcoords)[2].set(2.0,-1.0)
(*texcoords)[3].set(2.0,2.0)
geom.setTexCoordArray(0,texcoords)
normals = osg.Vec3Array(1)
(*normals)[0].set(-1.0,0.0,0.0)
geom.setNormalArray(normals, osg.Array.BIND_OVERALL)
colors = osg.Vec4Array(1)
(*colors)[0].set(1.0,1.0,1.0,1.0)
geom.setColorArray(colors, osg.Array.BIND_OVERALL)
geom.addPrimitiveSet(osg.DrawArrays(GL_QUADS,0,4))
geom_geode = osg.Geode()
geom_geode.addDrawable(geom)
group.addChild(geom_geode)
# set up the texture state.
texture = osg.Texture2D()
texture.setDataVariance(osg.Object.DYNAMIC) # protect from being optimized away as static state.
texture.setBorderColor(osg.Vec4(1.0,1.0,1.0,0.5)) # only used when wrap is set to CLAMP_TO_BORDER
texture.setImage(osgDB.readImageFile(filename))
stateset = geom.getOrCreateStateSet()
stateset.setTextureAttributeAndModes(0,texture,osg.StateAttribute.ON)
stateset.setMode(GL_BLEND,osg.StateAttribute.ON)
stateset.setRenderingHint(osg.StateSet.TRANSPARENT_BIN)
# create the text label.
text = osgText.Text()
text.setDataVariance(osg.Object.DYNAMIC)
text.setFont("fonts/arial.ttf")
text.setPosition(center)
text.setCharacterSize(height*0.03)
text.setAlignment(osgText.Text.CENTER_CENTER)
text.setAxisAlignment(osgText.Text.YZ_PLANE)
text_geode = osg.Geode()
text_geode.addDrawable(text)
text_stateset = text_geode.getOrCreateStateSet()
text_stateset.setAttributeAndModes(osg.PolygonOffset(-1.0,-1.0),osg.StateAttribute.ON)
group.addChild(text_geode)
# set the update callback to cycle through the various min and mag filter modes.
group.setUpdateCallback(WrapCallback(texture,text))
return group
#####################################################################/
#
# subload wall and animation callback.
#
class ImageUpdateCallback (osg.NodeCallback) :
ImageUpdateCallback(osg.Texture2D* texture,osgText.Text* text,double delay=1.0):
_texture(texture),
_text(text),
_delay(delay),
_currPos(0),
_prevTime(0.0)
#if 1
originalImage = osgDB.readImageFile("Images/dog_left_eye.jpg")
subImage = osg.Image()
subImage.setUserData(originalImage) # attach the originalImage as user data to prevent it being deleted.
        # now assign the appropriate portion of data from the originalImage
subImage.setImage(originalImage.s()/2, originalImage.t()/2, originalImage.r(), # half the width and height
originalImage.getInternalTextureFormat(), # same internal texture format
                          originalImage.getPixelFormat(),originalImage.getDataType(), # same pixel format and data type
                          originalImage.data(originalImage.s()/4,originalImage.t()/4), # offset the start point to 1/4 into the image
osg.Image.NO_DELETE, # don't attempt to delete the image data, leave this to the originalImage
                          originalImage.getPacking(), # use the same packing
originalImage.s()) # use the width of the original image as the row width
subImage.setPixelBufferObject(osg.PixelBufferObject(subImage))
#if 0
OSG_NOTICE, "orignalImage iterator"
for(osg.Image.DataIterator itr(originalImage) itr.valid() ++itr)
OSG_NOTICE, " ", (void*)itr.data(), ", ", itr.size()
OSG_NOTICE, "subImage iterator, size ", subImage.s(), ", ", subImage.t()
i = 0
for(osg.Image.DataIterator itr(subImage) itr.valid() ++itr, ++i)
OSG_NOTICE, " ", i, ", ", (void*)itr.data(), ", ", itr.size()
for(unsigned char* d=const_cast<unsigned char*>(itr.data()) d<(itr.data()+itr.size()) ++d)
*d = 255-*d
#endif
_imageList.push_back(subImage)
#else:
_imageList.push_back(osgDB.readImageFile("Images/dog_left_eye.jpg"))
#endif
_textList.push_back("Subloaded Image 1 - dog_left_eye.jpg")
_imageList.push_back(osgDB.readImageFile("Images/dog_right_eye.jpg"))
_textList.push_back("Subloaded Image 2 - dog_right_eye.jpg")
setValues()
virtual void operator()(osg.Node*, osg.NodeVisitor* nv)
if nv.getFrameStamp() :
currTime = nv.getFrameStamp().getSimulationTime()
if currTime-_prevTime>_delay :
# update filter modes and text.
setValues()
                # advance the current position, wrap round if required.
_currPos++
if _currPos>=_imageList.size() : _currPos=0
# record time
_prevTime = currTime
def setValues():
# Note, as long as the images are the same dimensions subloading will be used
# to update the textures. If dimensions change then the texture objects have
        # to be deleted and re-created.
#
# The load/subload happens during the draw traversal so doesn't happen on
        # the setImage which just updates internal pointers and modified flags.
_texture.setImage(_imageList[_currPos])
#_texture.dirtyTextureObject()
_text.setText(_textList[_currPos])
typedef std.vector< osg.Image > ImageList
typedef std.vector<str> TextList
_texture = osg.Texture2D()
_text = osgText.Text()
_delay = double()
_imageList = ImageList()
_textList = TextList()
_currPos = unsigned int()
_prevTime = double()
def createSubloadWall(bb):
group = osg.Group()
# left hand side of bounding box.
top_left = osg.Vec3(bb.xMin(),bb.yMax(),bb.zMax())
bottom_left = osg.Vec3(bb.xMin(),bb.yMax(),bb.zMin())
bottom_right = osg.Vec3(bb.xMax(),bb.yMax(),bb.zMin())
top_right = osg.Vec3(bb.xMax(),bb.yMax(),bb.zMax())
center = osg.Vec3((bb.xMax()+bb.xMin())*0.5,bb.yMax(),(bb.zMin()+bb.zMax())*0.5)
height = bb.zMax()-bb.zMin()
# create the geometry for the wall.
geom = osg.Geometry()
vertices = osg.Vec3Array(4)
(*vertices)[0] = top_left
(*vertices)[1] = bottom_left
(*vertices)[2] = bottom_right
(*vertices)[3] = top_right
geom.setVertexArray(vertices)
texcoords = osg.Vec2Array(4)
(*texcoords)[0].set(0.0,1.0)
(*texcoords)[1].set(0.0,0.0)
(*texcoords)[2].set(1.0,0.0)
(*texcoords)[3].set(1.0,1.0)
geom.setTexCoordArray(0,texcoords)
normals = osg.Vec3Array(1)
(*normals)[0].set(0.0,-1.0,0.0)
geom.setNormalArray(normals, osg.Array.BIND_OVERALL)
colors = osg.Vec4Array(1)
(*colors)[0].set(1.0,1.0,1.0,1.0)
geom.setColorArray(colors, osg.Array.BIND_OVERALL)
geom.addPrimitiveSet(osg.DrawArrays(GL_QUADS,0,4))
geom_geode = osg.Geode()
geom_geode.addDrawable(geom)
group.addChild(geom_geode)
# set up the texture state.
texture = osg.Texture2D()
texture.setDataVariance(osg.Object.DYNAMIC) # protect from being optimized away as static state.
texture.setFilter(osg.Texture2D.MIN_FILTER,osg.Texture2D.LINEAR)
texture.setFilter(osg.Texture2D.MAG_FILTER,osg.Texture2D.LINEAR)
stateset = geom.getOrCreateStateSet()
stateset.setTextureAttributeAndModes(0,texture,osg.StateAttribute.ON)
# create the text label.
text = osgText.Text()
text.setDataVariance(osg.Object.DYNAMIC)
text.setFont("fonts/arial.ttf")
text.setPosition(center)
text.setCharacterSize(height*0.03)
text.setAlignment(osgText.Text.CENTER_CENTER)
text.setAxisAlignment(osgText.Text.XZ_PLANE)
text_geode = osg.Geode()
text_geode.addDrawable(text)
text_stateset = text_geode.getOrCreateStateSet()
text_stateset.setAttributeAndModes(osg.PolygonOffset(-1.0,-1.0),osg.StateAttribute.ON)
group.addChild(text_geode)
# set the update callback to cycle through the various min and mag filter modes.
group.setUpdateCallback(ImageUpdateCallback(texture,text))
return group
def createModel():
# create the root node which will hold the model.
root = osg.Group()
# turn off lighting
root.getOrCreateStateSet().setMode(GL_LIGHTING,osg.StateAttribute.OFF)
bb = osg.BoundingBox(0.0,0.0,0.0,1.0,1.0,1.0)
root.addChild(createFilterWall(bb,"Images/lz.rgb"))
root.addChild(createAnisotripicWall(bb,"Images/primitives.gif"))
root.addChild(createWrapWall(bb,"Images/tree0.rgba"))
root.addChild(createSubloadWall(bb))
return root
int main(int , char **)
# construct the viewer.
viewer = osgViewer.Viewer()
# add model to viewer.
viewer.setSceneData( createModel() )
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| {
"content_hash": "eedc62fed4379163504183b0463f70b1",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 144,
"avg_line_length": 32.141043723554304,
"alnum_prop": 0.6478409689310163,
"repo_name": "JaneliaSciComp/osgpyplusplus",
"id": "b1806cf75d91c48f3d35f005a8decc4903d90943",
"size": "22970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/rough_translated1/osgtexture2D.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5836"
},
{
"name": "C++",
"bytes": "15619800"
},
{
"name": "CMake",
"bytes": "40664"
},
{
"name": "Python",
"bytes": "181050"
}
],
"symlink_target": ""
} |
import mne
import sys
from mne import compute_covariance
from my_settings import (epochs_folder, mne_folder)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
reject = dict(
grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=180e-6 #
)
subject = sys.argv[1]
epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad(reject)
# fig = epochs.plot_drop_log(subject=subject, show=False)
# fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)
# Make noise cov
cov = compute_covariance(epochs, tmin=None, tmax=0, method="shrunk")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)
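# Illustrative sketch (not part of the original script, never called): the
# covariance written above can later be loaded again with mne.read_cov, e.g. for
# building an inverse operator in a separate script.
def _read_back_cov_example(subject_id):
    return mne.read_cov(mne_folder + "%s-cov.fif" % subject_id)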
| {
"content_hash": "a52d5974e5c0ceb7cc8cae4ea66a63c0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 24.642857142857142,
"alnum_prop": 0.7072463768115942,
"repo_name": "MadsJensen/CAA",
"id": "8a1f48dae0509eaabdf15a850cdc57fbfa9bcf4c",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calc_cov.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "131985"
},
{
"name": "Shell",
"bytes": "385"
}
],
"symlink_target": ""
} |
from aiida.orm import Code, DataFactory
from aiida.orm.workflow import Workflow
from aiida.workflows.wf_phonon import WorkflowPhonon
from aiida.orm import load_node, load_workflow
from aiida.orm.calculation.inline import make_inline
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
import numpy as np
#@make_inline
def generate_supercell2_inline(**kwargs):
structure = kwargs.pop('structure')
supercell = StructureData(cell=structure.cell)
for site in structure.sites:
supercell.append_atom(position=site.position,
symbols=site.kind_name)
return {"supercell": supercell}
@make_inline
def generate_supercell_inline(**kwargs):
import itertools
structure = kwargs.pop('structure')
supercell = kwargs.pop('supercell').dict.supercell
symbols = [site.kind_name for site in structure.sites]
positions=np.array([site.position for site in structure.sites])
position_super_cell = []
for k in range(positions.shape[0]):
for r in itertools.product(*[range(i) for i in supercell[::-1]]):
position_super_cell.append(positions[k,:] + np.dot(np.array(r[::-1]), structure.cell))
position_super_cell = np.array(position_super_cell)
symbol_super_cell = []
for j in range(positions.shape[0]):
symbol_super_cell += [symbols[j]] * np.prod(supercell)
supercell = StructureData(cell=np.dot(structure.cell, np.diag(supercell)))
for i, position in enumerate(position_super_cell):
supercell.append_atom(position=position.tolist(),
symbols=symbol_super_cell[i])
return {"supercell": supercell}
class WorkflowQuasiparticle(Workflow):
def __init__(self, **kwargs):
super(WorkflowQuasiparticle, self).__init__(**kwargs)
def generate_md_combinate(self, supercell, structure, parameters_md, parameters_dyna, force_constants):
codename = parameters_md['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters_md['resources'])
calc.label = "md lammps calculation"
calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_supercell(supercell)
calc.use_parameters(ParameterData(dict=parameters_md['parameters']))
calc.use_parameters_dynaphopy(ParameterData(dict=parameters_dyna['parameters']))
calc.use_force_constants(force_constants)
calc.use_potential(ParameterData(dict=parameters_md['potential']))
calc.store_all()
return calc
def generate_md_lammps(self, structure, parameters):
codename = parameters['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters['resources'])
calc.label = "md lammps calculation"
calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_parameters(ParameterData(dict=parameters['parameters']))
calc.use_potential(ParameterData(dict=parameters['potential']))
calc.store_all()
return calc
def generate_calculation_dynaphopy(self, structure, force_constants, parameters, trajectory):
codename = parameters['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters['resources'])
# calc.label = "dynaphopy calculation"
# calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_parameters(ParameterData(dict=parameters['parameters']))
calc.use_force_constants(force_constants)
calc.use_trajectory(trajectory)
calc.store_all()
return calc
# Calculates the reference crystal structure (optimize it if requested)
@Workflow.step
def start(self):
self.append_to_report('Starting workflow_workflow')
self.append_to_report('Phonon calculation of base structure')
wf_parameters = self.get_parameters()
wf = WorkflowPhonon(params=wf_parameters)
wf.store()
# wf = load_workflow(127)
self.attach_workflow(wf)
wf.start()
md_code = Code.get_from_string(wf_parameters['lammps_md']['code'])
if md_code.get_input_plugin_name() == 'lammps.combinate':
self.next(self.md_combinate)
else:
self.next(self.md_lammps)
# Generate the volume expanded cells
@Workflow.step
def md_lammps(self):
self.append_to_report('Temperatures expansion calculations')
wf_parameters = self.get_parameters()
structure = self.get_step(self.start).get_sub_workflows()[0].get_result('final_structure')
temperatures = np.array(wf_parameters['dynaphopy_input']['temperatures'])
# temperatures = np.array([200, 300, 400, 500, 600, 700, 800, 900, 1000])
inline_params = {'structure': structure,
'supercell': ParameterData(dict=wf_parameters['lammps_md'])}
supercell = generate_supercell_inline(**inline_params)[1]['supercell']
# nodes = [11504, 11507, 11510, 11513, 11516]
for i, temperature in enumerate(temperatures):
wf_parameters_md = dict(wf_parameters['lammps_md'])
wf_parameters_md['parameters']['temperature'] = temperature
calc = self.generate_md_lammps(supercell, wf_parameters_md)
calc.label = 'temperature_{}'.format(temperature)
# calc = load_node(nodes[i])
self.append_to_report('created MD calculation with PK={}'.format(calc.pk))
self.attach_calculation(calc)
self.next(self.dynaphopy)
# Collects the forces and prepares force constants
@Workflow.step
def dynaphopy(self):
wf_parameters = self.get_parameters()
harmonic_force_constants = self.get_step(self.start).get_sub_workflows()[0].get_result('force_constants')
harmonic_dos = self.get_step(self.start).get_sub_workflows()[0].get_result('dos')
structure = self.get_step(self.start).get_sub_workflows()[0].get_result('final_structure')
self.add_result('force_constants', harmonic_force_constants)
self.add_result('dos', harmonic_dos)
calcs = self.get_step_calculations(self.md_lammps)
# nodes = [11578, 11580, 11582, 11584, 11586]
for i, calc in enumerate(calcs):
trajectory = calc.out.trajectory_data
dynaphopy_input = dict(wf_parameters['dynaphopy_input'])
dynaphopy_input['parameters']['temperature'] = calc.inp.parameters.dict.temperature
dyna_calc = self.generate_calculation_dynaphopy(structure,
harmonic_force_constants,
dynaphopy_input,
trajectory)
dyna_calc.label = calc.label
# dyna_calc = load_node(nodes[i])
self.append_to_report('created QP calculation with PK={}'.format(dyna_calc.pk))
self.attach_calculation(dyna_calc)
self.next(self.collect)
@Workflow.step
def md_combinate(self):
self.append_to_report('Temperatures expansion calculations')
wf_parameters = self.get_parameters()
harmonic_force_constants = self.get_step(self.start).get_sub_workflows()[0].get_result('force_constants')
harmonic_dos = self.get_step(self.start).get_sub_workflows()[0].get_result('dos')
structure = self.get_step(self.start).get_sub_workflows()[0].get_result('final_structure')
self.add_result('force_constants', harmonic_force_constants)
self.add_result('dos', harmonic_dos)
temperatures = np.array(wf_parameters['dynaphopy_input']['temperatures'])
# temperatures = np.array([200, 300, 400, 500, 600, 700, 800, 900, 1000])
inline_params = {'structure': structure,
'supercell': ParameterData(dict=wf_parameters['input_md'])}
supercell = generate_supercell_inline(**inline_params)[1]['supercell']
# nodes = [11504, 11507, 11510, 11513, 11516]
for i, temperature in enumerate(temperatures):
wf_parameters_md = dict(wf_parameters['input_md'])
wf_parameters_md['parameters']['temperature'] = temperature
dynaphopy_input = dict(wf_parameters['dynaphopy_input'])
dynaphopy_input['parameters']['temperature'] = temperature
calc = self.generate_md_combinate(supercell, structure, wf_parameters_md, dynaphopy_input, harmonic_force_constants)
calc.label = 'temperature_{}'.format(temperature)
# calc = load_node(nodes[i])
self.append_to_report('created MD calculation with PK={}'.format(calc.pk))
self.attach_calculation(calc)
self.next(self.collect)
# Collects the forces and prepares force constants
@Workflow.step
def collect(self):
free_energy = []
entropy = []
temperature = []
cv = []
# get the phonon for 0 K
temperature = [0]
free_energy = [self.get_step('start').get_sub_workflows()[0].get_result('thermal_properties').get_array('free_energy')[0]]
entropy = [self.get_step('start').get_sub_workflows()[0].get_result('thermal_properties').get_array('entropy')[0]]
cv = [self.get_step('start').get_sub_workflows()[0].get_result('thermal_properties').get_array('cv')[0]]
# get temperature dependent properties from dynaphopy
wf_parameters = self.get_parameters()
md_code = Code.get_from_string(wf_parameters['input_md']['code'])
if md_code.get_input_plugin_name() == 'lammps.combinate':
calcs = self.get_step_calculations(self.md_combinate)
else:
calcs = self.get_step_calculations(self.dynaphopy)
for calc in calcs:
thermal_properties = calc.out.thermal_properties
temperature.append(thermal_properties.dict.temperature)
entropy.append(thermal_properties.dict.entropy)
free_energy.append(thermal_properties.dict.free_energy)
cv.append(thermal_properties.dict.cv)
order = np.argsort(temperature)
array_data = ArrayData()
array_data.set_array('temperature', np.array(temperature)[order])
array_data.set_array('free_energy', np.array(free_energy)[order])
array_data.set_array('entropy', np.array(entropy)[order])
array_data.set_array('cv', np.array(cv)[order])
array_data.store()
self.add_result('thermal_properties', array_data)
# Pass the final properties from phonon workflow
optimized_data = self.get_step(self.start).get_sub_workflows()[0].get_result('optimized_structure_data')
final_structure = self.get_step(self.start).get_sub_workflows()[0].get_result('final_structure')
self.add_result('optimized_structure_data', optimized_data)
self.add_result('final_structure', final_structure)
self.next(self.exit) | {
"content_hash": "7b72328f42209381cd4de2b84fa2b2f0",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 130,
"avg_line_length": 40.939068100358426,
"alnum_prop": 0.6382419891437576,
"repo_name": "abelcarreras/aiida_extensions",
"id": "6ce6cb236899414b56bed0ba36c3acc65f28d800",
"size": "11443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflows/old/wf_quasiparticle_comb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12031"
},
{
"name": "Makefile",
"bytes": "7701"
},
{
"name": "Python",
"bytes": "528341"
}
],
"symlink_target": ""
} |
import django.utils.version
from .constants import DEFAULT_PROTOCOL
from .fields import PickledObjectField
__all__ = 'VERSION', '__version__', 'DEFAULT_PROTOCOL', 'PickledObjectField'
VERSION = (3, 1, 0, 'final', 0)
__version__ = django.utils.version.get_version(VERSION)
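# Editor's usage sketch (hypothetical app/model names, not part of this
# package): the field exported above is used like any other Django model field.
#
#     from django.db import models
#     from picklefield.fields import PickledObjectField
#
#     class Job(models.Model):
#         payload = PickledObjectField(default=dict)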
| {
"content_hash": "2d4383d4d4c93820627dce3d55dbcae4",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 27.6,
"alnum_prop": 0.7246376811594203,
"repo_name": "gintas/django-picklefield",
"id": "46e9c081aab70a730debbd77d2de1c4f2a7aa533",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "picklefield/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23152"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.db import models, connection
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.publication import Publication
from .models.article import Article
class Advertisement(models.Model):
customer = models.CharField(max_length=100)
publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
@classmethod
def setUpClass(cls):
super(ModelPackageTests, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def test_m2m_tables_in_subpackage_models(self):
"""
Regression for #12168: models split into subpackages still get M2M
tables.
"""
p = Publication.objects.create(title="FooBar")
current_site = Site.objects.get_current()
self.assertEqual(current_site.domain, "example.com")
a = Article.objects.create(headline="a foo headline")
a.publications.add(p)
a.sites.add(current_site)
a = Article.objects.get(id=a.pk)
self.assertEqual(a.id, a.pk)
self.assertEqual(a.sites.count(), 1)
def test_models_in_the_test_package(self):
"""
Regression for #12245 - Models can exist in the test package, too.
"""
p = Publication.objects.create(title="FooBar")
ad = Advertisement.objects.create(customer="Lawrence Journal-World")
ad.publications.add(p)
ad = Advertisement.objects.get(id=ad.pk)
self.assertEqual(ad.publications.count(), 1)
def test_automatic_m2m_column_names(self):
"""
Regression for #12386 - field names on the autogenerated intermediate
class that are specified as dotted strings don't retain any path
component for the field or column name.
"""
self.assertEqual(
Article.publications.through._meta.fields[1].name, 'article'
)
self.assertEqual(
Article.publications.through._meta.fields[1].get_attname_column(),
('article_id', 'article_id')
)
self.assertEqual(
Article.publications.through._meta.fields[2].name, 'publication'
)
self.assertEqual(
Article.publications.through._meta.fields[2].get_attname_column(),
('publication_id', 'publication_id')
)
self.assertEqual(
Article._meta.get_field('publications').m2m_db_table(),
truncate_name('model_package_article_publications', connection.ops.max_name_length()),
)
self.assertEqual(
Article._meta.get_field('publications').m2m_column_name(), 'article_id'
)
self.assertEqual(
Article._meta.get_field('publications').m2m_reverse_name(),
'publication_id'
)
| {
"content_hash": "de0523f55403484868407e71e0c8a55f",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 98,
"avg_line_length": 34.758620689655174,
"alnum_prop": 0.6418650793650794,
"repo_name": "gdi2290/django",
"id": "84b1dd1c301fb0a7c880bf6b60373ecca129a0f1",
"size": "3024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_package/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10365282"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
from graphene import Schema
class Schema(Schema):
"""
This class creates a graphql schema that resolves its fields using
the nautilus event queue for asynchronous data retrieval.
"""
def __init__(self, executor=None, auto_camelcase=None, **kwds):
super().__init__(
auto_camelcase=False,
**kwds
)
| {
"content_hash": "33f9105a9d232f0a5545c7f17d71ca32",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 30.416666666666668,
"alnum_prop": 0.6054794520547945,
"repo_name": "aaivazis/nautilus",
"id": "3b7454a6a44bfabbdeb4f7bb6e306a1103ee2592",
"size": "384",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nautilus/api/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15430"
},
{
"name": "Python",
"bytes": "103018"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
blobs, im_scales = _get_blobs(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs['data']
# seems to have height, width, and image scales
# still not sure about the scale, maybe full image it is 1.
blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
_, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
boxes = rois[:, 1:5] / im_scales[0]
# print(scores.shape, bbox_pred.shape, rois.shape, boxes.shape)
scores = np.reshape(scores, [scores.shape[0], -1])
bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
return scores, pred_boxes
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
for cls_ind in range(num_classes):
for im_ind in range(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
dets = dets[inds,:]
if dets.size == 0:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.05):
np.random.seed(cfg.RNG_SEED)
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, weights_filename)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
for i in range(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(sess, net, im)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
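# Editor's sketch of _clip_boxes in isolation (only numpy needed): boxes are
# (x1, y1, x2, y2) rows and are clamped to the image extent, here a 480x640
# image.
#
# >>> b = np.array([[-5., 10., 700., 480.]])
# >>> _clip_boxes(b, (480, 640, 3))
# array([[  0.,  10., 639., 479.]])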
| {
"content_hash": "4704b9089dc5b1c763f580855fdcb62d",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 101,
"avg_line_length": 32.910994764397905,
"alnum_prop": 0.6264715240216354,
"repo_name": "kukuruza/tf-faster-rcnn",
"id": "932fc888dd917e88827e22b0344b3105d20e094d",
"size": "6514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/model/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "146"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "94"
},
{
"name": "Matlab",
"bytes": "1821"
},
{
"name": "Python",
"bytes": "212191"
},
{
"name": "Roff",
"bytes": "1195"
},
{
"name": "Shell",
"bytes": "9566"
}
],
"symlink_target": ""
} |
"""Unit tests to cover AdGroupAdService."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import base64
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201109
from tests.adspygoogle.adwords import TEST_VERSION_V201109
from tests.adspygoogle.adwords import VERSION_V201109
from tests.adspygoogle.adwords import client
class AdGroupAdServiceTestV201109(unittest.TestCase):
"""Unittest suite for AdGroupAdService using v201109."""
SERVER = SERVER_V201109
VERSION = VERSION_V201109
IMAGE_DATA = Utils.ReadFile(os.path.join('data', 'image.jpg'))
MOBILE_IMAGE_DATA = Utils.ReadFile(os.path.join('data', 'image_192x53.jpg'))
IMAGE_DATA = base64.encodestring(IMAGE_DATA)
MOBILE_IMAGE_DATA = base64.encodestring(MOBILE_IMAGE_DATA)
client.debug = False
service = None
campaign_id = '0'
ad_group_id = '0'
ad = None
def setUp(self):
"""Prepare unittest."""
if not self.__class__.service:
self.__class__.service = client.GetAdGroupAdService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
if self.__class__.campaign_id == '0' or self.__class__.ad_group_id == '0':
campaign_service = client.GetCampaignService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'name': 'Campaign #%s' % Utils.GetUniqueName(),
'status': 'PAUSED',
'biddingStrategy': {
'xsi_type': 'ManualCPC'
},
'budget': {
'period': 'DAILY',
'amount': {
'microAmount': '1000000'
},
'deliveryMethod': 'STANDARD'
}
}
}]
self.__class__.campaign_id = campaign_service.Mutate(
operations)[0]['value'][0]['id']
ad_group_service = client.GetAdGroupService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': self.__class__.campaign_id,
'name': 'AdGroup #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'xsi_type': 'ManualCPCAdGroupBids',
'keywordMaxCpc': {
'amount': {
'microAmount': '1000000'
}
}
}
}
}]
self.__class__.ad_group_id = ad_group_service.Mutate(
operations)[0]['value'][0]['id']
def testAddTextAd(self):
"""Test whether we can add a text ad."""
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': self.__class__.ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
},
'status': 'ENABLED',
}
}]
ads = self.__class__.service.Mutate(operations)
self.__class__.ad = ads[0]['value'][0]
self.assert_(isinstance(ads, tuple))
def testGetAllAdsFromCampaign(self):
"""Test whether we can fetch all ads from given campaign."""
selector = {
'fields': ['AdGroupId', 'Status'],
'predicates': [
{
'field': 'CampaignId',
'operator': 'EQUALS',
'values': [self.__class__.campaign_id]
}
]
}
self.assert_(isinstance(self.__class__.service.Get(selector), tuple))
def makeTestSuiteV201109():
"""Set up test suite using v201109.
Returns:
TestSuite test suite using v201109.
"""
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(AdGroupAdServiceTestV201109))
return suite
if __name__ == '__main__':
suites = []
if TEST_VERSION_V201109:
suites.append(makeTestSuiteV201109())
if suites:
alltests = unittest.TestSuite(suites)
unittest.main(defaultTest='alltests')
| {
"content_hash": "e97a4b48f42504dc237150095fbfa453",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 31.827338129496404,
"alnum_prop": 0.5522151898734177,
"repo_name": "nearlyfreeapps/python-googleadwords",
"id": "319c26b4489abba5d9a5724f89524f11282e58c6",
"size": "5066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/adspygoogle/adwords/ad_group_ad_service_unittest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "1394721"
}
],
"symlink_target": ""
} |
import dask.dataframe as dd
import pandas as pd
from featuretools.utils.gen_utils import import_or_none, is_instance
ps = import_or_none('pyspark.pandas')
def to_pandas(df, index=None, sort_index=False, int_index=False):
'''
Testing util to convert dataframes to pandas. If a pandas dataframe is passed in, just returns the dataframe.
Args:
index (str, optional): column name to set as index, defaults to None
sort_index (bool, optional): whether to sort the dataframe on the index after setting it, defaults to False
int_index (bool, optional): Converts computed dask index to Int64Index to avoid errors, defaults to False
Returns:
Pandas DataFrame
'''
if isinstance(df, (pd.DataFrame, pd.Series)):
return df
if isinstance(df, (dd.DataFrame, dd.Series)):
pd_df = df.compute()
if is_instance(df, (ps, ps), ('DataFrame', 'Series')):
pd_df = df.to_pandas()
if index:
pd_df = pd_df.set_index(index)
if sort_index:
pd_df = pd_df.sort_index()
if int_index and isinstance(df, dd.DataFrame):
pd_df.index = pd.Int64Index(pd_df.index)
return pd_df
def get_df_tags(df):
'''Gets a DataFrame's semantic tags without index or time index tags for Woodwork init'''
semantic_tags = {}
for col_name in df.columns:
semantic_tags[col_name] = df.ww.semantic_tags[col_name] - {'time_index', 'index'}
return semantic_tags
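# Editor's usage sketch (illustrative column names; dask and pandas are already
# imported above):
#
# >>> ddf = dd.from_pandas(pd.DataFrame({'id': [2, 0, 1], 'x': [20, 0, 10]}),
# ...                      npartitions=2)
# >>> to_pandas(ddf, index='id', sort_index=True)['x'].tolist()
# [0, 10, 20]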
| {
"content_hash": "7998987be8032978421c6b396a559cd1",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 115,
"avg_line_length": 32.53333333333333,
"alnum_prop": 0.6591530054644809,
"repo_name": "Featuretools/featuretools",
"id": "db4345404b6ca90e4eed93171b796043cdc6f8bb",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/latest-dep-update-03d11f0",
"path": "featuretools/tests/testing_utils/es_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3340"
},
{
"name": "Makefile",
"bytes": "736"
},
{
"name": "Python",
"bytes": "921333"
},
{
"name": "Shell",
"bytes": "511"
}
],
"symlink_target": ""
} |
"""Main entry point into the Identity service."""
import abc
import functools
import os
import uuid
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
import six
from keystone import clean
from keystone.common import cache
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone.i18n import _, _LW
from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='identity')
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
def filter_user(user_ref):
"""Filter out private items in a user dict.
'password', 'tenants' and 'groups' are never returned.
:returns: user_ref
"""
if user_ref:
user_ref = user_ref.copy()
user_ref.pop('password', None)
user_ref.pop('projects', None)
user_ref.pop('groups', None)
user_ref.pop('domains', None)
try:
user_ref['extra'].pop('password', None)
user_ref['extra'].pop('tenants', None)
except KeyError:
pass
return user_ref
def exception_translated(exception_type):
"""Wraps API calls to map to correct exception."""
def _exception_translated(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except exception.PublicIDNotFound as e:
if exception_type == 'user':
raise exception.UserNotFound(user_id=str(e))
elif exception_type == 'group':
raise exception.GroupNotFound(group_id=str(e))
elif exception_type == 'assertion':
raise AssertionError(_('Invalid user / password'))
else:
raise
return wrapper
return _exception_translated
@dependency.provider('identity_api')
@dependency.requires('assignment_api', 'resource_api', 'revoke_api')
class Manager(manager.Manager):
"""Default pivot point for the Identity backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
This class also handles the support of domain specific backends, by using
the DomainConfigs class. The setup call for DomainConfigs is called
from within the @domains_configured wrapper in a lazy loading fashion
to get around the fact that we can't satisfy the assignment api it needs
from within our __init__() function since the assignment driver is not
itself yet initialized.
Each of the identity calls are pre-processed here to choose, based on
domain, which of the drivers should be called. The non-domain-specific
driver is still in place, and is used if there is no specific driver for
the domain in question (or we are not using multiple domain drivers).
Starting with Juno, in order to be able to obtain the domain from
just an ID being presented as part of an API call, a public ID to domain
and local ID mapping is maintained. This mapping also allows for the local
ID of drivers that do not provide simple UUIDs (such as LDAP) to be
referenced via a public facing ID. The mapping itself is automatically
generated as entities are accessed via the driver.
This mapping is only used when:
- the entity is being handled by anything other than the default driver, or
- the entity is being handled by the default LDAP driver and backward
compatible IDs are not required.
This means that in the standard case of a single SQL backend or the default
settings of a single LDAP backend (since backward compatible IDs is set to
True by default), no mapping is used. An alternative approach would be to
always use the mapping table, but in the cases where we don't need it to
make the public and local IDs the same. It is felt that not using the
mapping by default is a more prudent way to introduce this functionality.
"""
_USER = 'user'
_GROUP = 'group'
def __init__(self):
super(Manager, self).__init__(CONF.identity.driver)
self.event_callbacks = {
notifications.ACTIONS.deleted: {
'domain': [self._domain_deleted],
},
}
def _domain_deleted(self, service, resource_type, operation,
payload):
domain_id = payload['resource_info']
user_refs = self.list_users(domain_scope=domain_id)
group_refs = self.list_groups(domain_scope=domain_id)
for group in group_refs:
# Cleanup any existing groups.
try:
self.delete_group(group['id'])
except exception.GroupNotFound:
LOG.debug(('Group %(groupid)s not found when deleting domain '
'contents for %(domainid)s, continuing with '
'cleanup.'),
{'groupid': group['id'], 'domainid': domain_id})
# And finally, delete the users themselves
for user in user_refs:
try:
self.delete_user(user['id'])
except exception.UserNotFound:
LOG.debug(('User %(userid)s not found when deleting domain '
'contents for %(domainid)s, continuing with '
'cleanup.'),
{'userid': user['id'], 'domainid': domain_id})
def _ensure_domain_id_in_hints(self, hints, domain_id):
if (domain_id is not None and
not hints.get_exact_filter_by_name('domain_id')):
hints.add_filter('domain_id', domain_id)
# The actual driver calls - these are pre/post processed here as
# part of the Manager layer to make sure we:
#
# - select the right driver for this domain
# - clear/set domain_ids for drivers that do not support domains
# - create any ID mapping that might be required
@notifications.emit_event('authenticate')
@exception_translated('assertion')
def authenticate(self, context, user_id, password):
return self.driver.authenticate(user_id, password)
@exception_translated('user')
def create_user(self, user_ref, initiator=None):
user = user_ref.copy()
user['name'] = clean.user_name(user['name'])
user.setdefault('enabled', True)
user['enabled'] = clean.user_enabled(user['enabled'])
domain_id = user['domain_id']
self.resource_api.get_domain(domain_id)
# For creating a user, the domain is in the object itself
domain_id = user_ref['domain_id']
# Generate a local ID - in the future this might become a function of
# the underlying driver so that it could conform to rules set down by
# that particular driver type.
user['id'] = uuid.uuid4().hex
ref = self.driver.create_user(user['id'], user)
notifications.Audit.created(self._USER, user['id'], initiator)
return ref
@exception_translated('user')
@MEMOIZE
def get_user(self, user_id):
return self.driver.get_user(user_id)
def assert_user_enabled(self, user_id, user=None):
"""Assert the user and the user's domain are enabled.
:raise AssertionError if the user or the user's domain is disabled.
"""
if user is None:
user = self.get_user(user_id)
self.resource_api.assert_domain_enabled(user['domain_id'])
if not user.get('enabled', True):
raise AssertionError(_('User is disabled: %s') % user_id)
@exception_translated('user')
@MEMOIZE
def get_user_by_name(self, user_name, domain_id):
return self.driver.get_user_by_name(user_name, domain_id)
@manager.response_truncated
@exception_translated('user')
def list_users(self, domain_scope=None, hints=None):
hints = hints or driver_hints.Hints()
# Force the domain_scope into the hint to ensure that we only get
# back domains for that scope.
self._ensure_domain_id_in_hints(hints, domain_scope)
return self.driver.list_users(hints)
@exception_translated('user')
def update_user(self, user_id, user_ref, initiator=None):
old_user_ref = self.get_user(user_id)
user = user_ref.copy()
if 'name' in user:
user['name'] = clean.user_name(user['name'])
if 'enabled' in user:
user['enabled'] = clean.user_enabled(user['enabled'])
if 'domain_id' in user:
self.resource_api.get_domain(user['domain_id'])
if 'id' in user:
if user_id != user['id']:
raise exception.ValidationError(_('Cannot change user ID'))
# Since any ID in the user dict is now irrelevant, remove it so that
# the driver layer won't be confused by the fact that this is the
# public ID, not the local ID
user.pop('id')
self.get_user.invalidate(self, old_user_ref['id'])
self.get_user_by_name.invalidate(self, old_user_ref['name'],
old_user_ref['domain_id'])
ref = self.driver.update_user(user_id, user)
notifications.Audit.updated(self._USER, user_id, initiator)
enabled_change = ((user.get('enabled') is False) and
user['enabled'] != old_user_ref.get('enabled'))
if enabled_change or user.get('password') is not None:
self.emit_invalidate_user_token_persistence(user_id)
return ref
@exception_translated('user')
def delete_user(self, user_id, initiator=None):
# Get user details to invalidate the cache.
user_old = self.get_user(user_id)
self.driver.delete_user(user_id)
self.assignment_api.delete_user(user_id)
self.get_user.invalidate(self, user_id)
self.get_user_by_name.invalidate(self, user_old['name'],
user_old['domain_id'])
notifications.Audit.deleted(self._USER, user_id, initiator)
@notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
def emit_invalidate_user_token_persistence(self, user_id):
"""Emit a notification to the callback system to revoke user tokens.
This method and associated callback listener removes the need for
making a direct call to another manager to delete and revoke tokens.
:param user_id: user identifier
:type user_id: string
"""
pass
@notifications.internal(
notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
def emit_invalidate_grant_token_persistence(self, user_project):
"""Emit a notification to the callback system to revoke grant tokens.
This method and associated callback listener removes the need for
making a direct call to another manager to delete and revoke tokens.
:param user_project: {'user_id': user_id, 'project_id': project_id}
:type user_project: dict
"""
pass
def change_password(self, context, user_id, original_password,
new_password):
# authenticate() will raise an AssertionError if authentication fails
self.authenticate(context, user_id, original_password)
update_dict = {'password': new_password}
self.update_user(user_id, update_dict)
@exception_translated('group')
def create_group(self, group_ref, initiator=None):
group = group_ref.copy()
group.setdefault('description', '')
domain_id = group['domain_id']
self.resource_api.get_domain(domain_id)
# For creating a group, the domain is in the object itself
domain_id = group_ref['domain_id']
driver = self._select_identity_driver(domain_id)
group = self._clear_domain_id_if_domain_unaware(driver, group)
# Generate a local ID - in the future this might become a function of
# the underlying driver so that it could conform to rules set down by
# that particular driver type.
group['id'] = uuid.uuid4().hex
ref = driver.create_group(group['id'], group)
notifications.Audit.created(self._GROUP, group['id'], initiator)
return ref
@exception_translated('group')
@MEMOIZE
def get_group(self, group_id):
ref = self.driver.get_group(group_id)
return ref
@exception_translated('group')
def get_group_by_name(self, group_name, domain_id):
ref = self.driver.get_group_by_name(group_name, domain_id)
return ref
@exception_translated('group')
def update_group(self, group_id, group, initiator=None):
if 'domain_id' in group:
self.resource_api.get_domain(group['domain_id'])
ref = self.driver.update_group(group_id, group)
self.get_group.invalidate(self, group_id)
notifications.Audit.updated(self._GROUP, group_id, initiator)
return ref
@exception_translated('group')
def delete_group(self, group_id, initiator=None):
user_ids = (u['id'] for u in self.list_users_in_group(group_id))
self.driver.delete_group(group_id)
self.get_group.invalidate(self, group_id)
self.assignment_api.delete_group(group_id)
notifications.Audit.deleted(self._GROUP, group_id, initiator)
for uid in user_ids:
self.emit_invalidate_user_token_persistence(uid)
@exception_translated('group')
def add_user_to_group(self, user_id, group_id):
self.driver.add_user_to_group(user_id, group_id)
@exception_translated('group')
def remove_user_from_group(self, user_id, group_id):
self.driver.remove_user_from_group(user_id, group_id)
self.emit_invalidate_user_token_persistence(user_id)
@manager.response_truncated
@exception_translated('user')
def list_groups_for_user(self, user_id, hints=None):
hints = hints or driver_hints.Hints()
ref_list = self.driver.list_groups_for_user(user_id, hints)
return ref_list
@manager.response_truncated
@exception_translated('group')
def list_groups(self, domain_scope=None, hints=None):
hints = hints or driver_hints.Hints()
self._ensure_domain_id_in_hints(hints, domain_scope)
ref_list = self.driver.list_groups(hints)
return ref_list
@manager.response_truncated
@exception_translated('group')
def list_users_in_group(self, group_id, hints=None):
hints = hints or driver_hints.Hints()
ref_list = self.driver.list_users_in_group(group_id, hints)
return ref_list
@exception_translated('group')
def check_user_in_group(self, user_id, group_id):
return self.driver.check_user_in_group(user_id, group_id)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
"""Interface description for an Identity driver."""
def _get_list_limit(self):
return CONF.identity.list_limit or CONF.list_limit
def is_domain_aware(self):
"""Indicates if Driver supports domains."""
return True
@property
def is_sql(self):
"""Indicates if this Driver uses SQL."""
return False
def generates_uuids(self):
"""Indicates if Driver generates UUIDs as the local entity ID."""
return True
@abc.abstractmethod
def authenticate(self, user_id, password):
"""Authenticate a given user and password.
:returns: user_ref
:raises: AssertionError
"""
raise exception.NotImplemented() # pragma: no cover
# user crud
@abc.abstractmethod
def create_user(self, user_id, user):
"""Creates a new user.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_users(self, hints):
"""List users in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_user(self, user_id):
"""Get a user by ID.
:returns: user_ref
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_user(self, user_id, user):
"""Updates an existing user.
:raises: keystone.exception.UserNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_user(self, user_id):
"""Deletes an existing user.
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_user_by_name(self, user_name, domain_id):
"""Get a user by name.
:returns: user_ref
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_users_in_group(self, group_id, hints):
"""List users in a group.
:param group_id: the group in question
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_user_to_group(self, user_id, group_id):
"""Adds a user to a group.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def check_user_in_group(self, user_id, group_id):
"""Checks if a user is a member of a group.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def remove_user_from_group(self, user_id, group_id):
"""Removes a user from a group.
:raises: keystone.exception.NotFound
"""
raise exception.NotImplemented() # pragma: no cover
# group crud
@abc.abstractmethod
def create_group(self, group_id, group):
"""Creates a new group.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_groups(self, hints):
"""List groups in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of group_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_groups_for_user(self, user_id, hints):
"""List groups a user is in
:param user_id: the user in question
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of group_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_group(self, group_id):
"""Get a group by ID.
:returns: group_ref
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_group_by_name(self, group_name, domain_id):
"""Get a group by name.
:returns: group_ref
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_group(self, group_id, group):
"""Updates an existing group.
:raises: keystone.exception.GroupNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_group(self, group_id):
"""Deletes an existing group.
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented() # pragma: no cover
# end of identity | {
"content_hash": "18b82d3f7fd474291939590911c06d17",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 79,
"avg_line_length": 35.92321116928447,
"alnum_prop": 0.6292751651768363,
"repo_name": "darren-wang/ks3",
"id": "821aacfeb8983391e3efcfbd6c0f69b97246be0e",
"size": "21170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/identity/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "864167"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
} |
import pandas as pd
def get_test_data():
test_data = pd.read_csv('data/classification_data/dev_split.csv')
#print test_data
test = test_data['Participant_ID'].tolist()
#print test
#test.append(video)
clm_d = pd.read_csv('data/disc_nondisc/discriminative_CLM.csv')
covarep_d = pd.read_csv('data/disc_nondisc/discriminative_COVAREP.csv')
liwc_d = pd.read_csv('data/disc_nondisc/discriminative_LIWC.csv')
clm_nd = pd.read_csv('data/disc_nondisc/nondiscriminative_CLM.csv')
covarep_nd = pd.read_csv('data/disc_nondisc/nondiscriminative_COVAREP.csv')
liwc_nd = pd.read_csv('data/disc_nondisc/nondiscriminative_LIWC.csv')
for key in test:
if not ((clm_nd['video'] == key).any() ):
print "visual ",key
if not ((covarep_nd['video'] == key).any() ):
print "acoustic ", key
#print key
if not((liwc_nd['video'] == key).any()):
print "liwc ", key
get_test_data()
| {
"content_hash": "b8b33215f70592bbf68c26614d10b6ab",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 32.53333333333333,
"alnum_prop": 0.6239754098360656,
"repo_name": "ab93/Depression-Identification",
"id": "a2363c0281c207e1a5d81f298a0e03e38fb409f8",
"size": "976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/feature_extract/check_features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "213001"
}
],
"symlink_target": ""
} |
"""
Methods for detecting objects leading to pickling failures.
"""
from __future__ import absolute_import, with_statement
import dis
from inspect import ismethod, isfunction, istraceback, isframe, iscode
from inspect import getmodule
from .pointers import parent, reference, at, parents, children
from .dill import _trace as trace
from .dill import PY3
def outermost(func): # is analogous to getsource(func,enclosing=True)
"""get outermost enclosing object (i.e. the outer function in a closure)
NOTE: this is the object-equivalent of getsource(func, enclosing=True)
"""
if PY3:
if ismethod(func):
_globals = func.__func__.__globals__ or {}
elif isfunction(func):
_globals = func.__globals__ or {}
else:
return #XXX: or raise? no matches
_globals = _globals.items()
else:
if ismethod(func):
_globals = func.im_func.func_globals or {}
elif isfunction(func):
_globals = func.func_globals or {}
else:
return #XXX: or raise? no matches
_globals = _globals.iteritems()
# get the enclosing source
from .source import getsourcelines
try: lines,lnum = getsourcelines(func, enclosing=True)
except: #TypeError, IOError
lines,lnum = [],None
code = ''.join(lines)
# get all possible names,objects that are named in the enclosing source
_locals = ((name,obj) for (name,obj) in _globals if name in code)
# now only save the objects that generate the enclosing block
for name,obj in _locals: #XXX: don't really need 'name'
try:
if getsourcelines(obj) == (lines,lnum): return obj
except: #TypeError, IOError
pass
return #XXX: or raise? no matches
def nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ?
"""get the code objects for any nested functions (e.g. in a closure)"""
func = code(func)
if not iscode(func): return [] #XXX: or raise? no matches
nested = set()
for co in func.co_consts:
if co is None: continue
co = code(co)
if co:
nested.add(co)
if recurse: nested |= set(nestedcode(co, recurse=True))
return list(nested)
def code(func):
'''get the code object for the given function or method
NOTE: use dill.source.getsource(CODEOBJ) to get the source code
'''
if PY3:
im_func = '__func__'
func_code = '__code__'
else:
im_func = 'im_func'
func_code = 'func_code'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func): func = getattr(func, func_code)
if istraceback(func): func = func.tb_frame
if isframe(func): func = func.f_code
if iscode(func): return func
return
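# Example (editor's addition): code() normalises methods, functions, tracebacks
# and frames down to the underlying code object, and returns None otherwise.
#
# >>> code(lambda x: x) # doctest: +ELLIPSIS
# <code object <lambda> at 0x...>
# >>> code(42) is None
# True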
#XXX: ugly: parse dis.dis for name after "<code object" in line and in globals?
def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ?
"""get functions defined inside of func (e.g. inner functions in a closure)
NOTE: results may differ if the function has been executed or not.
If len(nestedcode(func)) > len(referrednested(func)), try calling func().
If possible, python builds code objects, but delays building functions
until func() is called.
"""
if PY3:
att1 = '__code__'
att0 = '__func__'
else:
att1 = 'func_code' # functions
att0 = 'im_func' # methods
import gc
funcs = set()
# get the code objects, and try to track down by reference
for co in nestedcode(func, recurse):
# look for function objects that refer to the code object
for obj in gc.get_referrers(co):
# get methods
_ = getattr(obj, att0, None) # ismethod
if getattr(_, att1, None) is co: funcs.add(obj)
# get functions
elif getattr(obj, att1, None) is co: funcs.add(obj)
# get frame objects
elif getattr(obj, 'f_code', None) is co: funcs.add(obj)
# get code objects
elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)
# frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars
# funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames
# frameobjs are not found, however funcobjs are...
# (see: test_mixins.quad ... and test_mixins.wtf)
# after execution, code objects get compiled, and then may be found by gc
return list(funcs)
def freevars(func):
"""get objects defined in enclosing code that are referred to by func
returns a dict of {name:object}"""
if PY3:
im_func = '__func__'
func_code = '__code__'
func_closure = '__closure__'
else:
im_func = 'im_func'
func_code = 'func_code'
func_closure = 'func_closure'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func):
closures = getattr(func, func_closure) or ()
func = getattr(func, func_code).co_freevars # get freevars
else:
return {}
return dict((name,c.cell_contents) for (name,c) in zip(func,closures))
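# Example (editor's addition, illustrative names): freevars reports the
# enclosing-scope objects captured by a closure.
#
# >>> def outer(a):
# ...     def inner(x):
# ...         return x + a
# ...     return inner
# >>> freevars(outer(10))
# {'a': 10}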
# thanks to Davies Liu for recursion of globals
def nestedglobals(func, recurse=True):
"""get the names of any globals found within func"""
func = code(func)
if func is None: return list()
from .temp import capture
names = set()
with capture('stdout') as out:
dis.dis(func) #XXX: dis.dis(None) disassembles last traceback
for line in out.getvalue().splitlines():
if '_GLOBAL' in line:
name = line.split('(')[-1].split(')')[0]
names.add(name)
for co in getattr(func, 'co_consts', tuple()):
if co and recurse and iscode(co):
names.update(nestedglobals(co, recurse=True))
return list(names)
def referredglobals(func, recurse=True):
"""get the names of objects in the global scope referred to by func"""
return globalvars(func, recurse).keys()
def globalvars(func, recurse=True):
"""get objects defined in global scope that are referred to by func
return a dict of {name:object}"""
if PY3:
im_func = '__func__'
func_code = '__code__'
func_globals = '__globals__'
else:
im_func = 'im_func'
func_code = 'func_code'
func_globals = 'func_globals'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func):
globs = {} #FIXME: vars(getmodule(object)) # get dict of __builtins__
globs.update(getattr(func, func_globals) or {})
if not recurse:
func = getattr(func, func_code).co_names # get names
else:
func = set(nestedglobals(getattr(func, func_code)))
# find globals for all entries of func
for key in func.copy(): #XXX: unnecessary...?
func.update(globalvars(globs.get(key), recurse=True).keys())
else:
return {}
#NOTE: if name not in func_globals, then we skip it...
return dict((name,globs[name]) for name in func if name in globs)
def varnames(func):
"""get names of variables defined by func
returns a tuple (local vars, local vars referenced by nested functions)"""
func = code(func)
if not iscode(func):
return () #XXX: better ((),())? or None?
return func.co_varnames, func.co_cellvars
def baditems(obj, exact=False, safe=False): #XXX: obj=globals() ?
"""get items in object that fail to pickle"""
if not hasattr(obj,'__iter__'): # is not iterable
return [j for j in (badobjects(obj,0,exact,safe),) if j is not None]
obj = obj.values() if getattr(obj,'values',None) else obj
_obj = [] # can't use a set, as items may be unhashable
[_obj.append(badobjects(i,0,exact,safe)) for i in obj if i not in _obj]
return [j for j in _obj if j is not None]
def badobjects(obj, depth=0, exact=False, safe=False):
"""get objects that fail to pickle"""
from dill import pickles
if not depth:
if pickles(obj,exact,safe): return None
return obj
return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
def badtypes(obj, depth=0, exact=False, safe=False):
"""get types for objects that fail to pickle"""
from dill import pickles
if not depth:
if pickles(obj,exact,safe): return None
return type(obj)
return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
def errors(obj, depth=0, exact=False, safe=False):
"""get errors for objects that fail to pickle"""
from dill import pickles, copy
if not depth:
try:
pik = copy(obj)
if exact:
assert pik == obj, \
"Unpickling produces %s instead of %s" % (pik,obj)
assert type(pik) == type(obj), \
"Unpickling produces %s instead of %s" % (type(pik),type(obj))
return None
except Exception:
import sys
return sys.exc_info()[1]
return dict(((attr, errors(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
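# Editor's usage sketch: baditems() flags members of a container that fail to
# pickle with dill; generators, for instance, are not picklable, while lambdas
# are.
#
# >>> gen = (i for i in range(3))
# >>> baditems([1, gen, lambda x: x]) # doctest: +ELLIPSIS
# [<generator object <genexpr> at 0x...>]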
del absolute_import, with_statement
# EOF
| {
"content_hash": "f5561e272bedeb1ddf50769495219faf",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 37.17063492063492,
"alnum_prop": 0.6159923134408029,
"repo_name": "mindw/dill",
"id": "7e62f07750b2b9b4ef28c09801126ecf332de010",
"size": "9656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dill/detect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "195210"
}
],
"symlink_target": ""
} |
"""
Utilities for the safeopt library (e.g., sampling).
Author: Felix Berkenkamp (befelix at inf dot ethz dot ch)
"""
from __future__ import print_function, absolute_import, division
from collections import Sequence # isinstance(...,Sequence)
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # Create 3D axes
from matplotlib import cm # 3D plot colors
__all__ = ['linearly_spaced_combinations', 'sample_gp_function', 'plot_2d_gp',
'plot_3d_gp', 'plot_contour_gp']
def linearly_spaced_combinations(bounds, num_samples):
"""
Return 2-D array with all linearly spaced combinations with the bounds.
Parameters
----------
bounds: sequence of tuples
The bounds for the variables, [(x1_min, x1_max), (x2_min, x2_max), ...]
num_samples: integer or array_like
Number of samples to use for every dimension. Can be a constant if
the same number should be used for all, or an array to fine-tune
precision. Total number of data points is num_samples ** len(bounds).
Returns
-------
combinations: 2-d array
A 2-d array. If d = len(bounds) and l = prod(num_samples) then it
is of size l x d, that is, every row contains one combination of
inputs.
"""
num_vars = len(bounds)
if not isinstance(num_samples, Sequence):
num_samples = [num_samples] * num_vars
if len(bounds) == 1:
return np.linspace(bounds[0][0], bounds[0][1], num_samples[0])[:, None]
# Create linearly spaced test inputs
inputs = [np.linspace(b[0], b[1], n) for b, n in zip(bounds,
num_samples)]
# Convert to 2-D array
return np.array([x.ravel() for x in np.meshgrid(*inputs)]).T
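# Editor's worked example: two inputs with three samples each give 3 ** 2 = 9
# combinations, one column per input dimension.
#
# >>> linearly_spaced_combinations([(0., 1.), (-1., 1.)], 3).shape
# (9, 2)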
def sample_gp_function(kernel, bounds, noise_var, num_samples,
interpolation='kernel', mean_function=None):
"""
Sample a function from a gp with corresponding kernel within its bounds.
Parameters
----------
kernel: instance of GPy.kern.*
bounds: list of tuples
[(x1_min, x1_max), (x2_min, x2_max), ...]
noise_var: float
Variance of the observation noise of the GP function
num_samples: int or list
If integer draws the corresponding number of samples in all
dimensions and test all possible input combinations. If a list then
the list entries correspond to the number of linearly spaced samples of
the corresponding input
interpolation: string
If 'linear' interpolate linearly between samples, if 'kernel' use the
corresponding mean RKHS-function of the GP.
mean_function: callable
Mean of the sample function
Returns
-------
function: object
function(x, noise=True)
A function that takes as inputs new locations x to be evaluated and
returns the corresponding noisy function values. If noise=False is
set the true function values are returned (useful for plotting).
"""
inputs = linearly_spaced_combinations(bounds, num_samples)
cov = kernel.K(inputs) + np.eye(inputs.shape[0]) * 1e-6
output = np.random.multivariate_normal(np.zeros(inputs.shape[0]),
cov)
if interpolation == 'linear':
def evaluate_gp_function_linear(x, noise=True):
"""
Evaluate the GP sample function with linear interpolation.
Parameters
----------
x: np.array
2D array with inputs
noise: bool
Whether to include prediction noise
"""
x = np.atleast_2d(x)
y = sp.interpolate.griddata(inputs, output, x, method='linear')
# Work around weird dimension squishing in griddata
y = np.atleast_2d(y.squeeze()).T
if mean_function is not None:
y += mean_function(x)
if noise:
y += np.sqrt(noise_var) * np.random.randn(x.shape[0], 1)
return y
return evaluate_gp_function_linear
elif interpolation == 'kernel':
cho_factor = sp.linalg.cho_factor(cov)
alpha = sp.linalg.cho_solve(cho_factor, output)
def evaluate_gp_function_kernel(x, noise=True):
"""
Evaluate the GP sample function with kernel interpolation.
Parameters
----------
x: np.array
2D array with inputs
noise: bool
Whether to include prediction noise
"""
x = np.atleast_2d(x)
y = kernel.K(x, inputs).dot(alpha)
y = y[:, None]
if mean_function is not None:
y += mean_function(x)
if noise:
y += np.sqrt(noise_var) * np.random.randn(x.shape[0], 1)
return y
return evaluate_gp_function_kernel
def plot_2d_gp(gp, inputs, predictions=None, figure=None, axis=None,
fixed_inputs=None, beta=3, fmin=None, **kwargs):
"""
Plot a 2D GP with uncertainty.
Parameters
----------
gp: Instance of GPy.models.GPRegression
inputs: 2darray
The input parameters at which the GP is to be evaluated
predictions: ndarray
Can be used to manually pass the GP predictions, set to None to
use the gp directly. Is of the form (mean, variance)
figure: matplotlib figure
        The figure on which to draw (ignored if axis is provided)
axis: matplotlib axis
The axis on which to draw
fixed_inputs: list
        A list containing the fixed inputs and their corresponding
values, e.g., [(0, 3.2), (4, -2.43)]. Set the value to None if
it's not fixed, but should not be a plotted axis either
beta: float
The confidence interval used
fmin : float
The safety threshold value.
Returns
-------
axis
"""
if fixed_inputs is None:
if gp.kern.input_dim > 1:
raise NotImplementedError('This only works for 1D inputs')
fixed_inputs = []
elif gp.kern.input_dim - len(fixed_inputs) != 1:
        raise NotImplementedError('This only works for 1D inputs')
ms = kwargs.pop('ms', 10)
mew = kwargs.pop('mew', 3)
point_color = kwargs.pop('point_color', 'k')
if axis is None:
if figure is None:
figure = plt.figure()
axis = figure.gca()
else:
axis = figure.gca()
# Get a list of unfixed inputs to plot
unfixed = list(range(gp.kern.input_dim))
for dim, val in fixed_inputs:
if val is not None:
inputs[:, dim] = val
unfixed.remove(dim)
# Compute GP predictions if not provided
if predictions is None:
mean, var = gp._raw_predict(inputs)
else:
mean, var = predictions
output = mean.squeeze()
std_dev = beta * np.sqrt(var.squeeze())
axis.fill_between(inputs[:, unfixed[0]],
output - std_dev,
output + std_dev,
facecolor='blue',
alpha=0.3)
axis.plot(inputs[:, unfixed[0]], output, **kwargs)
axis.scatter(gp.X[:-1, unfixed[0]], gp.Y[:-1, 0], s=20 * ms,
marker='x', linewidths=mew, color=point_color)
axis.scatter(gp.X[-1, unfixed[0]], gp.Y[-1, 0], s=20 * ms,
marker='x', linewidths=mew, color='r')
axis.set_xlim([np.min(inputs[:, unfixed[0]]),
np.max(inputs[:, unfixed[0]])])
if fmin is not None:
axis.plot(inputs[[0, -1], unfixed[0]], [fmin, fmin], 'k--')
return axis
def plot_3d_gp(gp, inputs, predictions=None, figure=None, axis=None,
fixed_inputs=None, beta=3, **kwargs):
"""
Plot a 3D gp with uncertainty.
Parameters
----------
gp: Instance of GPy.models.GPRegression
inputs: 2darray
The input parameters at which the GP is to be evaluated
predictions: ndarray
Can be used to manually pass the GP predictions, set to None to
use the gp directly. Is of the form [mean, variance]
figure: matplotlib figure
        The figure on which to draw (ignored if axis is provided)
axis: matplotlib axis
The axis on which to draw
fixed_inputs: list
        A list containing the fixed inputs and their corresponding
values, e.g., [(0, 3.2), (4, -2.43)]. Set the value to None if
it's not fixed, but should not be a plotted axis either
beta: float
The confidence interval used
Returns
-------
surface: matplotlib trisurf plot
data: matplotlib plot for data points
"""
if fixed_inputs is None:
if gp.kern.input_dim > 2:
raise NotImplementedError('This only works for 2D inputs')
fixed_inputs = []
elif gp.kern.input_dim - len(fixed_inputs) != 2:
        raise NotImplementedError('Only two inputs can be unfixed')
if axis is None:
if figure is None:
figure = plt.figure()
axis = Axes3D(figure)
else:
axis = Axes3D(figure)
# Get a list of unfixed inputs to plot
unfixed = list(range(gp.kern.input_dim))
for dim, val in fixed_inputs:
if val is not None:
inputs[:, dim] = val
unfixed.remove(dim)
# Compute GP predictions if not provided
if predictions is None:
mean, var = gp._raw_predict(inputs)
else:
mean, var = predictions
surf = axis.plot_trisurf(inputs[:, unfixed[0]],
inputs[:, unfixed[1]],
mean[:, 0],
cmap=cm.jet, linewidth=0.2, alpha=0.5)
data = axis.plot(gp.X[:-1, unfixed[0]],
gp.X[:-1, unfixed[1]],
gp.Y[:-1, 0],
'o')
axis.plot(gp.X[-1, unfixed[0]],
gp.X[-1, unfixed[1]],
gp.Y[-1, 0],
'ro')
axis.set_xlim([np.min(inputs[:, unfixed[0]]),
np.max(inputs[:, unfixed[0]])])
axis.set_ylim([np.min(inputs[:, unfixed[1]]),
np.max(inputs[:, unfixed[1]])])
return surf, data
def plot_contour_gp(gp, inputs, predictions=None, figure=None, axis=None,
colorbar=True, **kwargs):
"""
    Plot a contour plot of the GP mean.
Parameters
----------
gp: Instance of GPy.models.GPRegression
inputs: list of arrays/floats
The input parameters at which the GP is to be evaluated,
here instead of the combinations of inputs the individual inputs
that are spread in a grid are given. Only two of the arrays
should have more than one value (not fixed).
predictions: ndarray
Can be used to manually pass the GP predictions, set to None to
use the gp directly.
figure: matplotlib figure
        The figure on which to draw (ignored if axis is provided)
axis: matplotlib axis
The axis on which to draw
Returns
-------
contour: matplotlib contour plot
colorbar: matplotlib colorbar
points: matplotlib plot
"""
if axis is None:
if figure is None:
figure = plt.figure()
axis = figure.gca()
else:
axis = figure.gca()
# Find which inputs are fixed to constant values
slices = []
lengths = []
for i, inp in enumerate(inputs):
if isinstance(inp, np.ndarray):
slices.append(i)
lengths.append(inp.shape[0])
mesh = np.meshgrid(*inputs, indexing='ij')
if predictions is None:
# Convert to array with combinations of inputs
gp_inputs = np.array([x.ravel() for x in mesh]).T
mean = gp._raw_predict(gp_inputs)[0]
else:
mean = predictions[0]
c_bar = None
if not np.all(mean == mean[0]):
# Need to squeeze the added dimensions caused by fixed inputs
c = axis.contour(mesh[slices[0]].squeeze(),
mesh[slices[1]].squeeze(),
mean.squeeze().reshape(*lengths),
20,
**kwargs)
if colorbar:
c_bar = plt.colorbar(c)
else:
c = None
data = axis.plot(gp.X[:-1, slices[0]], gp.X[:-1, slices[1]], 'ob')
axis.plot(gp.X[-1, slices[0]], gp.X[-1, slices[1]], 'or')
axis.set_xlim([np.min(inputs[slices[0]]),
np.max(inputs[slices[0]])])
axis.set_ylim([np.min(inputs[slices[1]]),
np.max(inputs[slices[1]])])
return c, c_bar, data
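# Illustrative usage sketch (not part of the original module). It only exercises
# linearly_spaced_combinations, which needs nothing beyond numpy; the GP sampling
# and plotting helpers above additionally assume a GPy kernel/model instance.
if __name__ == '__main__':
    bounds = [(0., 1.), (-1., 1.)]
    grid = linearly_spaced_combinations(bounds, num_samples=3)
    # 3 samples per dimension over 2 dimensions -> 9 rows, one column per input
    print(grid.shape)  # (9, 2)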
| {
"content_hash": "8c147c86ea48c1ce35da4068a5fd55d0",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 79,
"avg_line_length": 35.39632545931759,
"alnum_prop": 0.5391517128874388,
"repo_name": "befelix/SafeOpt",
"id": "d2921b5febc711ddc8cc22ac82a74f680b14734d",
"size": "13486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safeopt/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "907"
},
{
"name": "Python",
"bytes": "68742"
},
{
"name": "Shell",
"bytes": "1167"
}
],
"symlink_target": ""
} |
"""This package provides tools for appending layers to docker images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_digest
from containerregistry.client.v2_2 import docker_http
from containerregistry.client.v2_2 import docker_image
from containerregistry.transform.v2_2 import metadata
# _EMPTY_LAYER_TAR_ID is the sha256 of an empty tarball.
_EMPTY_LAYER_TAR_ID = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4'
class Layer(docker_image.DockerImage):
"""Appends a new layer on top of a base image.
This augments a base docker image with new files from a gzipped tarball,
adds environment variables and exposes a port.
"""
def __init__(self,
base,
tar_gz,
diff_id = None,
overrides = None):
"""Creates a new layer on top of a base with optional tar.gz.
Args:
base: a base DockerImage for a new layer.
      tar_gz: an optional gzipped tarball, passed as bytes, containing the
        filesystem changeset.
diff_id: an optional string containing the digest of the
uncompressed tar_gz.
overrides: an optional metadata.Overrides object of properties to override
on the base image.
"""
self._base = base
manifest = json.loads(self._base.manifest())
config_file = json.loads(self._base.config_file())
overrides = overrides or metadata.Overrides()
overrides = overrides.Override(created_by=docker_name.USER_AGENT)
if tar_gz:
self._blob = tar_gz
self._blob_sum = docker_digest.SHA256(self._blob)
manifest['layers'].append({
'digest': self._blob_sum,
'mediaType': docker_http.MANIFEST_SCHEMA2_MIME,
'size': len(self._blob),
})
if not diff_id:
diff_id = docker_digest.SHA256(self.uncompressed_blob(self._blob_sum))
# Takes naked hex.
overrides = overrides.Override(layers=[diff_id[len('sha256:'):]])
else:
# The empty layer.
overrides = overrides.Override(layers=[docker_digest.SHA256(b'', '')])
config_file = metadata.Override(config_file, overrides)
self._config_file = json.dumps(config_file, sort_keys=True)
manifest['config']['digest'] = docker_digest.SHA256(
self._config_file.encode('utf8'))
self._manifest = json.dumps(manifest, sort_keys=True)
def manifest(self):
"""Override."""
return self._manifest
def config_file(self):
"""Override."""
return self._config_file
def blob(self, digest):
"""Override."""
if digest == self._blob_sum:
return self._blob
return self._base.blob(digest)
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
"""Override."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Override."""
return
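# Illustrative sketch (not part of the original module). The helper below is
# hypothetical and only shows the call pattern: `base` must be an already-opened
# docker_image.DockerImage and `tar_gz_path` a gzipped filesystem changeset on
# disk; both are assumptions, not part of this package's API.
def _example_append_layer(base, tar_gz_path):
  """Append the tarball at tar_gz_path onto `base` and return the new manifest."""
  with open(tar_gz_path, 'rb') as f:
    tar_gz = f.read()
  with Layer(base, tar_gz, overrides=metadata.Overrides()) as appended:
    return appended.manifest()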
| {
"content_hash": "bd0d85a99c4280d544824d68cfbdeef5",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 95,
"avg_line_length": 31.768421052631577,
"alnum_prop": 0.6640159045725647,
"repo_name": "kubeflow/fairing",
"id": "e3b0160ef14ec4c04c8e323ca6dae88906a9f4d1",
"size": "3613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "containerregistry/client/v2_2/append_.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2103"
},
{
"name": "Jsonnet",
"bytes": "2440311"
},
{
"name": "Jupyter Notebook",
"bytes": "1573"
},
{
"name": "Python",
"bytes": "523314"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import logging
from django.db import models
from django.db.models import CASCADE
from chroma_core.models import AlertStateBase
from chroma_core.models import AlertEvent
from chroma_core.models import DeletableStatefulObject
from chroma_core.models import StateChangeJob
from chroma_core.models import Job
from chroma_core.models import SchedulingError
from chroma_core.models import StateLock
from chroma_core.lib.job import DependOn, DependAll, Step
from chroma_help.help import help_text
class PacemakerConfiguration(DeletableStatefulObject):
states = ["unconfigured", "stopped", "started"]
initial_state = "unconfigured"
host = models.OneToOneField("ManagedHost", related_name="_pacemaker_configuration", on_delete=CASCADE)
def __str__(self):
return "%s Pacemaker configuration" % self.host
class Meta:
app_label = "chroma_core"
ordering = ["id"]
def get_label(self):
return "pacemaker configuration"
def set_state(self, state, intentional=False):
"""
:param intentional: set to true to silence any alerts generated by this transition
"""
super(PacemakerConfiguration, self).set_state(state, intentional)
if intentional:
PacemakerStoppedAlert.notify_warning(self, self.state != "started")
else:
PacemakerStoppedAlert.notify(self, self.state != "started")
reverse_deps = {"ManagedHost": lambda mh: PacemakerConfiguration.objects.filter(host_id=mh.id)}
# Below the handler should be in a completion hook, but I can't see how to get the instance of the completion
# hook to add it and time is running out. I will return to this.
@property
def reconfigure_fencing(self):
        # We return False because we are overloading the attribute setter below to make it an event handler rather than
        # a real property. If someone sets reconfigure_fencing = False then the event will not be called because the current
# value is always False. If someone sets reconfigure_fencing = True then the setter will be called because the
# current value is always False!
return False
@reconfigure_fencing.setter
def reconfigure_fencing(self, ignored_value):
        # We don't store this because we are overloading the attribute setter below to make it an event handler rather than
# a real property.
pass
class StonithNotEnabledAlert(AlertStateBase):
default_severity = logging.ERROR
class Meta:
app_label = "chroma_core"
proxy = True
def alert_message(self):
return help_text["stonith_not_enabled"] % self.alert_item
def end_event(self):
return AlertEvent(
message_str=help_text["stonith_enabled"] % self.alert_item,
alert_item=self.alert_item,
alert=self,
severity=logging.INFO,
)
@property
def affected_objects(self):
"""
:return: A list of objects that are affected by this alert
"""
return [self.alert_item.host]
class PacemakerStoppedAlert(AlertStateBase):
# Pacemaker being down is never solely responsible for a filesystem
# being unavailable: if a target is offline we will get a separate
# ERROR alert for that. Pacemaker being offline may indicate a configuration
# fault, but equally could just indicate that the host hasn't booted up that far yet.
default_severity = logging.INFO
def alert_message(self):
return "Pacemaker stopped on server %s" % self.alert_item.host
class Meta:
app_label = "chroma_core"
proxy = True
def end_event(self):
return AlertEvent(
message_str="Pacemaker started on server '%s'" % self.alert_item.host,
alert_item=self.alert_item.host,
alert=self,
severity=logging.WARNING,
)
@property
def affected_objects(self):
"""
:return: A list of objects that are affected by this alert
"""
return [self.alert_item.host]
class ConfigurePacemakerStep(Step):
idempotent = True
def run(self, kwargs):
host = kwargs["host"]
self.invoke_agent_expect_result(host, "configure_pacemaker")
class ConfigurePacemakerJob(StateChangeJob):
state_transition = StateChangeJob.StateTransition(PacemakerConfiguration, "unconfigured", "stopped")
stateful_object = "pacemaker_configuration"
pacemaker_configuration = models.ForeignKey(PacemakerConfiguration, on_delete=CASCADE)
state_verb = "Configure Pacemaker"
display_group = Job.JOB_GROUPS.COMMON
display_order = 30
class Meta:
app_label = "chroma_core"
ordering = ["id"]
@classmethod
def long_description(cls, stateful_object):
return help_text["configure_pacemaker"]
def description(self):
return help_text["configure_pacemaker_on"] % self.pacemaker_configuration.host
def get_steps(self):
return [
(StartPacemakerStep, {"host": self.pacemaker_configuration.host}),
(ConfigurePacemakerStep, {"host": self.pacemaker_configuration.host}),
(StopPacemakerStep, {"host": self.pacemaker_configuration.host}),
]
def get_deps(self):
"""
Before Pacemaker operations are possible the host must have had its packages installed.
Maybe we need a packages object, but this routine at least keeps the detail in one place.
Also corosync needs to be up and running. This is because configuring pacemaker requires starting pacemaker.
        Or maybe we need an unacceptable_states list.
:return:
"""
if self.pacemaker_configuration.host.state in ["unconfigured", "undeployed"]:
deps = [DependOn(self.pacemaker_configuration.host, "packages_installed")]
else:
deps = []
deps.append(DependOn(self.pacemaker_configuration.host.corosync_configuration, "started"))
return DependAll(deps)
class UnconfigurePacemakerStep(Step):
idempotent = True
def run(self, kwargs):
host = kwargs["host"]
self.invoke_agent_expect_result(host, "unconfigure_pacemaker")
class UnconfigurePacemakerJob(StateChangeJob):
state_transition = StateChangeJob.StateTransition(PacemakerConfiguration, "stopped", "unconfigured")
stateful_object = "pacemaker_configuration"
pacemaker_configuration = models.ForeignKey(PacemakerConfiguration, on_delete=CASCADE)
state_verb = "Unconfigure Pacemaker"
display_group = Job.JOB_GROUPS.COMMON
display_order = 30
class Meta:
app_label = "chroma_core"
ordering = ["id"]
@classmethod
def long_description(cls, stateful_object):
return help_text["unconfigure_pacemaker"]
def description(self):
return help_text["unconfigure_pacemaker_on"] % self.pacemaker_configuration.host
def get_steps(self):
# Sadly we need to restart and then stop (it will be stopped) pacemaker to configure it.
# It will be stopped because this transition is stopped->unconfigured.
return [
(StartPacemakerStep, {"host": self.pacemaker_configuration.host}),
(UnconfigurePacemakerStep, {"host": self.pacemaker_configuration.host}),
(StopPacemakerStep, {"host": self.pacemaker_configuration.host}),
]
def get_deps(self):
"""
Before Pacemaker operations are possible the host must have had its packages installed.
Maybe we need a packages object, but this routine at least keeps the detail in one place.
Also corosync needs to be up and running. This is because configuring pacemaker requires starting pacemaker.
        Or maybe we need an unacceptable_states list.
:return:
"""
if self.pacemaker_configuration.host.state in ["unconfigured", "undeployed"]:
deps = [DependOn(self.pacemaker_configuration.host, "packages_installed")]
else:
deps = []
deps.append(DependOn(self.pacemaker_configuration.host.corosync_configuration, "started"))
# Any targets will have to be removed.
from chroma_core.models import ManagedTargetMount
for managed_target_mount in ManagedTargetMount.objects.filter(host=self.pacemaker_configuration.host):
deps.append(DependOn(managed_target_mount.target, "removed"))
return DependAll(deps)
@classmethod
def can_run(cls, instance):
"""We don't want people to unconfigure pacemaker on a node that has a ManagedTargetMount so make the command
available only when that is not the case.
:param instance: PacemakerConfiguration instance being queried
:return: True if no ManagedTargetMounts exist on the host in question.
"""
from chroma_core.models import ManagedTargetMount
return len(ManagedTargetMount.objects.filter(host=instance.host)) == 0
class StartPacemakerStep(Step):
idempotent = True
def run(self, kwargs):
self.invoke_agent_expect_result(kwargs["host"], "start_pacemaker")
@classmethod
def describe(cls, kwargs):
return help_text["start_pacemaker_on"] % kwargs["host"].fqdn
class StartPacemakerJob(StateChangeJob):
state_transition = StateChangeJob.StateTransition(PacemakerConfiguration, "stopped", "started")
stateful_object = "pacemaker_configuration"
pacemaker_configuration = models.ForeignKey(PacemakerConfiguration, on_delete=CASCADE)
state_verb = "Start Pacemaker"
display_group = Job.JOB_GROUPS.COMMON
display_order = 30
class Meta:
app_label = "chroma_core"
ordering = ["id"]
@classmethod
def long_description(cls, stateful_object):
return help_text["start_pacemaker"]
def description(self):
return "Start Pacemaker on %s" % self.pacemaker_configuration.host
def get_steps(self):
return [(StartPacemakerStep, {"host": self.pacemaker_configuration.host})]
def get_deps(self):
return DependOn(self.pacemaker_configuration.host.corosync_configuration, "started")
class StopPacemakerStep(Step):
idempotent = True
def run(self, kwargs):
self.invoke_agent_expect_result(kwargs["host"], "stop_pacemaker")
@classmethod
def describe(cls, kwargs):
return help_text["stop_pacemaker_on"] % kwargs["host"].fqdn
class StopPacemakerJob(StateChangeJob):
state_transition = StateChangeJob.StateTransition(PacemakerConfiguration, "started", "stopped")
stateful_object = "pacemaker_configuration"
pacemaker_configuration = models.ForeignKey(PacemakerConfiguration, on_delete=CASCADE)
state_verb = "Stop Pacemaker"
display_group = Job.JOB_GROUPS.RARE
display_order = 100
class Meta:
app_label = "chroma_core"
ordering = ["id"]
@classmethod
def long_description(cls, stateful_object):
return help_text["stop_pacemaker"]
def description(self):
return "Stop Pacemaker on %s" % self.pacemaker_configuration.host
def get_steps(self):
return [(StopPacemakerStep, {"host": self.pacemaker_configuration.host})]
class GetPacemakerStateStep(Step):
idempotent = True
# FIXME: using database=True to do the alerting update inside .set_state but
# should do it in a completion
database = True
def run(self, kwargs):
from chroma_core.services.job_scheduler.agent_rpc import AgentException
host = kwargs["host"]
try:
lnet_data = self.invoke_agent(host, "device_plugin", {"plugin": "linux_network"})["linux_network"]["lnet"]
host.set_state(lnet_data["state"])
host.save(update_fields=["state", "state_modified_at"])
except TypeError:
self.log("Data received from old client. Host %s state cannot be updated until agent is updated" % host)
except AgentException as e:
self.log("No data for plugin linux_network from host %s due to exception %s" % (host, e))
class GetPacemakerStateJob(Job):
pacemaker_configuration = models.ForeignKey(PacemakerConfiguration, on_delete=CASCADE)
requires_confirmation = False
verb = "Get Pacemaker state"
class Meta:
app_label = "chroma_core"
ordering = ["id"]
def create_locks(self):
return [StateLock(job=self, locked_item=self.pacemaker_configuration, write=True)]
@classmethod
def get_args(cls, pacemaker_configuration):
return {"host": pacemaker_configuration.host}
@classmethod
def long_description(cls, stateful_object):
return help_text["pacemaker_state"]
def description(self):
return "Get Pacemaker state for %s" % self.pacemaker_configuration.host
def get_steps(self):
return [(GetPacemakerStateStep, {"host": self.pacemaker_configuration.host})]
class ConfigureHostFencingJob(Job):
host = models.ForeignKey("ManagedHost", on_delete=CASCADE)
requires_confirmation = False
verb = "Configure Host Fencing"
class Meta:
app_label = "chroma_core"
ordering = ["id"]
@classmethod
def get_args(cls, host):
return {"host_id": host.id}
@classmethod
def long_description(cls, stateful_object):
return help_text["configure_host_fencing"]
def description(self):
return "Configure fencing agent on %s" % self.host
def create_locks(self):
return [StateLock(job=self, locked_item=self.host.pacemaker_configuration, write=True)]
def get_steps(self):
return [(ConfigureHostFencingStep, {"host": self.host})]
class ConfigureHostFencingStep(Step):
idempotent = True
# Needs database in order to query host outlets
database = True
def run(self, kwargs):
host = kwargs["host"]
if host.state != "managed":
raise SchedulingError(
"Attempted to configure a fencing device while the host %s was in state %s. Expected host to be in state 'managed'. Please ensure your host has completed set up and configure power control again."
% (host.fqdn, host.state)
)
if not host.pacemaker_configuration:
# Shouldn't normally happen, but makes debugging our own bugs easier.
raise RuntimeError(
"Attemped to configure fencing on a host that does not yet have a pacemaker configuration."
)
agent_kwargs = []
for outlet in host.outlets.select_related().all():
fence_kwargs = {
"agent": outlet.device.device_type.agent,
"login": outlet.device.username,
"password": outlet.device.password,
}
# IPMI fencing config doesn't need most of these attributes.
if outlet.device.is_ipmi and outlet.device.device_type.agent not in ["fence_virsh", "fence_vbox"]:
fence_kwargs["ipaddr"] = outlet.identifier
fence_kwargs["lanplus"] = "2.0" in outlet.device.device_type.model # lanplus
else:
fence_kwargs["plug"] = outlet.identifier
fence_kwargs["ipaddr"] = outlet.device.address
fence_kwargs["ipport"] = outlet.device.port
agent_kwargs.append(fence_kwargs)
self.invoke_agent(host, "configure_fencing", {"agents": agent_kwargs})
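# Illustrative sketch (not part of the original module): the shape of the
# "agents" payload that ConfigureHostFencingStep hands to configure_fencing for
# one hypothetical PDU outlet. All values below are made up; IPMI devices get
# "ipaddr"/"lanplus" instead of "plug"/"ipport".
_EXAMPLE_FENCE_AGENT_KWARGS = [
    {
        "agent": "fence_apc",  # hypothetical fence agent name
        "login": "admin",
        "password": "secret",
        "plug": "3",           # outlet identifier
        "ipaddr": "10.0.0.5",  # PDU address
        "ipport": "23",
    }
]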
| {
"content_hash": "2b7e07dee63e88e11f279d177438c923",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 212,
"avg_line_length": 35.53917050691244,
"alnum_prop": 0.6703189834024896,
"repo_name": "intel-hpdd/intel-manager-for-lustre",
"id": "da9f3e77f6a78340d55c145c0dfa32ebd8d74071",
"size": "15598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chroma_core/models/pacemaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "20532"
},
{
"name": "Makefile",
"bytes": "20966"
},
{
"name": "Python",
"bytes": "6527307"
},
{
"name": "Roff",
"bytes": "1415"
},
{
"name": "Ruby",
"bytes": "27697"
},
{
"name": "Shell",
"bytes": "127203"
}
],
"symlink_target": ""
} |
"""
Module to set up run time parameters for Clawpack -- classic code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='classic'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "classic" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'"
num_dim = 1
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
probdata.add_param('beta', 200., 'for width of Gaussian data')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = 0.000000e+00 # xlower
clawdata.upper[0] = 2.000000e+00 # xupper
# Number of grid cells:
clawdata.num_cells[0] = 200 # mx
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 1
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 0
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.000000
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.qNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.q0006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 20
clawdata.tfinal = 2.000000
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = [0., 0.1]
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 2
clawdata.total_steps = 4
clawdata.output_t0 = True # output at initial (or restart) time?
clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'none' # could be list
clawdata.output_aux_onlyonce = True # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1.000000e-01
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1.000000e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.800000
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 1.000000
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 1
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter
clawdata.limiter = ['minmod']
clawdata.use_fwaves = False # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 0
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'periodic' # at xlower
clawdata.bc_upper[0] = 'periodic' # at xupper
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
| {
"content_hash": "befa0c0ade630c5264f8d18e9c95c7b1",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 92,
"avg_line_length": 31.896860986547086,
"alnum_prop": 0.5584141712357655,
"repo_name": "amath574w2015/am574-class",
"id": "fa01cc6b3644bcbb3f2f521ed71fa968123b20cf",
"size": "7113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labs/lab5/burgers/setrun.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16448"
},
{
"name": "FORTRAN",
"bytes": "8434"
},
{
"name": "HTML",
"bytes": "992"
},
{
"name": "Makefile",
"bytes": "12639"
},
{
"name": "Matlab",
"bytes": "1260"
},
{
"name": "Python",
"bytes": "62477"
},
{
"name": "Shell",
"bytes": "183"
},
{
"name": "TeX",
"bytes": "48331"
}
],
"symlink_target": ""
} |
import sqlite3
DAYS = {day:x for (x, day) in enumerate(['M', 'T', 'W', 'R', 'F', 'S', 'U'], start=1)}
def humanize_time(time):
"""Convert a time from the database's format to a human readable string.
Args:
time::Int - The time to convert.
Returns:
_::String - The converted time.
"""
time_str = '{:04d}'.format(time, )
if time_str[2:] == '25':
return '{}:{}'.format(time_str[:2], 15)
if time_str[2:] == '50':
return '{}:{}'.format(time_str[:2], 30)
if time_str[2:] == '75':
return '{}:{}'.format(time_str[:2], 45)
return '{}:{}'.format(time_str[:2], time_str[2:])
def dehumanize_time(time):
"""Convert a human readable time in 24h format to what the database needs.
Args:
time::String - The time to convert.
Returns:
_::Int - The converted time.
"""
if time[3:] == '15':
return int(time[:2] + '25')
if time[3:] == '30':
return int(time[:2] + '50')
if time[3:] == '45':
return int(time[:2] + '75')
if time[3:] == '50':
return int(time[:2]+time[3:])+50
return int(time[:2] + '00')
def consolidate_times(times):
"""Consolidates contiguous time intervals for a list of times.
Args:
times::[(Int, Int)] - The list of times to consolidate. Check the README
for specifications on format.
Returns:
joined_times::[(Int, Int)] - A list of consolidated times.
"""
joined_times = []
if not times: return joined_times
start, end = times[0]
for i in range(1, len(times)):
if end != times[i][0]:
joined_times.append((start, end))
start, end = times[i]
else:
end = times[i][1]
joined_times.append((start, end))
return joined_times
class DBAdmin():
def __init__(self):
self._conn = sqlite3.connect('room_data.db')
self._c = self._conn.cursor()
@property
def conn(self):
return self._conn
@property
def c(self):
return self._c
def init_table(self):
self.conn.execute('CREATE TABLE IF NOT EXISTS rooms('
'room TEXT NOT NULL, '
'day INT, '
'time INT, '
'taken INT DEFAULT 0, '
'UNIQUE (room, day, time, taken))')
def add_room(self, room):
"""Add a room to the database. Defaults all times as free.
Args:
room::String - The name of the room to add. Should be in the
format [A-Z]{2-3}[0-9]{2-3}, eg, MS160.
"""
for i in range(0, 2400, 25):
timeslots = [(room.upper(), d, i, 0) for d in range(1, 8)]
self.c.executemany('INSERT INTO rooms VALUES (?,?,?,?)', (timeslots))
self.conn.commit()
def add_time(self, room, days, start, end):
"""Add a time a room is being used.
Args:
room::String - The name of the room to add.
days::String - The days of the week the room is being used.
Weeks start on Monday.
start::String - The time the room starts being used in 24 hour format.
Only times in increments of 15 minutes from the hour are accepted.
end::String - The time the room stops being used in 24 hour format.
Only times in increments of 15 minutes from the hour are accepted.
"""
if not self.c.execute('SELECT EXISTS (SELECT 1 FROM rooms '
'WHERE room="{}" LIMIT 1)'
.format(room,)).fetchone()[0]:
self.add_room(room)
dehu_start, dehu_end = dehumanize_time(start), dehumanize_time(end)
for day in days:
for i in range(dehu_start, dehu_end+1, 25):
self.c.execute('UPDATE rooms SET taken = 1 WHERE '
'room = "{}" AND day = {} AND time = {}'
.format(room.upper(), DAYS[day], i))
self.conn.commit()
def check_room(self, room, day):
"""Returns the times a specific room is free for the day.
Args:
room::String - The name of the room to check.
day::String - The day of the week the room is checked for.
Returns:
times::[(Int, Int)] - The time intervals the room is free.
"""
times = []
for time in self.c.execute('SELECT time FROM rooms WHERE room '
'= "{}" AND day = {} AND taken = 0 ORDER BY time'
.format(room.upper(), DAYS[day])):
times.append((time[0], time[0]+25))
return [(humanize_time(x), humanize_time(y)) for
(x, y) in consolidate_times(times)]
def find_room(self, day, start='00:00', end='24:00'):
"""Returns the rooms that are available between start and end.
Args:
day::String - The day of the week the room must be free. Weeks start on Monday.
start::String - The time the room starts being free used in 24 hour format.
Only times in increments of 15 minutes from the hour are accepted.
end::String - The time the room must be free until in 24 hour format.
Only times in increments of 15 minutes from the hour are accepted.
Returns:
rooms_joined::{String:[(Int, Int)]} - A dictionary mapping rooms to the
times they are free to use.
"""
rooms, rooms_joined = {}, {}
dehu_start, dehu_end = dehumanize_time(start), dehumanize_time(end)
for room, time in self.c.execute('SELECT room, time FROM rooms WHERE day = {} AND '
'time >= {} AND time <= {} AND taken = 0 '
'ORDER BY room, time'
.format(DAYS[day], dehu_start, dehu_end)):
if room not in rooms:
rooms[room] = [(time, time+25)]
else:
rooms[room].append((time, time+25))
for room, times in rooms.items():
consolidated_times = consolidate_times(times)
for time_range in consolidated_times:
if time_range[0] <= dehu_start and time_range[1] >= dehu_end:
rooms_joined[room] = consolidated_times
break
return rooms_joined
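# Illustrative usage sketch (not part of the original module). Note that DBAdmin()
# opens (and creates, if needed) 'room_data.db' in the current directory, so
# running this will touch that file; the room and time values are made up.
if __name__ == '__main__':
    db = DBAdmin()
    db.init_table()
    db.add_time('MS160', 'MWF', '09:00', '09:45')   # room busy Mon/Wed/Fri 09:00-09:45
    print(db.check_room('MS160', 'M'))              # free intervals on Monday
    print(db.find_room('M', '10:00', '11:00'))      # rooms free Monday 10:00-11:00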
| {
"content_hash": "a87681f248cdaaf75f8ad2331900e4b4",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 92,
"avg_line_length": 38.59649122807018,
"alnum_prop": 0.5118181818181818,
"repo_name": "martin-tran/ucalgary-free-room-finder",
"id": "ef4060fa28f670554b56b73dfa94a07d22aa9ab9",
"size": "6600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dbadmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20031"
}
],
"symlink_target": ""
} |
import os
import json
from colors import Colors
'''
Class responsible for touching the JSON file. It is the only entity that modifies the underlying JSON.
It's called from the space and application classes.
'''
class Storage:
'''
Upon creation of object: initialize the file (if not already done), and set up file and data handles
'''
def __init__(self):
self.file_path = os.environ['HOME'] + '/.poof'
# If $HOME/.poof doesn't exists, create and initialize it with an empty "activities" array
if '.poof' not in os.listdir(os.environ['HOME']):
new_file = open(self.file_path, 'w')
json.dump({"activities": []}, new_file, indent=4)
new_file.close()
# Create file handle and JSON data object for read and write
self.json_file = open(self.file_path, 'r+')
self.all_data = json.load(self.json_file)
'''
    Deletes the contents of the JSON file
Used as part of the update-write cycle...prevents data from being duplicated
@returns: nothing
'''
def delete(self):
self.json_file.seek(0)
self.json_file.truncate()
'''
Copy self.all_data into self.json_file with pretty formatting <3
@returns: nothing
'''
def dumpData(self):
json.dump(self.all_data, self.json_file, indent=4)
'''
Add space object to JSON file
@returns: nothing
'''
def addSpace(self, space):
self.all_data['activities'].append(space)
self.delete()
self.dumpData()
print Colors.OKGREEN + "Space added successfully." + Colors.ENDC
self.json_file.close()
'''
Get a specified space object from JSON
@returns: space object or -1
'''
def getSpace(self, space):
for space_obj in self.all_data['activities']:
if space_obj['space'] == space:
return space_obj
# space didn't exist
return -1
'''
Delete a specified space object from JSON
@returns: nothing
'''
def deleteSpace(self, space):
index = 0
for space_obj in self.all_data['activities']:
if space_obj['space'] == space:
del self.all_data['activities'][index]
self.delete()
self.dumpData()
self.json_file.close()
print Colors.OKGREEN + 'Space successfully deleted.' + Colors.ENDC
return
index = index + 1
# space didn't exist
print Colors.WARNING + 'The space you ask for does not exist.' + Colors.ENDC
'''
Add an application to the specified space array in JSON
@returns: nothing
'''
def addApplication(self, application, space):
for space_obj in self.all_data['activities']:
if space_obj['space'] == space:
space_obj['sources'].append('/Applications/' + application)
self.delete()
self.dumpData()
print Colors.OKGREEN + 'Application added successfully.' + Colors.ENDC
self.json_file.close()
return
# space didn't exist
print Colors.WARNING + 'The space you ask for does not exist.' + Colors.ENDC
'''
Delete specified application from specified space in JSON
@returns: nothing
'''
def deleteApplication(self, space, application):
index = 0
for space_obj in self.all_data['activities']:
if space_obj['space'] == space:
for source in space_obj['sources']:
if source == ('/Applications/' + application):
del space_obj['sources'][index]
self.delete()
self.dumpData()
print Colors.OKGREEN + 'Application deleted successfully.' + Colors.ENDC
self.json_file.close()
return
index = index + 1
# space didn't exist
print Colors.WARNING + 'The application you ask for does not exist.' + Colors.ENDC
'''
Prints everyting in the JSON
@returns: nothing
'''
def all(self):
for space in self.all_data['activities']:
print Colors.OKGREEN + 'Space name: ' + space['space'] + Colors.ENDC
for app in space['sources']:
print Colors.OKBLUE + ' ' + app
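# Illustrative usage sketch (not part of the original module). Storage() reads and
# writes $HOME/.poof, so running this modifies that file; the space name and the
# {"space": ..., "sources": []} dict shape are assumptions inferred from the
# methods above, and a fresh Storage() is created per call because most methods
# close the file handle when they finish.
if __name__ == '__main__':
    Storage().addSpace({"space": "work", "sources": []})
    Storage().addApplication("Safari.app", "work")
    Storage().all()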
| {
"content_hash": "926fce676ca2e75df03295008a00e05d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 103,
"avg_line_length": 24.313725490196077,
"alnum_prop": 0.6739247311827957,
"repo_name": "ahoskins/poof",
"id": "b2295488c0ba60905c85b2db8d6221bc9ce1aa57",
"size": "3720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poof/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9319"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Reference'
db.create_table(u'cmsplugin_reference', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('referee', self.gf('django.db.models.fields.CharField')(default='Referee', max_length=100)),
('reference', self.gf('django.db.models.fields.TextField')(default='Reference')),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'plugins', ['Reference'])
def backwards(self, orm):
# Deleting model 'Reference'
db.delete_table(u'cmsplugin_reference')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'plugins.credential': {
'Meta': {'object_name': 'Credential', 'db_table': "u'cmsplugin_credential'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "'Description'"}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'inverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Name'", 'max_length': '100'})
},
u'plugins.reference': {
'Meta': {'object_name': 'Reference', 'db_table': "u'cmsplugin_reference'", '_ormbases': ['cms.CMSPlugin']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'referee': ('django.db.models.fields.CharField', [], {'default': "'Referee'", 'max_length': '100'}),
'reference': ('django.db.models.fields.TextField', [], {'default': "'Reference'"})
}
}
    complete_apps = ['plugins']
| {
"content_hash": "7e092e8eb57c0aaa2682c432cfa3b53d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 157,
"avg_line_length": 63.28125,
"alnum_prop": 0.577037037037037,
"repo_name": "robertour/commas",
"id": "431fb32b67b36a7d4fd9e39e33c9b7b88a1963b6",
"size": "4074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/migrations/0008_auto__add_reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "303484"
},
{
"name": "JavaScript",
"bytes": "12522"
},
{
"name": "Python",
"bytes": "151771"
}
],
"symlink_target": ""
} |
"""
rgenetics datatypes
Ross Lazarus
for the rgenetics and galaxy projects
genome graphs datatypes derived from Interval datatypes
genome graphs datasets have a header row with appropriate column names
The first column is always the marker - eg column name = rs, first row = rs12345 if the rows are snps
subsequent row values are all numeric! Will fail if any non-numeric (eg '+' or 'NA') values are present
ross lazarus for rgenetics
august 20 2007
"""
import logging, os, sys, time, tempfile, shutil, string, glob
import data
from galaxy import util
from cgi import escape
import urllib, binascii
from galaxy.web import url_for
from galaxy.datatypes import metadata
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.data import Text
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.images import Html
from galaxy.datatypes.interval import Interval
from galaxy.util.hash_util import *
log = logging.getLogger(__name__)
class GenomeGraphs(Interval):
"""gg version viewable at ucsc of Gff format"""
file_ext = "gg"
column_names = [ 'Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group' ]
"""Add metadata elements"""
MetadataElement( name="columns", default=9, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str','str','str','int','int','int','str','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=4, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=5, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
###do we need to repeat these? they are the same as should be inherited from interval type
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Interval.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def as_ucsc_display_file( self, dataset, **kwd ):
return open( dataset.file_name )
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
if len(elems) == 9:
try:
int( elems[3] )
int( elems[4] )
break
except:
pass
Interval.set_meta( self, dataset, overwrite = overwrite, skip = i )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
            out.append( '</tr>' )
            out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
def get_estimated_display_viewport( self, dataset ):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
if True or (dataset.has_data() and dataset.state == dataset.states.OK):
try:
seqid = ''
start = 2147483647 # Maximum value of a signed 32 bit integer ( 2**31 - 1 )
stop = 0
for i, line in enumerate( file( dataset.file_name ) ):
if i == 0: # track stuff there
continue
line = line.rstrip( '\r\n' )
if not line:
continue
if not line.startswith( '#' ):
elems = line.split( '\t' )
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min( start, int( elems[3] ) )
stop = max( stop, int( elems[4] ) )
else:
# We've spanned a chromosome
break
if i > 10: # span 10 features
break
except:
seqid, start, stop = ( '', '', '' )
return ( seqid, str( start ), str( stop ) )
else:
return ( '', '', '' )
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
        if dataset.has_data():
viewport_tuple = self.get_estimated_display_viewport( dataset )
seqid = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
if seqid and start and stop:
for site_name, site_url in util.get_gbrowse_sites_by_build( dataset.dbkey ):
if site_name in app.config.gbrowse_display_sites:
link = "%s?start=%s&stop=%s&ref=%s&dbkey=%s" % ( site_url, start, stop, seqid, dataset.dbkey )
ret_val.append( ( site_name, link ) )
return ret_val
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
        if dataset.has_data():
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
if start == '' or int(start) < 1:
start='1'
                if stop == '' or int(stop) <= int(start):
stop = '%d' % (int(start) + 10000)
for site_name, site_url in util.get_ucsc_by_build(dataset.dbkey):
if site_name in app.config.ucsc_display_sites:
# HACK: UCSC doesn't support https, so force http even
# if our URL scheme is https. Making this work
# requires additional hackery in your upstream proxy.
# If UCSC ever supports https, remove this hack.
internal_url = "%s" % url_for( controller='dataset',
dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name )
if base_url.startswith( 'https://' ):
base_url = base_url.replace( 'https', 'http', 1 )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, url_for( controller='root' ), dataset.id, type) )
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % (site_url, dataset.dbkey, chrom, start, stop) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( (site_name, link) )
else:
log.debug('@@@ gg ucsc_links - no viewport_tuple')
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
"""
        f = open(filename,'r')
        # Read the first few lines and split each into tab-separated fields,
        # skipping an optional leading 'track' line.
        headers = [l.rstrip('\r\n').split('\t') for l in f.readlines()[:20]]
        if headers and headers[0][0].lower() == 'track':
            headers = headers[1:]
        #headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '2' ) < 0:
return False
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
int( hdr[4] )
except:
return False
if hdr[5] != '.':
try:
score = int(hdr[5])
except:
return False
if (score < 0 or score > 1000):
return False
if hdr[6] not in data.valid_strand:
return False
return True
except:
return False
class rgTabList(Tabular):
"""
for sampleid and for featureid lists of exclusions or inclusions in the clean tool
featureid subsets on statistical criteria -> specialized display such as gg
"""
file_ext = "rgTList"
def __init__(self, **kwd):
"""Initialize featurelistt datatype"""
Tabular.__init__( self, **kwd )
self.column_names = []
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
if dataset.metadata.columns - len( self.column_names ) > 0:
for i in range( len( self.column_names ), dataset.metadata.columns ):
out.append( '<th>%s</th>' % str( i+1 ) )
out.append( '</tr>' )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
class rgSampleList(rgTabList):
"""
for sampleid exclusions or inclusions in the clean tool
output from QC eg excess het, gender error, ibd pair member,eigen outlier,excess mendel errors,...
since they can be uploaded, should be flexible
but they are persistent at least
same infrastructure for expression?
"""
file_ext = "rgSList"
def __init__(self, **kwd):
"""
Initialize samplelist datatype
"""
rgTabList.__init__( self, **kwd )
        self.column_names = ['FID', 'IID']
# this is what Plink wants as at 2009
def sniff(self,filename):
"""
"""
        infile = open(filename, "r")
        header = infile.next().split() # header row
        if len(header) >= 2 and header[0] == 'FID' and header[1] == 'IID':
return True
else:
return False
class rgFeatureList( rgTabList ):
"""
for featureid lists of exclusions or inclusions in the clean tool
output from QC eg low maf, high missingness, bad hwe in controls, excess mendel errors,...
featureid subsets on statistical criteria -> specialized display such as gg
same infrastructure for expression?
"""
file_ext = "rgFList"
def __init__(self, **kwd):
"""Initialize featurelist datatype"""
rgTabList.__init__( self, **kwd )
        self.column_names = ['#FeatureId', 'Chr', 'Genpos', 'Mappos']
class Rgenetics(Html):
"""
class to use for rgenetics
"""
MetadataElement( name="base_name", desc="base name for all transformed versions of this genetic dataset", default="rgenetics", readonly=True, set_in_upload=True)
composite_type = 'auto_primary_file'
allow_datatype_change = False
file_ext = 'rgenetics'
def missing_meta( self, dataset=None, **kwargs):
"""Checks for empty meta values"""
for key, value in dataset.metadata.items():
if not value:
return True
return False
def generate_primary_file( self, dataset = None ):
rval = ['<html><head><title>Rgenetics Galaxy Composite Dataset </title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
for composite_name, composite_file in self.get_composite_files( dataset = dataset ).iteritems():
opt_text = ''
if composite_file.optional:
opt_text = ' (optional)'
rval.append( '<li><a href="%s" type="application/binary">%s</a>%s' % ( composite_name, composite_name, opt_text ) )
rval.append( '</ul></div></html>' )
return "\n".join( rval )
def regenerate_primary_file(self,dataset):
"""
cannot do this until we are setting metadata
"""
def fix(oldpath,newbase):
old,e = os.path.splitext(oldpath)
head,rest = os.path.split(old)
newpath = os.path.join(head,newbase)
newpath = '%s%s' % (newpath,e)
if oldpath <> newpath:
shutil.move(oldpath,newpath)
return newpath
bn = dataset.metadata.base_name
efp = dataset.extra_files_path
flist = os.listdir(efp)
proper_base = bn
rval = ['<html><head><title>Files for Composite Dataset %s</title></head><p/>Comprises the following files:<p/><ul>' % (bn)]
for i,fname in enumerate(flist):
newpath = fix(os.path.join(efp,fname),proper_base)
sfname = os.path.split(newpath)[-1]
rval.append( '<li><a href="%s">%s</a>' % ( sfname, sfname ) )
rval.append( '</ul></html>' )
f = file(dataset.file_name,'w')
f.write("\n".join( rval ))
f.write('\n')
f.close()
def set_meta( self, dataset, **kwd ):
"""
for lped/pbed eg
"""
if kwd.get('overwrite') == False:
#log.debug('@@@ rgenetics set_meta called with overwrite = False')
return True
try:
efp = dataset.extra_files_path
except:
#log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0], dataset.name))
return False
try:
flist = os.listdir(efp)
except:
#log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0],dataset.name))
return False
if len(flist) == 0:
#log.debug('@@@rgenetics set_meta failed - %s efp %s is empty?' % (dataset.name,efp))
return False
bn = None
for f in flist:
            n,e = os.path.splitext(f)
if (not bn) and e in ('.ped','.map','.bim','.fam'):
bn = n
dataset.metadata.base_name = bn
if not bn:
bn = '?'
self.regenerate_primary_file(dataset)
if not dataset.info:
dataset.info = 'Galaxy genotype datatype object'
if not dataset.blurb:
dataset.blurb = 'Composite file - Rgenetics Galaxy toolkit'
return True
class SNPMatrix(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="snpmatrix"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Binary RGenetics file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
    def sniff(self,filename):
        """
        need to check the file header hex code
        """
        infile = open(filename, "rb")
        head = infile.read(16)
        infile.close()
        head = [hex(ord(x)) for x in head] # header bytes as hex strings
        # the expected magic bytes are not specified here, so a non-empty header
        # cannot be positively identified as an SNPMatrix yet
        if head:
            return False
        else:
            return True
class Lped(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="lped"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.ped', description = 'Pedigree File', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.map', description = 'Map File', substitute_name_with_metadata = 'base_name', is_binary = True )
class Pphe(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="pphe"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.pphe', description = 'Plink Phenotype File', substitute_name_with_metadata = 'base_name' )
class Lmap(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="lmap"
class Fphe(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="fphe"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.fphe', description = 'FBAT Phenotype File', substitute_name_with_metadata = 'base_name' )
class Phe(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="phe"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.phe', description = 'Phenotype File', substitute_name_with_metadata = 'base_name' )
class Fped(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="fped"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.fped', description = 'FBAT format pedfile', substitute_name_with_metadata = 'base_name' )
class Pbed(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="pbed"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.bim', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.bed', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.fam', substitute_name_with_metadata = 'base_name', is_binary = True )
class Eigenstratgeno(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="eigenstratgeno"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.eigenstratgeno', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.ind', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.map', substitute_name_with_metadata = 'base_name', is_binary = True )
class Eigenstratpca(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="eigenstratpca"
def __init__( self, **kwd ):
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.eigenstratpca', description = 'Eigenstrat PCA file', substitute_name_with_metadata = 'base_name' )
class Snptest(Rgenetics):
"""
fake class to distinguish different species of Rgenetics data collections
"""
file_ext="snptest"
class Pheno(Tabular):
"""
base class for pheno files
"""
file_ext = 'pheno'
class RexpBase( Html ):
"""
base class for BioC data structures in Galaxy
must be constructed with the pheno data in place since that
goes into the metadata for each instance
"""
MetadataElement( name="columns", default=0, desc="Number of columns", visible=True )
MetadataElement( name="column_names", default=[], desc="Column names", visible=True )
MetadataElement(name="pheCols",default=[],desc="Select list for potentially interesting variables",visible=True)
MetadataElement( name="base_name",
desc="base name for all transformed versions of this expression dataset", default='rexpression', set_in_upload=True)
MetadataElement( name="pheno_path", desc="Path to phenotype data for this experiment", default="rexpression.pheno", visible=True)
file_ext = 'rexpbase'
html_table = None
is_binary = True
composite_type = 'auto_primary_file'
allow_datatype_change = False
def __init__( self, **kwd ):
Html.__init__(self,**kwd)
self.add_composite_file( '%s.pheno', description = 'Phenodata tab text file',
substitute_name_with_metadata = 'base_name', is_binary=True)
def generate_primary_file( self, dataset = None ):
"""
This is called only at upload to write the html file
cannot rename the datasets here - they come with the default unfortunately
"""
return '<html><head></head><body>AutoGenerated Primary File for Composite Dataset</body></html>'
def get_phecols(self, phenolist=[], maxConc=20):
"""
sept 2009: cannot use whitespace to split - make a more complex structure here
and adjust the methods that rely on this structure
return interesting phenotype column names for an rexpression eset or affybatch
to use in array subsetting and so on. Returns a data structure for a
dynamic Galaxy select parameter.
A column with only 1 value doesn't change, so is not interesting for
analysis. A column with a different value in every row is equivalent to a unique
identifier so is also not interesting for anova or limma analysis - both these
are removed after the concordance (count of unique terms) is constructed for each
column. Then a complication - each remaining pair of columns is tested for
redundancy - if two columns are always paired, then only one is needed :)
"""
for nrows,row in enumerate(phenolist): # construct concordance
if len(row.strip()) == 0:
break
row = row.strip().split('\t')
if nrows == 0: # set up from header
head = row
totcols = len(row)
concordance = [{} for x in head] # list of dicts
else:
for col,code in enumerate(row): # keep column order correct
if col >= totcols:
log.warning('### get_phecols error in pheno file - row %d col %d (%s) longer than header %s' % (nrows, col, row, head))
else:
concordance[col].setdefault(code,0) # first one is zero
concordance[col][code] += 1
useCols = []
useConc = [] # columns of interest to keep
nrows = len(phenolist)
nrows -= 1 # drop head from count
for c,conc in enumerate(concordance): # c is column number
if (len(conc) > 1) and (len(conc) < min(nrows,maxConc)): # not all same and not all different!!
useConc.append(conc) # keep concordance
useCols.append(c) # keep column
nuse = len(useCols)
# now to check for pairs of concordant columns - drop one of these.
delme = []
p = phenolist[1:] # drop header
plist = [x.strip().split('\t') for x in p] # list of lists
phe = [[x[i] for i in useCols] for x in plist if len(x) >= totcols] # strip unused data
for i in range(0,(nuse-1)): # for each interesting column
for j in range(i+1,nuse):
kdict = {}
for row in phe: # row is a list of lists
k = '%s%s' % (row[i],row[j]) # composite key
kdict[k] = k
if (len(kdict.keys()) == len(concordance[useCols[j]])): # i and j are always matched
delme.append(j)
delme = list(set(delme)) # remove dupes
listCol = []
delme.sort()
delme.reverse() # must delete from far end!
for i in delme:
del useConc[i] # get rid of concordance
del useCols[i] # and usecols entry
for i,conc in enumerate(useConc): # these are all unique columns for the design matrix
ccounts = [(conc.get(code,0),code) for code in conc.keys()] # decorate
ccounts.sort()
cc = [(x[1],x[0]) for x in ccounts] # list of code count tuples
codeDetails = (head[useCols[i]],cc) # ('foo',[('a',3),('b',11),..])
listCol.append(codeDetails)
if len(listCol) > 0:
res = listCol
# metadata.pheCols becomes [('bar;22,zot;113','foo'), ...]
else:
res = [('no usable phenotype columns found',[('?',0),]),]
return res
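    # Illustrative sketch (not part of the original datatype): how get_phecols
    # classifies columns for a tiny pheno table, assuming a tab-separated header
    # plus data rows as described in the docstring above.
    #
    #   phenolist = ['sample\tsex\tbatch\n',
    #                's1\tM\tb1\n',
    #                's2\tF\tb1\n',
    #                's3\tF\tb2\n']
    #   cols = datatype.get_phecols(phenolist=phenolist)  # datatype: a RexpBase instance
    #   # 'sample' is unique per row so it is dropped; 'sex' and 'batch' remain,
    #   # giving roughly:
    #   #   [('sex', [('M', 1), ('F', 2)]), ('batch', [('b2', 1), ('b1', 2)])]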
def get_pheno(self,dataset):
"""
expects a .pheno file in the extra_files_dir - ugh
        note that R is weird and adds the row.name in
the header so the columns are all wrong - unless you tell it not to.
A file can be written as
write.table(file='foo.pheno',pData(foo),sep='\t',quote=F,row.names=F)
"""
p = file(dataset.metadata.pheno_path,'r').readlines()
if len(p) > 0: # should only need to fix an R pheno file once
head = p[0].strip().split('\t')
line1 = p[1].strip().split('\t')
if len(head) < len(line1):
head.insert(0,'ChipFileName') # fix R write.table b0rken-ness
p[0] = '\t'.join(head)
else:
p = []
return '\n'.join(p)
def set_peek( self, dataset, is_multi_byte=False ):
"""
expects a .pheno file in the extra_files_dir - ugh
        note that R is weird and does not include the row.name in
the header. why?
"""
if not dataset.dataset.purged:
pp = os.path.join(dataset.extra_files_path,'%s.pheno' % dataset.metadata.base_name)
try:
p = file(pp,'r').readlines()
except:
p = ['##failed to find %s' % pp,]
dataset.peek = ''.join(p[:5])
dataset.blurb = 'Galaxy Rexpression composite file'
else:
dataset.peek = 'file does not exist\n'
dataset.blurb = 'file purged from disk'
def get_peek( self, dataset ):
"""expects a .pheno file in the extra_files_dir - ugh"""
pp = os.path.join(dataset.extra_files_path,'%s.pheno' % dataset.metadata.base_name)
try:
p = file(pp,'r').readlines()
except:
p = ['##failed to find %s' % pp]
return ''.join(p[:5])
def get_file_peek(self,filename):
"""
can't really peek at a filename - need the extra_files_path and such?
"""
h = '## rexpression get_file_peek: no file found'
try:
h = file(filename,'r').readlines()
except:
pass
return ''.join(h[:5])
def regenerate_primary_file(self,dataset):
"""cannot do this until we are setting metadata
"""
bn = dataset.metadata.base_name
flist = os.listdir(dataset.extra_files_path)
rval = ['<html><head><title>Files for Composite Dataset %s</title></head><p/>Comprises the following files:<p/><ul>' % (bn)]
for i,fname in enumerate(flist):
sfname = os.path.split(fname)[-1]
rval.append( '<li><a href="%s">%s</a>' % ( sfname, sfname ) )
rval.append( '</ul></html>' )
f = file(dataset.file_name,'w')
f.write("\n".join( rval ))
f.write('\n')
f.close()
def init_meta( self, dataset, copy_from=None ):
"""Add metadata elements"""
if copy_from:
dataset.metadata = copy_from.metadata
def set_meta( self, dataset, **kwd ):
"""
        NOTE we apply the tabular machinery to the phenodata extracted
from a BioC eSet or affybatch.
"""
try:
flist = os.listdir(dataset.extra_files_path)
except:
#log.debug('@@@rexpression set_meta failed - no dataset?')
return False
bn = None
for f in flist:
n = os.path.splitext(f)[0]
if not bn:
bn = n
dataset.metadata.base_name = bn
if not bn:
bn = '?'
pn = '%s.pheno' % (bn)
pp = os.path.join(dataset.extra_files_path,pn)
dataset.metadata.pheno_path=pp
try:
pf = file(pp,'r').readlines() # read the basename.phenodata in the extra_files_path
except:
pf = None
if pf:
h = pf[0].strip()
h = h.split('\t') # hope is header
h = [escape(x) for x in h]
dataset.metadata.column_names = h
dataset.metadata.columns = len(h)
dataset.peek = ''.join(pf[:5])
else:
dataset.metadata.column_names = []
dataset.metadata.columns = 0
dataset.peek = 'No pheno file found'
        if pf and len(pf) > 1:
            dataset.metadata.pheCols = self.get_phecols(phenolist=pf)
        else:
            dataset.metadata.pheCols = [('','No usable phenotypes found',False),]
#self.regenerate_primary_file(dataset)
if not dataset.info:
dataset.info = 'Galaxy Expression datatype object'
if not dataset.blurb:
dataset.blurb = 'R loadable BioC expression object for the Rexpression Galaxy toolkit'
return True
def make_html_table( self, pp='nothing supplied from peek\n'):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">',]
p = pp.split('\n')
try:
# Generate column header
for i,row in enumerate(p):
lrow = row.strip().split('\t')
if i == 0:
orow = ['<th>%s</th>' % escape(x) for x in lrow]
orow.insert(0,'<tr>')
orow.append('</tr>')
else:
orow = ['<td>%s</td>' % escape(x) for x in lrow]
orow.insert(0,'<tr>')
orow.append('</tr>')
out.append(''.join(orow))
out.append( '</table>' )
out = "\n".join( out )
except Exception, exc:
out = "Can't create html table %s" % str( exc )
return out
def display_peek( self, dataset ):
"""Returns formatted html of peek"""
out=self.make_html_table(dataset.peek)
return out
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'text/html'
class Affybatch( RexpBase ):
"""derived class for BioC data structures in Galaxy """
file_ext = "affybatch"
def __init__( self, **kwd ):
RexpBase.__init__(self, **kwd)
self.add_composite_file( '%s.affybatch', description = 'AffyBatch R object saved to file',
substitute_name_with_metadata = 'base_name', is_binary=True )
class Eset( RexpBase ):
"""derived class for BioC data structures in Galaxy """
file_ext = "eset"
def __init__( self, **kwd ):
RexpBase.__init__(self, **kwd)
self.add_composite_file( '%s.eset', description = 'ESet R object saved to file',
substitute_name_with_metadata = 'base_name', is_binary = True )
class MAlist( RexpBase ):
"""derived class for BioC data structures in Galaxy """
file_ext = "malist"
def __init__( self, **kwd ):
RexpBase.__init__(self, **kwd)
self.add_composite_file( '%s.malist', description = 'MAlist R object saved to file',
substitute_name_with_metadata = 'base_name', is_binary = True )
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| {
"content_hash": "b7bd3478a91d17957a25b403678db2dc",
"timestamp": "",
"source": "github",
"line_count": 758,
"max_line_length": 195,
"avg_line_length": 44.04485488126649,
"alnum_prop": 0.5418738393338525,
"repo_name": "volpino/Yeps-EURAC",
"id": "6dea663208b7bd26fbe46f8c9036ae8170159fe3",
"size": "33386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/galaxy/datatypes/genetics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1028241"
},
{
"name": "Perl",
"bytes": "46988"
},
{
"name": "Python",
"bytes": "3728724"
},
{
"name": "Shell",
"bytes": "24718"
}
],
"symlink_target": ""
} |
from rest_framework import status
from rest_framework.response import Response
from .errors import ConfigError
def poster(posting_task):
try:
posting_task()
except ConfigError as e:
return Response({
'details': str(e),
}, status=status.HTTP_503_SERVICE_UNAVAILABLE)
return Response()
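# Illustrative usage sketch (not part of the original module); `publish_post`
# is a hypothetical callable that may raise ConfigError when configuration is
# missing:
#
#   def publish_post():
#       ...  # do the actual posting
#
#   response = poster(publish_post)  # Response() on success, HTTP 503 on ConfigError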
| {
"content_hash": "85925bd8863aa443a6266df77cc7efc0",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 23.928571428571427,
"alnum_prop": 0.6716417910447762,
"repo_name": "vaniakosmos/memes-reposter",
"id": "9203054fae14db76997c0b125623ac8f1167981f",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1067"
},
{
"name": "Dockerfile",
"bytes": "203"
},
{
"name": "HTML",
"bytes": "11813"
},
{
"name": "JavaScript",
"bytes": "6442"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Python",
"bytes": "81075"
},
{
"name": "Shell",
"bytes": "614"
}
],
"symlink_target": ""
} |
from . import string_utils
from . import property_utils
from . import shadergraph_utils
from . import scene_utils
from .. import rman_constants
from .. import rman_config
from collections import OrderedDict
from copy import deepcopy
import bpy
import os
import getpass
__BLENDER_TO_RMAN_DSPY__ = { 'TIFF': 'tiff', 'TARGA': 'targa', 'TARGA_RAW': 'targa', 'OPEN_EXR': 'openexr', 'PNG': 'png'}
def get_channel_name(aov, layer_name):
aov_name = aov.name.replace(' ', '')
aov_channel_name = aov.channel_name
if not aov.aov_name or not aov.channel_name:
return ''
elif aov.aov_name == "color rgba":
aov_channel_name = "Ci,a"
# Remaps any color lpe channel names to a denoise friendly one
elif aov_name in channel_name_map.keys():
aov_channel_name = '%s_%s_%s' % (
channel_name_map[aov_name], aov_name, layer_name)
elif aov.aov_name == "color custom_lpe":
aov_channel_name = aov.name
else:
aov_channel_name = '%s_%s' % (
aov_name, layer_name)
return aov_channel_name
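# Illustrative examples (assumed inputs, not from the original source):
#   get_channel_name(aov, 'ViewLayer') -> 'Ci,a' when aov.aov_name is 'color rgba'
#   get_channel_name(aov, 'ViewLayer') -> 'directDiffuse_ViewLayer' for an AOV
#       named 'directDiffuse' that is not listed in channel_name_map
#   AOV names that do appear in channel_name_map additionally get the
#   denoise-friendly prefix from that map.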
def _default_dspy_params():
d = {}
d[u'enable'] = { 'type': u'int', 'value': True}
d[u'lpeLightGroup'] = { 'type': u'string', 'value': None}
d[u'remap_a'] = { 'type': u'float', 'value': 0.0}
d[u'remap_b'] = { 'type': u'float', 'value': 0.0}
d[u'remap_c'] = { 'type': u'float', 'value': 0.0}
d[u'exposure'] = { 'type': u'float2', 'value': [1.0, 1.0] }
d[u'filter'] = {'type': u'string', 'value': 'default'}
d[u'filterwidth'] = { 'type': u'float2', 'value': [2,2]}
d[u'statistics'] = { 'type': u'string', 'value': 'none'}
d[u'shadowthreshold'] = { 'type': u'float', 'value': 0.01}
return d
def _add_stylized_channels(dspys_dict, dspy_drv, rman_scene, expandTokens):
"""
Add the necessary dspy channels for stylized looks.
"""
stylized_tmplt = rman_config.__RMAN_DISPLAY_TEMPLATES__.get('Stylized', None)
if not stylized_tmplt:
return
rm = rman_scene.bl_scene.renderman
display_driver = dspy_drv
rman_dspy_channels = rman_config.__RMAN_DISPLAY_CHANNELS__
if not display_driver:
display_driver = __BLENDER_TO_RMAN_DSPY__.get(rman_scene.bl_scene.render.image_settings.file_format, 'openexr')
if 'display' in stylized_tmplt:
display_driver = stylized_tmplt['display']['displayType']
if display_driver in ['it', 'blender']:
if rman_scene.is_viewport_render:
display_driver = 'null'
for chan in stylized_tmplt['channels']:
dspy_params = {}
dspy_params['displayChannels'] = []
dspy_name = '%s_%s' % (stylized_tmplt.get('displayName', 'rman_stylized'), chan)
d = _default_dspy_params()
if chan not in dspys_dict['channels']:
d = _default_dspy_params()
settings = rman_dspy_channels[chan]
chan_src = settings['channelSource']
chan_type = settings['channelType']
d[u'channelSource'] = {'type': u'string', 'value': chan_src}
d[u'channelType'] = { 'type': u'string', 'value': chan_type}
dspys_dict['channels'][chan] = d
dspy_params['displayChannels'].append(chan)
filePath = '%s_%s' % (dspy_name, chan)
dspys_dict['displays'][dspy_name] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': False,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
else:
dspy_name = stylized_tmplt.get('displayName', 'rman_stylized')
dspy_params = {}
dspy_params['displayChannels'] = []
for chan in stylized_tmplt['channels']:
d = _default_dspy_params()
if chan not in dspys_dict['channels']:
d = _default_dspy_params()
settings = rman_dspy_channels[chan]
chan_src = settings['channelSource']
chan_type = settings['channelType']
d[u'channelSource'] = {'type': u'string', 'value': chan_src}
d[u'channelType'] = { 'type': u'string', 'value': chan_type}
dspys_dict['channels'][chan] = d
dspy_params['displayChannels'].append(chan)
filePath = rm.path_beauty_image_output
f, ext = os.path.splitext(filePath)
filePath = f + '_rman_stylized' + ext
if expandTokens:
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
asFilePath=True)
dspys_dict['displays'][dspy_name] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': False,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
def _add_denoiser_channels(dspys_dict, dspy_params):
"""
Add the necessary dspy channels for denoiser. We assume
the beauty display will be used as the variance file
"""
denoise_tmplt = rman_config.__RMAN_DISPLAY_TEMPLATES__['Denoiser']
for chan in denoise_tmplt['channels']:
dspy_channels = dspys_dict['displays']['beauty']['params']['displayChannels']
if chan in dspy_channels:
continue
if chan not in dspys_dict['channels']:
d = _default_dspy_params()
settings = rman_config.__RMAN_DISPLAY_CHANNELS__[chan]
d[u'channelSource'] = {'type': u'string', 'value': settings['channelSource']}
d[u'channelType'] = { 'type': u'string', 'value': settings['channelType']}
if 'statistics' in settings:
d[u'statistics'] = { 'type': u'string', 'value': settings['statistics']}
dspys_dict['channels'][chan] = d
dspys_dict['displays']['beauty']['params']['displayChannels'].append(chan)
filePath = dspys_dict['displays']['beauty']['filePath']
f,ext = os.path.splitext(filePath)
dspys_dict['displays']['beauty']['filePath'] = f + '_variance' + ext
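# Minimal sketch of the effect of _add_denoiser_channels (illustrative only):
# every channel listed in the 'Denoiser' display template is appended to the
# beauty display's 'displayChannels', and the beauty output path gains a
# '_variance' suffix so the denoiser can find it, e.g. (hypothetical path)
#   '/tmp/beauty.0001.exr'  ->  '/tmp/beauty.0001_variance.exr'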
def _set_blender_dspy_dict(layer, dspys_dict, dspy_drv, rman_scene, expandTokens):
rm = rman_scene.bl_scene.renderman
display_driver = dspy_drv
param_list = None
aov_denoise = False
if not display_driver:
display_driver = __BLENDER_TO_RMAN_DSPY__.get(rman_scene.bl_scene.render.image_settings.file_format, 'openexr')
param_list = rman_scene.rman.Types.ParamList()
if display_driver == 'openexr':
param_list.SetInteger('asrgba', 1)
if display_driver == 'blender' and rm.blender_optix_denoiser:
aov_denoise = True
param_list = rman_scene.rman.Types.ParamList()
param_list.SetInteger("use_optix_denoiser", 1)
# add beauty (Ci,a)
dspy_params = {}
dspy_params['displayChannels'] = []
d = _default_dspy_params()
d[u'channelSource'] = {'type': u'string', 'value': 'Ci'}
d[u'channelType'] = { 'type': u'string', 'value': 'color'}
dspys_dict['channels']['Ci'] = d
d = _default_dspy_params()
d[u'channelSource'] = {'type': u'string', 'value': 'a'}
d[u'channelType'] = { 'type': u'string', 'value': 'float'}
dspys_dict['channels']['a'] = d
dspy_params['displayChannels'].append('Ci')
dspy_params['displayChannels'].append('a')
filePath = rm.path_beauty_image_output
if expandTokens:
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
asFilePath=True)
dspys_dict['displays']['beauty'] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': aov_denoise,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': param_list}
if display_driver == 'blender' and rman_scene.is_viewport_render:
display_driver = 'null'
# so use built in aovs
blender_aovs = [
('z_depth', layer.use_pass_z, 'z'),
('Nn', layer.use_pass_normal, "Normal"),
("dPdtime", layer.use_pass_vector, "Vectors"),
("u", layer.use_pass_uv, "u"),
("v", layer.use_pass_uv, "v"),
("id", layer.use_pass_object_index, "id"),
("blender_shadows", layer.use_pass_shadow, "Shadows"),
("blender_diffuse", layer.use_pass_diffuse_direct, "Diffuse"),
("blender_indirectdiffuse", layer.use_pass_diffuse_indirect, "IndirectDiffuse"),
("blender_albedo", layer.use_pass_diffuse_color, "Albedo"),
("blender_specular", layer.use_pass_glossy_direct, "Specular"),
("blender_indirectspecular", layer.use_pass_glossy_indirect, "IndirectSpecular"),
("blender_subsurface", layer.use_pass_subsurface_indirect,"Subsurface"),
("blender_emission", layer.use_pass_emit, "Emission")
]
# declare display channels
for source, doit, name in blender_aovs:
filePath = rm.path_aov_image_output
if expandTokens:
token_dict = {'aov': name}
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
token_dict=token_dict,
asFilePath=True)
if doit:
dspy_params = {}
dspy_params['displayChannels'] = []
d = _default_dspy_params()
settings = rman_config.__RMAN_DISPLAY_CHANNELS__[source]
d[u'channelSource'] = {'type': u'string', 'value': settings['channelSource']}
d[u'channelType'] = { 'type': u'string', 'value': settings['channelType']}
if source == 'id':
d[u'filter'] = {'type': u'string', 'value': 'zmin'}
d[u'filterwidth'] = { 'type': u'float2', 'value': [1, 1]}
dspys_dict['channels'][name] = d
dspy_params['displayChannels'].append(name)
dspys_dict['displays'][name] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': aov_denoise,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': param_list}
if not layer.use_pass_object_index and rman_scene.is_interactive:
# Add ID pass if it was not requested and we're in
# IPR mode
dspy_params = {}
dspy_params['displayChannels'] = []
d = _default_dspy_params()
d[u'channelSource'] = {'type': u'string', 'value': 'id'}
d[u'channelType'] = { 'type': u'string', 'value': 'integer'}
d[u'filter'] = {'type': u'string', 'value': 'zmin'}
d[u'filterwidth'] = { 'type': u'float2', 'value': [1, 1]}
dspys_dict['channels']['id'] = d
dspy_params['displayChannels'].append('id')
filePath = 'id_pass'
dspys_dict['displays']['id_pass'] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': False,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
def _get_real_chan_name(chan):
""" Get the real channel name
Channels with a light group will have the light group
appended to the name
"""
ch_name = chan.channel_name
lgt_grp = chan.light_group.strip()
if lgt_grp != '' and lgt_grp not in ch_name:
ch_name = '%s_%s' % (ch_name, lgt_grp)
return ch_name
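# Example (illustrative): a channel named 'directDiffuse' with light_group 'key'
# becomes 'directDiffuse_key'; with no light group the name is returned unchanged.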
def _add_chan_to_dpsychan_list(rm, rm_rl, dspys_dict, chan):
ch_name = _get_real_chan_name(chan)
lgt_grp = chan.light_group.strip()
# add the channel if not already in list
if ch_name not in dspys_dict['channels']:
d = _default_dspy_params()
source_type = chan.channel_type
source = chan.channel_source
        if lgt_grp:
if 'Ci' in source:
source = "lpe:C[DS]*[<L.>O]"
if "<L.>" in source:
source = source.replace("<L.>", "<L.'%s'>" % lgt_grp)
elif "lpe:" in source:
source = source.replace("L", "<L.'%s'>" % lgt_grp)
d[u'channelSource'] = {'type': u'string', 'value': source}
d[u'channelType'] = { 'type': u'string', 'value': source_type}
d[u'lpeLightGroup'] = { 'type': u'string', 'value': lgt_grp}
d[u'remap_a'] = { 'type': u'float', 'value': chan.remap_a}
d[u'remap_b'] = { 'type': u'float', 'value': chan.remap_b}
d[u'remap_c'] = { 'type': u'float', 'value': chan.remap_c}
d[u'exposure'] = { 'type': u'float2', 'value': [chan.exposure_gain, chan.exposure_gamma] }
if rm.hider_pixelFilterMode != 'importance':
# per channel filter does not work in importance mode
d[u'filter'] = {'type': u'string', 'value': chan.chan_pixelfilter}
d[u'filterwidth'] = { 'type': u'float2', 'value': [chan.chan_pixelfilter_x, chan.chan_pixelfilter_y]}
d[u'statistics'] = { 'type': u'string', 'value': chan.stats_type}
d[u'shadowthreshold'] = { 'type': u'float', 'value': chan.shadowthreshold}
dspys_dict['channels'][ch_name] = d
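# Illustrative sketch of the light-group LPE rewrite above (assumed inputs):
#   source = 'lpe:C<RD>[<L.>O]', lgt_grp = 'key'
#       -> '<L.>' is scoped to the group: 'lpe:C<RD>[<L.'key'>O]'
#   source = 'Ci', lgt_grp = 'key'
#       -> first rewritten to "lpe:C[DS]*[<L.>O]", then scoped:
#          "lpe:C[DS]*[<L.'key'>O]"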
def _set_rman_dspy_dict(rm_rl, dspys_dict, dspy_drv, rman_scene, expandTokens):
rm = rman_scene.bl_scene.renderman
display_driver = dspy_drv
for aov in rm_rl.custom_aovs:
if aov.name == '':
continue
if len(aov.dspy_channels) < 1:
continue
dspy_params = {}
dspy_params['displayChannels'] = []
for chan_ptr in aov.dspy_channels:
chan = rm_rl.dspy_channels[chan_ptr.dspy_chan_idx]
_add_chan_to_dpsychan_list(rm, rm_rl, dspys_dict, chan)
dspy_params['displayChannels'].append(chan.channel_name)
param_list = None
aov_denoise = aov.denoise
aov_denoise_mode = aov.denoise_mode
if rman_scene.rman_bake:
if rm.rman_bake_illum_mode == '3D':
display_driver = 'pointcloud'
else:
display_driver = aov.displaydriver
param_list = rman_scene.rman.Types.ParamList()
dspy_driver_settings = getattr(aov, '%s_settings' % display_driver)
property_utils.set_node_rixparams(dspy_driver_settings, None, param_list, None)
elif rman_scene.external_render:
display_driver = aov.displaydriver
param_list = rman_scene.rman.Types.ParamList()
dspy_driver_settings = getattr(aov, '%s_settings' % display_driver)
property_utils.set_node_rixparams(dspy_driver_settings, None, param_list, None)
elif display_driver == 'blender':
if rman_scene.is_viewport_render:
if aov.name != 'beauty':
display_driver = 'null'
if display_driver == 'blender' and rm.blender_optix_denoiser:
aov_denoise = True
param_list = rman_scene.rman.Types.ParamList()
param_list.SetInteger("use_optix_denoiser", 1)
if rman_scene.rman_bake:
filePath = rm.path_bake_illum_ptc
if rm.rman_bake_illum_mode == '2D':
filePath = rm.path_bake_illum_img
if expandTokens:
token_dict = {'aov': aov.name}
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
token_dict=token_dict,
asFilePath=True)
if rm.rman_bake_illum_filename == 'BAKEFILEATTR':
filePath = '<user:bake_filename_attr>'
elif rm.rman_bake_illum_filename == 'IDENTIFIER':
tokens = os.path.splitext(filePath)
filePath = '%s.<identifier:object>%s' % (tokens[0], tokens[1])
else:
if aov.name == 'beauty':
filePath = rm.path_beauty_image_output
if expandTokens:
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
asFilePath=True)
else:
filePath = rm.path_aov_image_output
if expandTokens:
token_dict = {'aov': aov.name}
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
token_dict=token_dict,
asFilePath=True)
if aov.name != 'beauty' and (display_driver in ['it', 'blender']): #(display_driver == 'it' or rman_scene.is_viewport_render):
# break up display per channel when rendering to it
for chan_ptr in aov.dspy_channels:
chan = rm_rl.dspy_channels[chan_ptr.dspy_chan_idx]
ch_name = _get_real_chan_name(chan)
dspy_name = '%s_%s' % (aov.name, ch_name)
new_dspy_params = deepcopy(dspy_params)
new_dspy_params['displayChannels'] = [ch_name]
if display_driver == 'it':
new_file_path = filePath.replace('.it', '_%s.it' % ch_name)
else:
new_file_path = filePath.replace('.exr', '_%s.exr' % ch_name)
dspys_dict['displays'][dspy_name] = {
'driverNode': display_driver,
'filePath': new_file_path,
'denoise': aov_denoise,
'denoise_mode': aov_denoise_mode,
'camera': aov.camera,
'bake_mode': aov.aov_bake,
'params': new_dspy_params,
'dspyDriverParams': param_list }
else:
dspys_dict['displays'][aov.name] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': aov_denoise,
'denoise_mode': aov_denoise_mode,
'camera': aov.camera,
'bake_mode': aov.aov_bake,
'params': dspy_params,
'dspyDriverParams': param_list }
if aov_denoise and display_driver == 'openexr' and not rman_scene.is_interactive:
_add_denoiser_channels(dspys_dict, dspy_params)
if aov.name == 'beauty' and rman_scene.is_interactive:
if rman_scene.is_viewport_render:
display_driver = 'null'
# Add ID pass
dspy_params = {}
dspy_params['displayChannels'] = []
d = _default_dspy_params()
d[u'channelSource'] = {'type': u'string', 'value': 'id'}
d[u'channelType'] = { 'type': u'string', 'value': 'integer'}
d[u'filter'] = {'type': u'string', 'value': 'zmin'}
d[u'filterwidth'] = { 'type': u'float2', 'value': [1, 1]}
dspys_dict['channels']['id'] = d
dspy_params['displayChannels'].append('id')
filePath = 'id_pass'
dspys_dict['displays']['id_pass'] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': False,
'denoise_mode': 'singleframe',
'camera': aov.camera,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
def _set_rman_holdouts_dspy_dict(dspys_dict, dspy_drv, rman_scene, expandTokens):
rm = rman_scene.bl_scene.renderman
display_driver = dspy_drv
if not display_driver:
display_driver = __BLENDER_TO_RMAN_DSPY__.get(rman_scene.bl_scene.render.image_settings.file_format, 'openexr')
param_list = rman_scene.rman.Types.ParamList()
if display_driver == 'openexr':
param_list.SetInteger('asrgba', 1)
if display_driver == 'blender' and rman_scene.is_viewport_render:
display_driver = 'null'
dspy_params = {}
dspy_params['displayChannels'] = []
d = _default_dspy_params()
occluded_src = "color lpe:holdouts;C[DS]+<L.>"
d[u'channelSource'] = {'type': u'string', 'value': occluded_src}
d[u'channelType'] = { 'type': u'string', 'value': 'color'}
dspys_dict['channels']['occluded'] = d
dspy_params['displayChannels'].append('occluded')
dspys_dict['displays']['occluded'] = {
'driverNode': 'null',
'filePath': 'occluded',
'denoise': False,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
dspy_params = {}
dspy_params['displayChannels'] = []
d = _default_dspy_params()
holdout_matte_src = "color lpe:holdouts;unoccluded;C[DS]+<L.>"
d[u'channelSource'] = {'type': u'string', 'value': holdout_matte_src}
d[u'channelType'] = { 'type': u'string', 'value': 'color'}
dspys_dict['channels']['holdoutMatte'] = d
dspy_params['displayChannels'].append('holdoutMatte')
# user wants separate AOV for matte
if rm.do_holdout_matte == "AOV":
filePath = rm.path_beauty_image_output
f, ext = os.path.splitext(filePath)
filePath = f + '_holdoutMatte' + ext
if expandTokens:
filePath = string_utils.expand_string(filePath,
display=display_driver,
frame=rman_scene.bl_frame_current,
asFilePath=True)
dspys_dict['displays']['holdoutMatte'] = {
'driverNode': display_driver,
'filePath': filePath,
'denoise': False,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
else:
dspys_dict['displays']['holdoutMatte'] = {
'driverNode': 'null',
'filePath': 'holdoutMatte',
'denoise': False,
'denoise_mode': 'singleframe',
'camera': None,
'bake_mode': None,
'params': dspy_params,
'dspyDriverParams': None}
def get_dspy_dict(rman_scene, expandTokens=True):
"""
Create a dictionary of display channels and displays. The layout:
{ 'channels': {
u'Ci': { u'channelSource': { 'type': u'string', 'value': u'Ci'},
u'channelType': { 'type': u'string', 'value': u'color'},
u'enable': { 'type': u'int', 'value': True},
u'lpeLightGroup': { 'type': u'string', 'value': None},
u'remap_a': { 'type': u'float', 'value': 0.0},
u'remap_b': { 'type': u'float', 'value': 0.0},
u'remap_c': { 'type': u'float', 'value': 0.0}
},
u'a': { u'channelSource': { 'type': u'string', 'value': u'a'},
u'channelType': { 'type': u'string', 'value': u'float'},
u'enable': { 'type': u'int', 'value': True},
u'lpeLightGroup': { 'type': u'string', 'value': None},
u'remap_a': { 'type': u'float', 'value': 0.0},
u'remap_b': { 'type': u'float', 'value': 0.0},
u'remap_c': { 'type': u'float', 'value': 0.0}
}
},
'displays': { u'rmanDefaultDisplay':
{ 'driverNode': u'd_openexr1',
'filePath': u'<OUT>/<blender>/images/<scene>.<F4>.<ext>',
'params': { u'enable': { 'type': u'int', 'value': True},
u'displayChannels': { 'type': u'message', 'value': [ u'Ci', u'a']},
u'displayType': { 'type': u'message', 'value': u'd_openexr'},
u'exposure': { 'type': u'float2', 'value': [1.0, 1.0]},
                                        u'filter': { 'type': u'string', 'value': u'default'},
u'filterwidth': { 'type': u'float2', 'value': [1.0, 1.0]},
u'remap_a': { 'type': u'float', 'value': 0.0},
u'remap_b': { 'type': u'float', 'value': 0.0},
u'remap_c': { 'type': u'float', 'value': 0.0}
},
'camera': [None|u'camera_name'],
'denoise': [True|False],
'denoise_mode': [u'singleframe'|u'crossframe']
'bake_mode': [True|False]
'dspyDriverParams': RtParamList
}
}
}
"""
rm = rman_scene.bl_scene.renderman
rm_rl = rman_scene.rm_rl
layer = rman_scene.bl_view_layer
dspys_dict = {'displays': OrderedDict(), 'channels': {}}
display_driver = None
if rman_scene.is_interactive:
display_driver = rm.render_into
elif (not rman_scene.external_render):
# if preview render
# we ignore the display driver setting in the AOV and render to whatever
# render_into is set to
display_driver = rm.render_into
# FIXME: remove these lines once we are able to get some kind of progress
# when rendering to XPU
if rm.render_into == 'blender' and scene_utils.get_render_variant(rman_scene.bl_scene) != 'prman':
display_driver = 'openexr'
if rm.render_rman_stylized:
_add_stylized_channels(dspys_dict, display_driver, rman_scene, expandTokens)
if rm_rl:
_set_rman_dspy_dict(rm_rl, dspys_dict, display_driver, rman_scene, expandTokens)
else:
# We're using blender's layering system
_set_blender_dspy_dict(layer, dspys_dict, display_driver, rman_scene, expandTokens)
if rm.do_holdout_matte != "OFF":
_set_rman_holdouts_dspy_dict(dspys_dict, display_driver, rman_scene, expandTokens)
return dspys_dict
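# Illustrative usage sketch (not from the original source); `rman_scene` is
# assumed to be an already populated scene wrapper:
#
#   dspys = get_dspy_dict(rman_scene, expandTokens=False)
#   for name, dspy in dspys['displays'].items():
#       print(name, dspy['driverNode'], dspy['filePath'])
#   # dspys['channels'] maps each display channel to its source/type/filter settings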
def make_dspy_info(scene):
"""
Create some render parameter from scene and pass it to image tool.
If current scene renders to "it", collect some useful infos from scene
and send them alongside the render job to RenderMan's image tool. Applies to
renderpass result only, does not affect postprocessing like denoise.
Arguments:
scene (bpy.types.Scene) - Blender scene object
Returns:
(str) - a string with the display notes to give to "it"
"""
params = {}
rm = scene.renderman
world = scene.world
from time import localtime, strftime
ts = strftime("%a %x, %X", localtime())
ts = bytes(ts, 'ascii', 'ignore').decode('utf-8', 'ignore')
integrator = shadergraph_utils.find_integrator_node(world)
integrator_nm = 'PxrPathTracer'
if integrator:
integrator_nm = integrator.bl_label
dspy_notes = "Render start:\t%s\r\r" % ts
dspy_notes += "Integrator:\t%s\r\r" % integrator_nm
dspy_notes += "Samples:\t%d - %d\r" % (rm.hider_minSamples, rm.hider_maxSamples)
dspy_notes += "Pixel Variance:\t%f\r\r" % rm.ri_pixelVariance
# moved this in front of integrator check. Was called redundant in
# both cases
if integrator:
if integrator.bl_label == 'PxrPathTracer':
dspy_notes += "Mode:\t%s\r" % integrator.sampleMode
dspy_notes += "Light:\t%d\r" % integrator.numLightSamples
dspy_notes += "Bxdf:\t%d\r" % integrator.numBxdfSamples
if integrator.sampleMode == 'bxdf':
dspy_notes += "Indirect:\t%d\r\r" % integrator.numIndirectSamples
else:
dspy_notes += "Diffuse:\t%d\r" % integrator.numDiffuseSamples
dspy_notes += "Specular:\t%d\r" % integrator.numSpecularSamples
dspy_notes += "Subsurface:\t%d\r" % integrator.numSubsurfaceSamples
dspy_notes += "Refraction:\t%d\r" % integrator.numRefractionSamples
elif integrator.bl_label == "PxrVCM":
dspy_notes += "Light:\t%d\r" % integrator.numLightSamples
dspy_notes += "Bxdf:\t%d\r\r" % integrator.numBxdfSamples
return dspy_notes
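# Illustrative usage sketch (assumption): the returned string is typically passed
# to the 'it' display driver as render notes, e.g.
#   notes = make_dspy_info(bpy.context.scene)
#   # notes starts with something like "Render start:\tMon 01/01/24, 12:00:00\r\r..."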
def export_metadata(scene, params):
"""
Create metadata for the OpenEXR display driver
Arguments:
scene (bpy.types.Scene) - Blender scene object
params (RtParamList) - param list to fill with meta data
"""
rm = scene.renderman
world = scene.world
if "Camera" not in bpy.data.cameras:
return
if "Camera" not in bpy.data.objects:
return
cam = bpy.data.cameras["Camera"]
obj = bpy.data.objects["Camera"]
if cam.dof.focus_object:
dof_distance = (obj.location - cam.dof.focus_object.location).length
else:
dof_distance = cam.dof.focus_distance
output_dir = string_utils.expand_string(rm.path_rib_output,
frame=scene.frame_current,
asFilePath=True)
output_dir = os.path.dirname(output_dir)
statspath=os.path.join(output_dir, 'stats.%04d.xml' % scene.frame_current)
params.SetString('exrheader_dcc', 'Blender %s\nRenderman for Blender %s' % (bpy.app.version, rman_constants.RFB_ADDON_VERSION_STRING))
params.SetFloat('exrheader_fstop', cam.dof.aperture_fstop )
params.SetFloat('exrheader_focaldistance', dof_distance )
params.SetFloat('exrheader_focal', cam.lens )
params.SetFloat('exrheader_haperture', cam.sensor_width )
params.SetFloat('exrheader_vaperture', cam.sensor_height )
params.SetString('exrheader_renderscene', bpy.data.filepath)
params.SetString('exrheader_user', getpass.getuser())
params.SetString('exrheader_statistics', statspath)
integrator = shadergraph_utils.find_integrator_node(world)
integrator_nm = 'PxrPathTracer'
if integrator:
integrator_nm = integrator.bl_label
params.SetString('exrheader_integrator', integrator_nm)
params.SetFloatArray('exrheader_samples', [rm.hider_minSamples, rm.hider_maxSamples], 2)
params.SetFloat('exrheader_pixelvariance', rm.ri_pixelVariance)
params.SetString('exrheader_comment', rm.custom_metadata) | {
"content_hash": "4a275fd04fe809c9ae956507495a1bf0",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 138,
"avg_line_length": 43.44474393530997,
"alnum_prop": 0.5259647598957687,
"repo_name": "adminradio/RenderManForBlender",
"id": "e3b6622aafd8b71be83e22aaa6a51dc440cf6b94",
"size": "32236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rfb_utils/display_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1055354"
}
],
"symlink_target": ""
} |
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from math import *
import math
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from direct.task import Task
from direct.distributed.ClockDelta import *
import BuildGeometry
from toontown.golf import GolfGlobals
import random, time
def scalp(vec, scal):
    # return the scaled vector; rebinding the local name had no effect for callers
    vec0 = vec[0] * scal
    vec1 = vec[1] * scal
    vec2 = vec[2] * scal
    return Vec3(vec0, vec1, vec2)
def length(vec):
return sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
class PhysicsWorldBase:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhysicsWorld')
def __init__(self, canRender = 0):
self.canRender = canRender
self.world = OdeWorld()
self.space = OdeSimpleSpace()
self.contactgroup = OdeJointGroup()
self.bodyList = []
self.geomList = []
self.massList = []
self.rayList = []
self.showContacts = 0
self.jointMarkers = []
self.jointMarkerCount = 64
self.meshDataList = []
self.geomDataList = []
self.commonObjectInfoDict = {}
self.maxColCount = 0
if self.canRender:
self.odePandaRelationList = self.bodyList
self.root = render.attachNewNode('physics root node')
else:
self.root = NodePath('physics root node')
self.placerNode = self.root.attachNewNode('Placer')
self.subPlacerNode = self.placerNode.attachNewNode('Placer Sub Node')
self.commonObjectDict = {}
self.commonId = 0
self.worldAttach = self.root.attachNewNode('physics geom attach point')
self.timingCycleLength = 10.0
self.timingCycleOffset = 0.0
self.timingSimTime = 0.0
self.FPS = 90.0
self.refFPS = 60.0
self.DTAStep = 1.0 / self.FPS
self.refCon = 1.2
def delete(self):
self.notify.debug('Max Collision Count was %s' % self.maxColCount)
self.stopSim()
self.commonObjectDict = None
if self.canRender:
for pair in self.odePandaRelationList:
pair[0].remove()
pair[1].destroy()
self.odePandaRelationList = None
else:
for body in self.bodyList:
body[1].destroy()
self.bodyList = None
for mass in self.massList:
mass = None
for geom in self.geomList:
geom.destroy()
geom = None
for ray in self.rayList:
ray.destroy()
ray = None
self.placerNode.remove()
self.root.remove()
for marker in self.jointMarkers:
marker.remove()
self.jointMarkers = None
for data in self.geomDataList:
data.destroy()
for data in self.meshDataList:
data.destroy()
self.floor.destroy()
self.floor = None
self.contactgroup.empty()
self.world.destroy()
self.space.destroy()
self.world = None
self.space = None
return
def setupSimulation(self):
self.world.setAutoDisableFlag(0)
self.world.setAutoDisableLinearThreshold(0.15)
self.world.setAutoDisableAngularThreshold(0.15)
self.world.setAutoDisableSteps(2)
self.world.setGravity(0, 0, -25)
self.world.setErp(0.8)
self.world.setCfm(1e-05)
self.world.initSurfaceTable(5)
self.world.setSurfaceEntry(0, 0, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 1, 1500, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(2, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 3, 150, 0.0, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 3, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 1.0 / self.refCon)
self.world.setSurfaceEntry(2, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(3, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(4, 4, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 4, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(pos1=0, pos2=1, mu=80, bounce=0.15, bounce_vel=0.1, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.35 / self.refCon)
self.world.setSurfaceEntry(pos1=2, pos2=1, mu=1500, bounce=0.9, bounce_vel=0.01, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.001 / self.refCon)
self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
self.floor.setCollideBits(BitMask32(0))
self.floor.setCategoryBits(BitMask32(3840))
self.space.setAutoCollideWorld(self.world)
self.space.setAutoCollideJointGroup(self.contactgroup)
self.world.setQuickStepNumIterations(8)
self.DTA = 0.0
self.frameCounter = 0
if self.canRender:
for count in range(self.jointMarkerCount):
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('phase_3/models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.1)
testMarker.setPos(0.0, 0.0, -100.0)
self.jointMarkers.append(testMarker)
def setTimingCycleLength(self, time):
self.timingCycleLength = time
def getTimingCycleLength(self):
return self.timingCycleLength
def getCycleTime(self, doprint = 0):
cycleTime = (globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength
if doprint:
print 'Get Cycle Time %s' % cycleTime
return cycleTime
def setTimeIntoCycle(self, time, doprint = 0):
trueCycleTime = globalClock.getRealTime() % self.timingCycleLength
self.timingCycleOffset = time - trueCycleTime
if doprint:
self.notify.debug('Set Cycle Time %s' % self.timingCycleOffset)
self.notify.debug('SET cycle time %s' % ((globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength))
    def getSimCycleTime(self):
        # intentionally disabled; the calculation below was unreachable dead code
        # return self.timingSimTime % self.timingCycleLength
        return
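    # Illustrative sketch (not part of the original class) of how the timing cycle
    # wraps: with timingCycleLength = 10.0 seconds,
    #   getCycleTime()        -> a value in [0.0, 10.0) derived from the real clock
    #   setTimeIntoCycle(3.0) -> adjusts timingCycleOffset so getCycleTime() ~= 3.0,
    #                            presumably how clients resync moving obstacles.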
def startSim(self):
taskMgr.add(self.__simulationTask, 'simulation task')
def stopSim(self):
taskMgr.remove('simulation task')
def __simulationTask(self, task):
self.DTA += globalClock.getDt()
self.frameCounter += 1
if self.frameCounter >= 10:
self.frameCounter = 0
startTime = globalClock.getRealTime()
colCount = 0
while self.DTA >= self.DTAStep:
self.DTA -= self.DTAStep
self.preStep()
self.simulate()
self.postStep()
if self.canRender:
self.placeBodies()
if self.frameCounter == 0:
endTime = globalClock.getRealTime() - startTime
return task.cont
def simulate(self):
self.colCount = self.space.autoCollide()
if self.maxColCount < self.colCount:
self.maxColCount = self.colCount
self.notify.debug('New Max Collision Count %s' % self.maxColCount)
self.world.quickStep(self.DTAStep)
for bodyPair in self.bodyList:
self.world.applyDampening(self.DTAStep, bodyPair[1])
self.contactgroup.empty()
self.commonObjectControl()
self.timingSimTime = self.timingSimTime + self.DTAStep
def placeBodies(self):
for pair in self.odePandaRelationList:
pandaNodePathGeom = pair[0]
odeBody = pair[1]
if pandaNodePathGeom:
pandaNodePathGeom.setPos(odeBody.getPosition())
rotation = odeBody.getRotation() * (180.0 / math.pi)
pandaNodePathGeom.setQuat(Quat(odeBody.getQuaternion()[0], odeBody.getQuaternion()[1], odeBody.getQuaternion()[2], odeBody.getQuaternion()[3]))
def preStep(self):
pass
def postStep(self):
if self.showContacts and self.canRender:
for count in range(self.jointMarkerCount):
pandaNodePathGeom = self.jointMarkers[count]
if count < self.colCount:
pandaNodePathGeom.setPos(self.space.getContactData(count * 3 + 0), self.space.getContactData(count * 3 + 1), self.space.getContactData(count * 3 + 2))
else:
pandaNodePathGeom.setPos(0.0, 0.0, -100.0)
def commonObjectControl(self):
time = self.getCycleTime()
for key in self.commonObjectDict:
if key not in self.commonObjectInfoDict:
self.commonObjectInfoDict[key] = None
entry = self.commonObjectDict[key]
if entry[1] in [2, 4]:
type = entry[1]
body = entry[2]
motor = entry[3]
timeData = entry[4]
forceData = entry[5]
eventData = entry[6]
model = entry[7]
force = 0.0
for index in range(len(timeData)):
                    if (index == len(timeData) - 1 and timeData[index] < time) or (timeData[index] < time and timeData[index + 1] > time):
force = forceData[index]
event = eventData[index]
if event != self.commonObjectInfoDict[key]:
self.commonObjectEvent(key, model, type, force, event)
self.commonObjectInfoDict[key] = event
motor.setParamVel(force)
return
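    # Illustrative sketch (assumed values) of the timeData/forceData/eventData
    # triples consumed above, e.g. for a sliding platform (type 4):
    #   timeData  = (0.0, 1.0, 5.0, 6.0)   # cycle time at which each segment begins
    #   forceData = (-v, v, v, -v)         # motor velocity applied during the segment
    #   eventData = (-1, 1, -2, 2)         # event id reported via commonObjectEvent
    # At cycle time t the segment containing t is selected, so the motor velocity
    # reverses direction twice per timing cycle.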
def commonObjectEvent(self, key, model, type, force, event):
self.notify.debug('commonObjectForceEvent %s %s %s %s %s' % (key,
model,
type,
force,
event))
def getCommonObjectData(self):
objectStream = [(0,
0,
self.getCycleTime(),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0)]
for key in self.commonObjectDict:
objectPair = self.commonObjectDict[key]
object = objectPair[2]
pos3 = object.getPosition()
quat4 = object.getQuaternion()
anV3 = object.getAngularVel()
lnV3 = object.getLinearVel()
data = (objectPair[0],
objectPair[1],
pos3[0],
pos3[1],
pos3[2],
quat4[0],
quat4[1],
quat4[2],
quat4[3],
anV3[0],
anV3[1],
anV3[2],
lnV3[0],
lnV3[1],
lnV3[2])
objectStream.append(data)
if len(objectStream) <= 1:
data = (0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
objectStream.append(data)
return objectStream
def useCommonObjectData(self, objectData, enable = 1):
if not objectData:
return
if objectData[1][1] == 99:
return
time = objectData[0]
self.setTimeIntoCycle(time[2])
if time[2] > self.timingCycleLength:
pass
for dataIndex in range(1, len(objectData)):
data = objectData[dataIndex]
commonObject = self.commonObjectDict[data[0]]
commonObject[2].setPosition(data[2], data[3], data[4])
commonObject[2].setQuaternion(Quat(data[5], data[6], data[7], data[8]))
commonObject[2].setAngularVel(data[9], data[10], data[11])
commonObject[2].setLinearVel(data[12], data[13], data[14])
if enable:
commonObject[2].enable()
else:
commonObject[2].disable()
def createCommonObject(self, type, commonId, pos, hpr, sizeX = 0, sizeY = 0, moveDistance = 0):
        if commonId is None:
commonId = self.commonId
self.commonId += 1
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
rHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
if type == 0:
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0)
box.setPosition(vPos)
self.placerNode.setHpr(vHpr)
box.setQuaternion(self.placerNode.getQuat())
self.commonObjectDict[commonId] = (commonId, type, box)
elif type == 1:
model, cross = self.createCross(self.world, self.space, 1.0, 3.0, 12.0, 2.0, 2)
motor = OdeHingeJoint(self.world)
cross.setPosition(vPos)
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
motor.setParamVel(1.5)
motor.setParamFMax(500000000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attach(0, cross)
motor.setAnchor(vPos)
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 2:
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attach(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(3.0)
motor.setParamFMax(5000000.0)
motor.setParamHiStop(10.0)
motor.setParamLoStop(-10.0)
timeData = (0.0, 5.0)
forceData = (3.0, -3.0)
eventData = (1, 2)
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model)
elif type == 3:
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(0, 0, 0)
if self.canRender:
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b')
else:
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b.bam')
myModel.reparentTo(self.root)
myModel.setPos(vPos)
myModel.setHpr(vHpr)
millFan = myModel.find('**/windmillFan0')
millBase = myModel.find('**/arm')
rod = myModel.find('**/rod')
rod.wrtReparentTo(millBase)
self.windmillFanNodePath = millFan
self.windmillBaseNodePath = millBase
millData = OdeTriMeshData(millBase)
millGeom = OdeTriMeshGeom(self.space, millData)
self.meshDataList.append(millData)
millGeom.setPosition(self.subPlacerNode.getPos(self.root))
millGeom.setQuaternion(self.subPlacerNode.getQuat())
millGeom.setCollideBits(BitMask32(251658240))
millGeom.setCategoryBits(BitMask32(8388608))
self.space.setCollideId(millGeom, 8)
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]) + 5)
vHpr = Vec3(float(hpr[0]), float(hpr[1] + 90), float(hpr[2]) - 90)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
model, cross = self.createPinWheel(self.world, self.space, 10.0, 1.6, 4.0, 0.6, 5, 3.7, 1.2, 1, millFan, (0, 0, 90), (-4.6, -0.5, -0.25), 20)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
motor = OdeHingeJoint(self.world)
cross.setPosition(self.subPlacerNode.getPos(self.root))
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = self.root.getRelativeVector(self.subPlacerNode, Vec3(0, 0, 1))
motor.setParamVel(1.0)
motor.setParamFMax(50000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attach(0, cross)
motor.setAnchor(self.subPlacerNode.getPos(self.root))
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 4:
ourAxis = self.root.getRelativeVector(self.placerNode, Vec3(0, 1, 0))
model, box = self.createBox(self.world, self.space, 50.0, sizeX, sizeY, 1.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attach(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(moveDistance / 4.0)
motor.setParamFMax(25000.0)
motor.setParamHiStop(moveDistance)
motor.setParamLoStop(0)
timeData = (0.0, 1.0, 5.0, 6.0)
forceData = (-moveDistance / 4.0,
moveDistance / 4.0,
moveDistance / 4.0,
-moveDistance / 4.0)
eventData = (-1, 1, -2, 2)
radius = moveDistance + sizeY * 0.5
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model,
radius)
return [type,
commonId,
(pos[0], pos[1], pos[2]),
(hpr[0], hpr[1], hpr[2]),
sizeX,
sizeY,
moveDistance]
def createSphere(self, world, space, density, radius, ballIndex = None):
self.notify.debug('create sphere index %s' % ballIndex)
body = OdeBody(world)
M = OdeMass()
M.setSphere(density, radius)
body.setMass(M)
body.setPosition(0, 0, -100)
geom = OdeSphereGeom(space, radius)
self.space.setSurfaceType(geom, 1)
self.notify.debug('collide ID is %s' % self.space.setCollideId(geom, 42))
self.massList.append(M)
self.geomList.append(geom)
if ballIndex == 1:
self.notify.debug('1')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 2:
self.notify.debug('2')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 3:
self.notify.debug('3')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 4:
self.notify.debug('4')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
else:
geom.setCollideBits(BitMask32(4294967295L))
geom.setCategoryBits(BitMask32(4294967295L))
geom.setBody(body)
if self.notify.getDebug():
self.notify.debug('golf ball geom id')
geom.write()
self.notify.debug(' -')
self.notify.debug('Collide Bits %s' % geom.getCollideBits())
if self.canRender:
testball = render.attachNewNode('Ball Holder')
ballmodel = loader.loadModel('phase_6/models/golf/golf_ball')
ballmodel.reparentTo(testball)
ballmodel.setColor(*GolfGlobals.PlayerColors[ballIndex - 1])
testball.setPos(0, 0, -100)
self.odePandaRelationList.append((testball, body))
else:
testball = None
self.bodyList.append((None, body))
return (testball, body, geom)
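    # Note (illustrative, not from the original source): the bit masks above are
    # plain decimal literals; 16777215 == 0x00FFFFFF and 4278190080 == 0xFF000000,
    # so a golf ball collides against geometry in the low 24 bits while advertising
    # its own category in the high byte that obstacle geoms collide against.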
def createBox(self, world, space, density, lx, ly, lz, colOnlyBall = 0):
body = OdeBody(self.world)
M = OdeMass()
M.setSphere(density, 0.3 * (lx + ly + lz))
body.setMass(M)
boxsize = Vec3(lx, ly, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 7)
self.massList.append(M)
self.geomList.append(geom)
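        # A truthy colOnlyBall restricts this box to colliding with the
        # golf-ball category bits (0x0F000000) while exposing no category of
        # its own; the separate == 2 branch below is never reached as written.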
if colOnlyBall:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if self.canRender:
color = random.choice([Vec4(1.0, 0.0, 0.5, 1.0), Vec4(0.5, 0.5, 1.0, 1.0), Vec4(0.5, 1.0, 0.5, 1.0)])
boxsize = Vec3(lx, ly, lz)
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, color, 1)
boxNodePathGeom.setPos(0, 0, -100)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross(self, world, space, density, lx, ly, lz, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly, lz)
boxsize2 = Vec3(ly, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 26)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
if self.canRender:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(0, 0, -100)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(boxNodePathGeom, ly, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(0, 0, 0)
if attachedGeo:
attachedGeo.reparentTo(boxNodePathGeom)
attachedGeo.setHpr(0, 0, 90)
attachedGeo.setPos(-4.8, 0, -2.0)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross2(self, world, space, density, lx, ly, lz, latSlide, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(-latSlide, ly * 0.25, 0)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
geom2.setOffsetPosition(ly * 0.25, latSlide, 0)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 13)
geom3 = OdeBoxGeom(space, boxsize)
geom3.setBody(body)
geom3.setOffsetPosition(latSlide, -ly * 0.25, 0)
self.space.setSurfaceType(geom3, 0)
self.space.setCollideId(geom3, 13)
geom4 = OdeBoxGeom(space, boxsize2)
geom4.setBody(body)
geom4.setOffsetPosition(-ly * 0.25, -latSlide, 0)
self.space.setSurfaceType(geom4, 0)
self.space.setCollideId(geom4, 13)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
self.geomList.append(geom3)
self.geomList.append(geom4)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(251658240))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(251658240))
geom4.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(0))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(0))
geom4.setCategoryBits(BitMask32(0))
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
if attachedGeo:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(-latSlide, ly * 0.25, 0)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(ly * 0.25, latSlide, 0)
boxNodePathGeom3, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom3.setPos(latSlide, -ly * 0.25, 0)
boxNodePathGeom4, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom4.setPos(-ly * 0.25, -latSlide, 0)
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def createPinWheel(self, world, space, density, lx, ly, lz, numBoxes, disV, disH, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None, offRot = 0):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
self.massList.append(M)
self.placerNode.setPos(0, 0, 0)
self.placerNode.setHpr(0, 0, 0)
self.subPlacerNode.setHpr(0, 0, 0)
self.subPlacerNode.setPos(disH, disV, 0)
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
else:
someNodePathGeom = self.root.attachNewNode('pinwheel')
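        # Rotate the placer node through each blade's angle and read the
        # sub-placer's pose back in root space to offset both the ODE geom
        # and (when rendering) its visual box.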
for num in range(numBoxes):
spin = 360.0 * float(num) / float(numBoxes) + float(offRot)
self.placerNode.setH(spin)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(self.subPlacerNode.getPos(self.root))
geom.setOffsetQuaternion(self.subPlacerNode.getQuat(self.root))
self.geomList.append(geom)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if not attachedGeo:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(self.subPlacerNode.getPos(self.root))
boxNodePathGeom.setHpr(self.subPlacerNode.getHpr(self.root))
if attachedGeo and self.canRender:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
if self.canRender:
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def attachMarker(self, body):
if self.canRender:
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.25)
testMarker.setPos(0.0, 0.0, -100.0)
self.odePandaRelationList.append((testMarker, body))
| {
"content_hash": "f2fda6ca33d78242b5b4fda398aab6cd",
"timestamp": "",
"source": "github",
"line_count": 719,
"max_line_length": 170,
"avg_line_length": 41.55910987482615,
"alnum_prop": 0.5803687962250259,
"repo_name": "ksmit799/Toontown-Source",
"id": "ec88aba26b62bd9b21cee548b9f6ce91fba49e33",
"size": "29881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/golf/PhysicsWorldBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""
Packet definitions, enumerations, and helper functions for IEEE 802.11a physical
layer protocol.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-09-27 22:15:57 -0500 (Tue, 27 Sep 2011) $
* $LastChangedRevision: 5167 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:var DOT11A_USEPARITY:
Boolean flag; if true, use parity check parameter in `Dot11A`.
:var DOT11A_USE_PIECEWISE_PER:
Boolean flag; if true, use SINR heap to calculate PER in piecewise fashion.
:var DOT11A_VERBOSE:
Constant enumeration to control verbose thresholds in this file.
Setting the verbose level of a `Dot11APHY` above this threshold will cause
the corresponding output in this class to be written (or logged).
:var DOT11A_NBPSC: Number of coded bits per subcarrier.
:var DOT11A_NDBPS: Number of data bits per OFDM symbol.
:var DOT11A_MTYPE: Modulation type enumeration for M-QAM constellations.
:var DOT11A_CODERATE:
Punctured convolution code rate used for forward error correction (contains
string representation of ratio).
:var DOT11A_DATARATE:
Effective data rate in bits-per-second (bps) for OFDM payload of waveform
generated by IEEE 802.11a physical layer; does not account for training
overhead.
:var DOT11A_TFFT: Duration of FFT Period.
:var DOT11A_TGI: Duration of guard interval (GI).
:var DOT11A_TSYM: Duration of an OFDM symbol (TFFT+TGI).
:var DOT11A_TSHORT: Short training sequence duration.
:var DOT11A_TLONG: Long training sequence duration.
:var DOT11A_TSIGNAL: Duration of SIGNAL OFDM symbol.
:var DOT11A_TDETECT: Duration allowed for packet detection.
:var DOT11A_MAXPOWER: Maximum transmit power (in dBm).
:var DOT11A_NOISEFIGURE: Noise factor due to digital impairments (in dB).
:var DOT11A_ANALOGLOSS: Loss factor associated with analog impairments (in dB).
:var DOT11A_SYSTEMLOSS: Total system loss, i.e. NOISEFIGURE + ANALOGLOSS (in dB).
:var DOT11A_CARRIER: Carrier frequency (in Hz).
:var DOT11A_BANDWIDTH: System bandwidth (in Hz).
:var DOT11A_RXSENSITIVITY:
Minimum receiver power (in dBm) to achieve a packet error rate less than 10%
for a PSDU of length 1000 bytes, using the base rate (i.e. BPSK, 1/2 coding).
:var DOT11A_CSTHRESHOLD:
Carrier sense threshold (in dBm); receive power threshold above which a
carrier sense busy should automatically be assessed.
:var DOT11A_FDTHRESHOLD:
Frame detection threshold (in dB); during frame detection, the
signal-to-interference-and-noise ratio (SINR) must exceed this value in
order for the packet detector to find an incoming frame.
"""
__docformat__ = "restructuredtext en"
from scapy.all import Packet, BitField, ActionField
from numpy import inf
DOT11A_USEPARITY = 0
DOT11A_USE_PIECEWISE_PER = 1
DOT11A_VERBOSE = 63
DOT11A_NBPSC = [ 1, 1, 2, 2, 4, 4, 6, 6]
DOT11A_NDBPS = [ 24, 36, 48, 72, 96, 144, 192, 216]
DOT11A_MTYPE = ["BPSK","BPSK","QPSK","QPSK","16QAM","16QAM","64QAM","64QAM"]
DOT11A_CODERATE = [ "1/2", "3/4", "1/2", "3/4", "1/2", "3/4", "2/3", "3/4"]
DOT11A_DATARATE = [ 6e6, 9e6, 12e6, 18e6, 24e6, 36e6, 48e6, 54e6]
DOT11A_TFFT = 3.2e-6
DOT11A_TGI = 0.8e-6
DOT11A_TSYM = 4.0e-6
DOT11A_TSHORT = 8.0e-6
DOT11A_TLONG = 8.0e-6
DOT11A_TSIGNAL = 4.0e-6
DOT11A_TDETECT = 4.0e-6
DOT11A_MAXPOWER = 16.02
DOT11A_NOISEFIGURE = 10.0
DOT11A_ANALOGLOSS = 5.0
DOT11A_SYSTEMLOSS = DOT11A_ANALOGLOSS + DOT11A_NOISEFIGURE
DOT11A_CARRIER = 5.25e9
DOT11A_BANDWIDTH = 20e6
DOT11A_RXSENSITIVITY = -82
DOT11A_CSTHRESHOLD = -62
## FIXME: No standard value for detection threshold
#DOT11A_FDTHRESHOLD = -inf # all packets are detected
DOT11A_FDTHRESHOLD = 0.0 # SINR threshold
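# Illustrative cross-check (sketch): every effective data rate equals the data
# bits per OFDM symbol divided by the symbol duration, e.g. 24 / 4.0e-6 = 6 Mbps.
assert all(abs(rate - nbits / DOT11A_TSYM) < 1.0
           for rate, nbits in zip(DOT11A_DATARATE, DOT11A_NDBPS))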
class Dot11A(Packet):
"""Packet format for IEEE 802.11a physical layer packet header.
    This class inherits from the `Packet` class from Scapy. All the fields will
be encoded as part of the SIGNAL field in the IEEE 802.11a packet.
4 1 12 1 6
----------------------------------------
| rate | rsvd | length | parity | tail |
----------------------------------------
"""
name = "802.11A PHY"
fields_desc = [ActionField(BitField('rate', 0, 4), \
'updatepar', param='rate'), \
ActionField(BitField('reserved', 0, 1), \
'updatepar', param='reserved'), \
ActionField(BitField('length', 0, 12), \
'updatepar', param='length'), \
BitField('parity', 0, 1), \
ActionField(BitField('tail', 0, 6), \
'updatepar', param='tail') ]
def updatepar(self, val, fld, param=None, **kwargs):
"""Update parity check value."""
if not DOT11A_USEPARITY: return
rate, reserved = self.rate, self.reserved
length, tail = self.length, self.tail
if (param=="rate"): rate = val
if (param=="reserved"): reserved = val
if (param=="length"): length = val
if (param=="tail"): tail = val
chksum = rate+reserved+length+tail
self.parity = (chksum%2)
def checkpar(self):
"""Check parity of checksum."""
if not DOT11A_USEPARITY: return True
chksum = self.rate+self.reserved+self.length+self.tail
return (self.parity==(chksum%2) )
def isdot11a(p):
"""Is packet an IEEE 802.11a packet?"""
isdot11a = isinstance(p, Packet) and p.haslayer(Dot11A)
return isdot11a
def get_dot11a(p):
"""Extract IEEE 802.11a physical layer packet form `p`.
:return: IEEE 802.11a packet or `None` if no matching layer found.
"""
dot11a = None
if isdot11a(p):
dot11a = p[Dot11A]
return dot11a
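# Minimal usage sketch (assumes Scapy is installed, as imported above): build a
# SIGNAL header, look up the MCS parameters for its rate index, and run it
# through the helpers defined in this module.
if __name__ == '__main__':
    hdr = Dot11A()
    hdr.rate = 5                  # index into the DOT11A_* tables above
    hdr.length = 1000             # PSDU length in bytes
    print("MCS %d -> %s, rate %s, %.0f bps" %
          (hdr.rate, DOT11A_MTYPE[hdr.rate], DOT11A_CODERATE[hdr.rate],
           DOT11A_DATARATE[hdr.rate]))
    print("isdot11a: %s, parity ok: %s" % (isdot11a(hdr), hdr.checkpar()))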
| {
"content_hash": "058f19ce41219667c872d88450537a71",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 81,
"avg_line_length": 35.61538461538461,
"alnum_prop": 0.6505708114779389,
"repo_name": "reidlindsay/wins",
"id": "48874d91c784e5c0186c1547757b3fe566098667",
"size": "6507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wins/ieee80211/dot11a_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5653"
},
{
"name": "C++",
"bytes": "51883"
},
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "1193050"
},
{
"name": "Shell",
"bytes": "665341"
}
],
"symlink_target": ""
} |
"""
my_method does a thing. There are many like it, but this one is mine.
? Do we really need this?
! Deprecated
@param my_param Do some stuff with this
"""
# ! Import the modules
import sys
import random
# * var to set up loop
myVar = True
# ? will this loop ever terminate?
while myVar:
# TODO: localise the output
question = raw_input("Ask the magic 8 ball a question: (press enter to quit) ")
answers = random.randint(1,8)
if question == "":
sys.exit()
elif answers == 1:
print "It is certain"
elif answers == 2:
print "Outlook good"
elif answers == 3:
print "You may rely on it"
elif answers == 4:
print "Ask again later"
elif answers == 5:
print "Concentrate and ask again"
elif answers == 6:
print "Reply hazy, try again"
elif answers == 7:
print "My reply is no"
elif answers == 8:
print "My sources say no"
# ! Import the modules
import sys
import random
# * var to set up loop
myVar = True
# ? will this loop ever terminate?
while myVar:
# TODO: localise the output
question = raw_input("Ask the magic 8 ball a question: (press enter to quit) ")
answers = random.randint(1,8)
if question == "":
sys.exit()
elif answers == 1:
print "It is certain"
elif answers == 2:
print "Outlook good"
elif answers == 3:
print "You may rely on it"
elif answers == 4:
print "Ask again later"
elif answers == 5:
print "Concentrate and ask again"
elif answers == 6:
print "Reply hazy, try again"
elif answers == 7:
print "My reply is no"
elif answers == 8:
print "My sources say no"
# ! Import the modules
import sys
import random
# * var to set up loop
myVar = True
# ? will this loop ever terminate?
while myVar:
# TODO: localise the output
question = raw_input("Ask the magic 8 ball a question: (press enter to quit) ")
answers = random.randint(1,8)
if question == "":
sys.exit()
elif answers == 1:
print "It is certain"
elif answers == 2:
print "Outlook good"
elif answers == 3:
print "You may rely on it"
elif answers == 4:
print "Ask again later"
elif answers == 5:
print "Concentrate and ask again"
elif answers == 6:
print "Reply hazy, try again"
elif answers == 7:
print "My reply is no"
elif answers == 8:
print "My sources say no"
# ! Import the modules
import sys
import random
# * var to set up loop
myVar = True
# ? will this loop ever terminate?
while myVar:
# TODO: localise the output
question = raw_input("Ask the magic 8 ball a question: (press enter to quit) ")
answers = random.randint(1,8)
if question == "":
sys.exit()
elif answers == 1:
print "It is certain"
elif answers == 2:
print "Outlook good"
elif answers == 3:
print "You may rely on it"
elif answers == 4:
print "Ask again later"
elif answers == 5:
print "Concentrate and ask again"
elif answers == 6:
print "Reply hazy, try again"
elif answers == 7:
print "My reply is no"
elif answers == 8:
print "My sources say no"
# ! Import the modules
import sys
import random
# * var to set up loop
myVar = True
# ? will this loop ever terminate?
while myVar:
# TODO: localise the output
question = raw_input("Ask the magic 8 ball a question: (press enter to quit) ")
answers = random.randint(1,8)
if question == "":
sys.exit()
elif answers == 1:
print "It is certain"
elif answers == 2:
print "Outlook good"
elif answers == 3:
print "You may rely on it"
elif answers == 4:
print "Ask again later"
elif answers == 5:
print "Concentrate and ask again"
elif answers == 6:
print "Reply hazy, try again"
elif answers == 7:
print "My reply is no"
elif answers == 8:
print "My sources say no" | {
"content_hash": "64b8c3b58dc5883ce9c8a6b94116d02d",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 83,
"avg_line_length": 20.138248847926267,
"alnum_prop": 0.5528604118993135,
"repo_name": "aaron-bond/better-comments",
"id": "0fdeb07f30e06d7e5d8e4f134e30fdc98615f56e",
"size": "4394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/samples/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AL",
"bytes": "2980"
},
{
"name": "Apex",
"bytes": "297"
},
{
"name": "Brightscript",
"bytes": "198"
},
{
"name": "C#",
"bytes": "4887"
},
{
"name": "COBOL",
"bytes": "1982"
},
{
"name": "CSS",
"bytes": "105"
},
{
"name": "Classic ASP",
"bytes": "28"
},
{
"name": "Clojure",
"bytes": "22606"
},
{
"name": "ColdFusion",
"bytes": "1198"
},
{
"name": "Common Lisp",
"bytes": "8651"
},
{
"name": "D",
"bytes": "286"
},
{
"name": "Dart",
"bytes": "821"
},
{
"name": "EJS",
"bytes": "326"
},
{
"name": "Elixir",
"bytes": "300"
},
{
"name": "Elm",
"bytes": "564"
},
{
"name": "Fortran",
"bytes": "278"
},
{
"name": "FreeMarker",
"bytes": "320"
},
{
"name": "GDScript",
"bytes": "382"
},
{
"name": "Groovy",
"bytes": "284"
},
{
"name": "HCL",
"bytes": "303"
},
{
"name": "HTML",
"bytes": "2464"
},
{
"name": "Haskell",
"bytes": "395"
},
{
"name": "HiveQL",
"bytes": "352"
},
{
"name": "JavaScript",
"bytes": "3318"
},
{
"name": "Lua",
"bytes": "182"
},
{
"name": "MATLAB",
"bytes": "247"
},
{
"name": "Nim",
"bytes": "4743"
},
{
"name": "PHP",
"bytes": "245"
},
{
"name": "PLSQL",
"bytes": "34"
},
{
"name": "Pascal",
"bytes": "2628"
},
{
"name": "PigLatin",
"bytes": "759"
},
{
"name": "PowerShell",
"bytes": "331"
},
{
"name": "Puppet",
"bytes": "216"
},
{
"name": "Python",
"bytes": "4394"
},
{
"name": "Racket",
"bytes": "2796"
},
{
"name": "SAS",
"bytes": "346"
},
{
"name": "SCSS",
"bytes": "155"
},
{
"name": "Sass",
"bytes": "162"
},
{
"name": "ShaderLab",
"bytes": "655"
},
{
"name": "Shell",
"bytes": "139"
},
{
"name": "Solidity",
"bytes": "277"
},
{
"name": "Stata",
"bytes": "630"
},
{
"name": "Stylus",
"bytes": "158"
},
{
"name": "Svelte",
"bytes": "431"
},
{
"name": "Tcl",
"bytes": "588"
},
{
"name": "TeX",
"bytes": "323"
},
{
"name": "Twig",
"bytes": "310"
},
{
"name": "TypeScript",
"bytes": "26829"
},
{
"name": "VHDL",
"bytes": "12"
},
{
"name": "Vala",
"bytes": "453"
},
{
"name": "Verilog",
"bytes": "726"
},
{
"name": "Vue",
"bytes": "1110"
}
],
"symlink_target": ""
} |
"""
Test CuisineCore module
"""
import unittest
from unittest import mock
from unittest.mock import patch, PropertyMock, MagicMock
import copy
from JumpScale import j
@patch('JumpScale.core.redis.Redis.hget')
@patch('JumpScale.core.redis.Redis.hset')
class TestCuisineCore(unittest.TestCase):
def setUp(self):
self.dump_env = {
'HOME': '/root',
'HOSTNAME': 'js8-core',
'JSBASE': '/js/path',
}
self.core = j.tools.cuisine.local.core
self.dir_paths = {'appDir': '/js/path/apps',
'base': '/js/path',
'binDir': '/js/path/bin',
'cfgDir': '/optvar//cfg',
'codeDir': '/opt/code/',
'goDir': '/optvar/go/',
'homeDir': '/root',
'hrdDir': '/optvar//hrd',
'jsLibDir': '/js/path/lib/JumpScale/',
'libDir': '/js/path/lib/',
'logDir': '/optvar//log',
'optDir': '/opt/',
'pidDir': '/optvar//pid',
'tmpDir': '/optvar//tmp',
'tmplsDir': '/js/path/templates',
'varDir': '/optvar/'
}
def tearDown(self):
pass
def test_isJS8Sandbox_property(self, cache_set_mock, cache_get_mock):
"""
Test accessing the isJS8Sandbox property
"""
cache_get_mock.return_value = None
self.assertIsNotNone(self.core.isJS8Sandbox)
@patch('JumpScale.j.tools.cuisine.local.core.getenv')
def test_dir_paths_property_if_JSBASE_and_linux(self, getenv_mock, cache_set_mock, cache_get_mock):
"""
Happy Path: Test accessing the dir_paths property if JSBASE in env
"""
cache_get_mock.return_value = None
getenv_mock.return_value = self.dump_env
result = self.core.dir_paths
self.assertEqual(result, self.dir_paths)
@patch('JumpScale.j.tools.cuisine.local.core.getenv')
def test_dir_paths_property_if_linux(self, getenv_mock, cache_set_mock, cache_get_mock):
"""
Happy Path: Test accessing the dir_paths property if JSBASE not found in env
"""
cache_get_mock.return_value = None
# remove JSBASE from dump_env
dump_env = copy.deepcopy(self.dump_env)
del dump_env['JSBASE']
getenv_mock.return_value = dump_env
expected_result = {
'appDir': '/opt/jumpscale8//apps',
'base': '/opt/jumpscale8/',
'binDir': '/opt/jumpscale8//bin',
'cfgDir': '/optvar//cfg',
'codeDir': '/opt/code/',
'goDir': '/optvar/go/',
'homeDir': '/root',
'hrdDir': '/optvar//hrd',
'jsLibDir': '/opt/jumpscale8//lib/JumpScale/',
'libDir': '/opt/jumpscale8//lib/',
'logDir': '/optvar//log',
'optDir': '/opt/',
'pidDir': '/optvar//pid',
'tmpDir': '/optvar//tmp',
'tmplsDir': '/opt/jumpscale8//templates',
'varDir': '/optvar/'
}
result = self.core.dir_paths
self.assertEqual(result, expected_result)
@patch('JumpScale.tools.cuisine.CuisineCore.CuisineCore.isMac', new_callable=PropertyMock)
@patch('JumpScale.j.tools.cuisine.local.core.getenv')
def test_dir_paths_property_if_not_linux(self, getenv_mock, mac_mock, cache_set_mock, cache_get_mock):
"""
Happy Path: Test accessing the dir_paths property if JSBASE not found in env and not linux
"""
cache_get_mock.return_value = None
mac_mock.return_value = True
# remove JSBASE from dump_env
dump_env = copy.deepcopy(self.dump_env)
del dump_env['JSBASE']
getenv_mock.return_value = dump_env
expected_result = {
'appDir': '/root/opt/jumpscale8//apps',
'base': '/root/opt/jumpscale8/',
'binDir': '/root/opt/jumpscale8//bin',
'cfgDir': '/root/optvar//cfg',
'codeDir': '/root/opt/code/',
'goDir': '/root/optvar/go/',
'homeDir': '/root',
'hrdDir': '/root/optvar//hrd',
'jsLibDir': '/root/opt/jumpscale8//lib/JumpScale/',
'libDir': '/root/opt/jumpscale8//lib/',
'logDir': '/root/optvar//log',
'optDir': '/root/opt/',
'pidDir': '/root/optvar//pid',
'tmpDir': '/root/optvar//tmp',
'tmplsDir': '/root/opt/jumpscale8//templates',
'varDir': '/root/optvar/'
}
result = self.core.dir_paths
self.assertEqual(result, expected_result)
self.assertEqual(mac_mock.call_count, 2)
@patch('JumpScale.tools.cuisine.CuisineCore.CuisineCore.isMac', new_callable=PropertyMock)
@patch('JumpScale.j.tools.cuisine.local.core.getenv')
def test_dir_paths_property_if_JSBASE_and_not_linux(self, getenv_mock, mac_mock, cache_set_mock, cache_get_mock):
"""
Happy Path: Test accessing the dir_paths property if JSBASE in env and not linux
"""
cache_get_mock.return_value = None
mac_mock.return_value = True
getenv_mock.return_value = self.dump_env
expected_result = {
'appDir': '/js/path/apps',
'base': '/js/path',
'binDir': '/js/path/bin',
'cfgDir': '/root/optvar//cfg',
'codeDir': '/root/opt/code/',
'goDir': '/root/optvar/go/',
'homeDir': '/root',
'hrdDir': '/root/optvar//hrd',
'jsLibDir': '/js/path/lib/JumpScale/',
'libDir': '/js/path/lib/',
'logDir': '/root/optvar//log',
'optDir': '/root/opt/',
'pidDir': '/root/optvar//pid',
'tmpDir': '/root/optvar//tmp',
'tmplsDir': '/js/path/templates',
'varDir': '/root/optvar/'
}
result = self.core.dir_paths
self.assertEqual(result, expected_result)
mac_mock.assert_called_once_with()
@unittest.skip("Needs fixing")
def test_args_replace(self):
"""
Test args replace
"""
with patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
executor_mock = MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
cuisine_core.getenv = MagicMock()
cuisine_core.getenv.return_value = self.dump_env
cuisine_core.run = MagicMock()
cuisine_core.run.return_value = (0, 'hostname', '')
input_text = "$base:$appDir:$tmplsDir:$varDir:$binDir:$codeDir:$cfgDir:$homeDir:$jsLibDir:$libDir:$logDir:$pidDir:$tmpDir:$hostname"
expected_output = "/opt/jumpscale8/:/opt/jumpscale8//apps:/opt/jumpscale8//templates:/optvar/:/opt/jumpscale8//bin:/opt/code/:/optvar//cfg:/root:/opt/jumpscale8//lib/JumpScale/:/opt/jumpscale8//lib/:/optvar//log:/optvar//pid:/optvar//tmp:hostname"
actual_output = cuisine_core.args_replace(input_text)
self.assertEqual(expected_output, actual_output)
@unittest.skip("Needs fixing")
def test_file_get_tmp_path(self):
"""
Test file get tmp path
"""
with mock.patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
executor_mock = mock.MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
cuisine_core.run = mock.MagicMock()
cuisine_core.run.return_value = (0, 'hostname', '')
cuisine_core.getenv = mock.MagicMock()
cuisine_core.getenv.return_value = self.dump_env
j.data.idgenerator.generateXCharID.return_value = 10 * 'a'
expected_output = '/optvar//tmp/aaaaaaaaaa'
actual_output = cuisine_core.file_get_tmp_path()
self.assertEquals(expected_output, actual_output)
expected_output = '/optvar//tmp/path'
actual_output = cuisine_core.file_get_tmp_path(basepath="path")
self.assertEquals(expected_output, actual_output)
@unittest.skip("Needs fixing")
def test_file_download(self):
"""
Test file download
"""
with mock.patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
executor_mock = mock.MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
url = 'http://hallo.com/downloadme.txt'
to = '/tmp/path'
cuisine_core.file_exists = mock.MagicMock()
cuisine_core.file_exists.return_value = False
cuisine_core.createDir = mock.MagicMock()
cuisine_core.file_unlink = mock.MagicMock()
cuisine_core.run = mock.MagicMock()
cuisine_core.run.side_effect = [(33, '', 'err'), (0, 'Ok', '')]
cuisine_core.touch = mock.MagicMock()
cuisine_core.file_download(url, to)
self.assertTrue(cuisine_core.touch.called)
self.assertFalse(j.sal.fs.getBaseName.called)
@unittest.skip("Needs fixing")
def test_file_download_fail(self):
"""
Test file download wth failure
"""
with mock.patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
from JumpScale.core.errorhandling import JSExceptions
executor_mock = mock.MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
url = 'http://hallo.com/downloadme.txt'
to = '/tmp/path'
cuisine_core.file_exists = mock.MagicMock()
cuisine_core.file_exists.return_value = False
cuisine_core.createDir = mock.MagicMock()
cuisine_core.file_unlink = mock.MagicMock()
cuisine_core.run = mock.MagicMock()
cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]
cuisine_core.touch = mock.MagicMock()
j.exceptions.RuntimeError = JSExceptions.RuntimeError
self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)
@unittest.skip("Needs fixing")
def test_file_expand(self):
"""
Test file expand
"""
with mock.patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
executor_mock = mock.MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
path = '/tmp/file.tgz'
to = '/tmp/dest'
cuisine_core.run = mock.MagicMock()
cuisine_core.args_replace = mock.MagicMock()
cuisine_core.file_expand(path, to)
@unittest.skip("Needs fixing")
def test_file_expand_fail(self):
"""
Test file expand failure case
"""
with mock.patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
from JumpScale.core.errorhandling import JSExceptions
executor_mock = mock.MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
path = '/tmp/file.txt'
to = '/tmp/dest'
cuisine_core.run = mock.MagicMock()
cuisine_core.args_replace = mock.MagicMock()
cuisine_core.args_replace.side_effect = (path, to)
j.exceptions.RuntimeError = JSExceptions.RuntimeError
self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_expand, path, to)
@unittest.skip("Needs fixing")
def test_touch(self):
"""
Test touch
"""
with mock.patch("JumpScale.j") as j_mock:
from JumpScale import j
import JumpScale.tools.cuisine.CuisineCore
JumpScale.tools.cuisine.CuisineCore.j = j
from JumpScale.tools.cuisine.CuisineCore import CuisineCore
executor_mock = mock.MagicMock()
j.tools.executor.getLocal.return_value = executor_mock
executor = j.tools.executor.getLocal()
cuisine = j.tools.cuisine.local
cuisine_core = CuisineCore(executor, cuisine)
cuisine_core.run = mock.MagicMock()
cuisine_core.args_replace = mock.MagicMock()
cuisine_core.file_write = mock.MagicMock()
self.assertIsNone(cuisine_core.touch('/tmp/hello'))
self.assertTrue(cuisine_core.file_write.called)
# def file_upload_binary(self, local, remote):
# def file_upload_local(self, local, remote):
# def file_download_binary(self, local, remote):
# def file_download_local(self,remote, local):
# def file_copy(self, source, dest, recursive=False, overwrite=False):
# def file_move(self, source, dest, recursive=False):
# def joinpaths(self, *args):
# def fs_find(self,path,recursive=True,pattern="",findstatement="",type="",contentsearch="",extendinfo=False):
# def sudo(self, cmd, die=True,showout=True):
# def run(self,cmd,die=True,debug=None,checkok=False,showout=True,profile=False,replaceArgs=True,check_is_ok=False):
# def run_script(self,content,die=True,profile=False):
# def command_location(self,command):
# def tmux_execute_jumpscript(self,script,sessionname="ssh", screenname="js"):
# def execute_jumpscript(self,script):
# def execute_jumpscript(self,script):
| {
"content_hash": "5f6469184287540d009391084c6eaeb1",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 259,
"avg_line_length": 44.14367816091954,
"alnum_prop": 0.5816950917849238,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "7bcba7c596e7d2b6148e86304b5dd20f73cfc469",
"size": "15362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tools/cuisine/TestCuisineCore.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
from bottle import route, mako_template as template, redirect, request, response, get, post
from bottle import static_file, view  # imported so static files such as html and css can be served directly without going through a controller
from model.documents import *
from setting import *
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'  # datetime format used when writing to the database
@post('/add_order', method = 'POST')
def add_item():
    # request.params picks up parameters passed via either GET or POST
mobile = request.params.get('mobile')
ordered = request.params.get('ordered')
restaurant = request.params.get('restaurant')
save_order(mobile, ordered, restaurant)
# typeObj = Order(mobile = mobile, ordered = ordered, restaurant = restaurant)
return redirect('/list_order')
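# Example request (sketch; assumes the app listens on localhost:8080):
#   curl -d 'mobile=13800000000&ordered=noodles&restaurant=Foo' \
#        http://localhost:8080/add_order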
def save_order(mobile, ordered, restaurant):
    typeObj = Order(mobile = mobile, ordered = ordered, restaurant = restaurant)
    typeObj.save()
@route('/list_order')
def list_item():
start = request.params.get('start') or '0'
size = request.params.get('size') or '1000'
types = Order.objects[int(start):(int(start) + int(size))]
data = {
'types': types
}
return template('views/system/order/list', data = data, site_opt = site_opt)
@route('/del_order')
def del_item():
id = request.params.get('id')
Order.objects(id=id).delete()
redirect('/list_order')
@route('/modify_order')
def modify_item():
id = request.params.get('id')
status = request.params.get('status')
Order.objects(id=id).update(set__status=int(status))
redirect('/list_order')
@route('/to_modify_order')
def to_modify_item():
id = request.params.get('id')
item = Order.objects(id=id)[0]
data = {
'item': item
}
return template('views/system/order/edit', data = data, site_opt = site_opt)
@post('/client_add_order', method = 'POST')
def client_add_item():
mobile = request.params.get('mobile')
ordered = request.params.get('ordered')
restaurant = request.params.get('restaurant')
save_order(mobile, ordered, restaurant)
return 'OK' | {
"content_hash": "847ab66a66629db6fd11cca7f7e21846",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 91,
"avg_line_length": 28.890625,
"alnum_prop": 0.7030827474310438,
"repo_name": "buddyli/android_intership",
"id": "1d02118c95d9845373c3d76059b01905cbc65a5c",
"size": "1980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/order_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47842"
},
{
"name": "HTML",
"bytes": "3058"
},
{
"name": "JavaScript",
"bytes": "94254"
},
{
"name": "Python",
"bytes": "1126446"
},
{
"name": "Smarty",
"bytes": "89162"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .call_request import CallRequestMessage, call_req_rw
from .call_request_continue import CallRequestContinueMessage, call_req_c_rw
from .call_response import CallResponseMessage, call_res_rw
from .call_response_continue import CallResponseContinueMessage, call_res_c_rw
from .cancel import CancelMessage, cancel_rw
from .claim import ClaimMessage, claim_rw
from .common import Tracing, ChecksumType
from .error import ErrorMessage, ErrorCode, error_rw
from .init_request import InitRequestMessage, init_req_rw
from .init_response import InitResponseMessage, init_res_rw
from .ping_request import PingRequestMessage, ping_req_rw
from .ping_response import PingResponseMessage, ping_res_rw
from .types import Types
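# RW maps each frame type id (see Types) to the reader/writer object used to
# serialize and deserialize message bodies of that type.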
RW = {
Types.CALL_REQ: call_req_rw,
Types.CALL_REQ_CONTINUE: call_req_c_rw,
Types.CALL_RES: call_res_rw,
Types.CALL_RES_CONTINUE: call_res_c_rw,
Types.CANCEL: cancel_rw,
Types.CLAIM: claim_rw,
Types.ERROR: error_rw,
Types.INIT_REQ: init_req_rw,
Types.INIT_RES: init_res_rw,
Types.PING_REQ: ping_req_rw,
Types.PING_RES: ping_res_rw,
}
__all__ = [
"RW",
"ChecksumType",
"CallRequestMessage",
"CallRequestContinueMessage",
"CallResponseMessage",
"CallResponseContinueMessage",
"CancelMessage",
"ClaimMessage",
"ErrorMessage",
"ErrorCode",
"InitRequestMessage",
"InitResponseMessage",
"PingRequestMessage",
"PingResponseMessage",
"Tracing",
]
| {
"content_hash": "cd37e98a6c78af32d90a3d79a1ddac06",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 59,
"avg_line_length": 30.74468085106383,
"alnum_prop": 0.7245674740484429,
"repo_name": "uber/tchannel-python",
"id": "92b86d034aea6fd41e8dd6fde69e68522e4eaa9c",
"size": "2548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tchannel/messages/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "597"
},
{
"name": "Makefile",
"bytes": "3621"
},
{
"name": "Python",
"bytes": "776222"
},
{
"name": "Shell",
"bytes": "1484"
},
{
"name": "Thrift",
"bytes": "13128"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from fristapp.models import People, Aritcle
from django.http import HttpResponse
from django.template import Context, Template
# Create your views here.
def first_try(request):
person = People(name='Spork', job="officer")
html_string = '''
<html lang="en">
<head>
<title>firstApp</title>
<meta charset="UTF-8">
<link href="https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.2.4/semantic.css" rel="stylesheet">
</head>
<body>
<h1 class="ui center aligned icon header">
<i class="hand spock icon"></i> Hello,{{person.name}}
</h1>
</body>
</html>
'''
t = Template(html_string)
c = Context({'person': person})
web_page = t.render(c)
return HttpResponse(web_page)
def index(request):
    queryset = request.GET.get('tag')
    if queryset:
        article_list = Aritcle.objects.filter(tag=queryset)
    else:
        article_list = Aritcle.objects.all()
    print(queryset)
    context = {}
    context['article_list'] = article_list
    index_page = render(request, 'firstweb.html', context)
return index_page
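# Example (sketch): requesting this view with ?tag=python returns only the
# articles carrying that tag; omit the parameter to list every article.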
| {
"content_hash": "2d8e0c78d1e8bf4de822b677d2571ee6",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 104,
"avg_line_length": 22.46,
"alnum_prop": 0.6518254674977738,
"repo_name": "LTMana/code",
"id": "4fa10f4db44c615a8b9c9ffa5d2a38600c24b4fd",
"size": "1123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Django/fristsite/fristapp/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "221642"
},
{
"name": "C++",
"bytes": "4282363"
},
{
"name": "CSS",
"bytes": "5671819"
},
{
"name": "DTrace",
"bytes": "1236"
},
{
"name": "HTML",
"bytes": "4754669"
},
{
"name": "JavaScript",
"bytes": "2590435"
},
{
"name": "Jupyter Notebook",
"bytes": "978217"
},
{
"name": "Makefile",
"bytes": "165"
},
{
"name": "Objective-C",
"bytes": "15506407"
},
{
"name": "Objective-C++",
"bytes": "1004991"
},
{
"name": "PHP",
"bytes": "60147"
},
{
"name": "Python",
"bytes": "33527"
},
{
"name": "Ruby",
"bytes": "4641"
},
{
"name": "Shell",
"bytes": "197483"
},
{
"name": "Swift",
"bytes": "462956"
},
{
"name": "Vue",
"bytes": "1701"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
from os import environ
from celery import Celery
from .utils.conf_builder import AppConfig
from .utils.scheduler import Scheduler
coyote_config = environ.get('COYOTECONFIG', 'coyote.yaml')
coyote = AppConfig(coyote_config)
sys.path += coyote.syspaths
app = Celery(include=coyote.includes)
# assume this shouldn't be trapped by coyote.halt_on_init_error = False
if coyote.celery_config is not None:
app.config_from_object(coyote.celery_config)
app.conf.update(CELERYBEAT_SCHEDULE=coyote.schedules,
CELERYBEAT_SCHEDULER=Scheduler)
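# Example (sketch): point COYOTECONFIG at another YAML file before starting a
# worker, e.g. COYOTECONFIG=/etc/coyote/prod.yaml celery -A coyote worker -B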
| {
"content_hash": "080daf2ba97497aa46b67932ce03152e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 31.736842105263158,
"alnum_prop": 0.7711442786069652,
"repo_name": "charlesthomas/coyote",
"id": "35641287f612e65b3b8dd7bad08857cc7e4ccbb2",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coyote/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "354"
},
{
"name": "Python",
"bytes": "20998"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "88"
}
],
"symlink_target": ""
} |
import os
import unittest
from typing import List
def binary_search(array, from_index, to_index, key):
while from_index <= to_index:
middle = (from_index + to_index) // 2
if array[middle] < key:
from_index = middle + 1
elif array[middle] > key:
to_index = middle - 1
else:
return middle
return -1
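# pairs() sorts the input once and, for each element x, binary-searches the
# suffix after it for x + k, so the whole count runs in O(n log n) time.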
def pairs(k: int, arr: List[int]) -> int:
arr = sorted(arr)
count = 0
for i in range(0, len(arr) - 1):
if binary_search(arr, i + 1, len(arr) - 1, arr[i] + k) >= 0:
count += 1
return count
class TestCode(unittest.TestCase):
def runner(self, name):
io_lines = [[[]]] * 2
for index, template in enumerate(['input%s.txt', 'output%s.txt']):
path = os.path.join(os.path.split(__file__)[0], template % name)
with open(path, 'r') as handle:
lines = handle.readlines()
io_lines[index] = [line.strip().split(' ') for line in lines]
k = int(io_lines[0][0][1])
arr = [int(item) for item in io_lines[0][1]]
result = pairs(k, arr)
expected = int(io_lines[1][0][0])
self.assertEqual(expected, result)
def test_example(self):
self.runner('_example')
def test_other(self):
expected = 4
result = pairs(1, [1, 5, 3, 4, 2])
self.assertEqual(expected, result)
| {
"content_hash": "ebcf2e1a3d7d6c664e5fc18a3c10cfef",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 29.354166666666668,
"alnum_prop": 0.5386799148332151,
"repo_name": "altermarkive/Coding-Interviews",
"id": "be0a8731d057d3d84deb5d1f8c18b7c16d564334",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithm-design/hackerrank/pairs/pairs.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import os.path
biomas = ["AMAZONIA", "MATAATLANTICA", "PANTANAL", "CERRADO", "CAATINGA", "PAMPA", "ZONACOSTEIRA"]
'''
CLASSIFICACAO
'''
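# For each biome and year: `find` collects the matching rasters into a text
# file, then `gdalbuildvrt -input_file_list` stitches them into a virtual
# mosaic (.vrt) without copying any pixel data.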
# classification mosaic per biome and year
for bioma in biomas:
nextAno = 2008
print os.path.exists('../CLASSIFICACAOFT/BIOMA/' + bioma)
while nextAno <= 2015:
if os.path.exists('../CLASSIFICACAOFT/BIOMA/' + bioma):
strReplace = {'bioma': bioma, 'ano': nextAno}
cmdFind = "find ../CLASSIFICACAOFT/BIOMA/%(bioma)s/ -name '*%(ano)s*' > BIOMA_%(bioma)s_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list BIOMA_%(bioma)s_%(ano)s.txt BIOMA_%(bioma)s_%(ano)s.vrt" % strReplace
print bioma + " : " + str(nextAno)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
nextAno += 1
# classification mosaic for Brazil per year
nextAno = 2008
while nextAno <= 2015:
strReplace = {'ano': nextAno}
cmdFind = "find ../CLASSIFICACAOFT/BIOMA/ -name '*%(ano)s*' > PAIS_BRASIL_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list PAIS_BRASIL_%(ano)s.txt PAIS_BRASIL_%(ano)s.vrt" % strReplace
print "BRASIL : " + str(nextAno)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
nextAno += 1
'''
INTEGRACAO
'''
# integration mosaic per biome and year
for bioma in biomas:
nextAno = 2008
print os.path.exists('../INTEGRACAOC/BIOMA/' + bioma)
while nextAno <= 2015:
if os.path.exists('../INTEGRACAOC/BIOMA/' + bioma):
strReplace = {'bioma': bioma, 'ano': nextAno}
cmdFind = "find ../INTEGRACAOC/BIOMA/%(bioma)s/ -name '*%(ano)s*' > INTEGRACAO_BIOMA_%(bioma)s_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list INTEGRACAO_BIOMA_%(bioma)s_%(ano)s.txt INTEGRACAO_BIOMA_%(bioma)s_%(ano)s.vrt" % strReplace
print 'INTEGRACAO ' + bioma + " : " + str(nextAno)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
nextAno += 1
# integration mosaic for Brazil per year
nextAno = 2008
while nextAno <= 2015:
strReplace = {'ano': nextAno}
cmdFind = "find ../INTEGRACAOC/BIOMA/ -name '*%(ano)s*' > INTEGRACAO_PAIS_BRASIL_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list INTEGRACAO_PAIS_BRASIL_%(ano)s.txt INTEGRACAO_PAIS_BRASIL_%(ano)s.vrt" % strReplace
print "INTEGRACAO BRASIL : " + str(nextAno)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
nextAno += 1
'''# integration mosaic for Brazil per year
nextAno = 2008
while nextAno <= 2016:
strReplace = {'ano': nextAno}
if os.path.isdir("../INTEGRACAO/BRASIL/%(ano)s" % strReplace):
print("../INTEGRACAO/BRASIL/%(ano)s" % strReplace)
cmdFind = "find ../INTEGRACAO/BRASIL/%(ano)s/ -name *.tif > INTEGRACAO_PAIS_BRASIL_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list INTEGRACAO_PAIS_BRASIL_%(ano)s.txt INTEGRACAO_PAIS_BRASIL_%(ano)s.vrt" % strReplace
print "INTEGRACAO BRASIL : " + str(nextAno)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
nextAno += 1'''
'''
TRANSICAO
'''
transicaoAnos = ['2008-2009','2009-2010','2010-2011','2011-2012','2012-2013','2013-2014','2014-2015']
# transition mosaic per biome and year
for bioma in biomas:
print os.path.exists('../TRANSICAO/BIOMA/' + bioma)
for tano in transicaoAnos:
if os.path.exists('../TRANSICAO/BIOMA/' + bioma):
strReplace = {'bioma': bioma, 'ano': tano,'ano_': tano.replace('-','_')}
cmdFind = "find ../TRANSICAO/BIOMA/%(bioma)s/ -name '*%(ano_)s*' > TRANSICAO_BIOMA_%(bioma)s_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list TRANSICAO_BIOMA_%(bioma)s_%(ano)s.txt TRANSICAO_BIOMA_%(bioma)s_%(ano)s.vrt" % strReplace
print 'TRANSICAO ' + bioma + " : " + str(tano)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
# transition mosaic for Brazil per year
for tano in transicaoAnos:
strReplace = {'ano': tano,'ano_': tano.replace('-','_')}
cmdFind = "find ../TRANSICAO/BIOMA/ -name '*%(ano_)s*' > TRANSICAO_PAIS_BRASIL_%(ano)s.txt" % strReplace
cmdGdalbuildvrt = "gdalbuildvrt -allow_projection_difference -overwrite -input_file_list TRANSICAO_PAIS_BRASIL_%(ano)s.txt TRANSICAO_PAIS_BRASIL_%(ano)s.vrt" % strReplace
print "TRANSICAO BRASIL : " + str(tano)
os.system(cmdFind)
os.system(cmdGdalbuildvrt)
| {
"content_hash": "6432488ccdab9dab31097ef22c69f57b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 192,
"avg_line_length": 45.56603773584906,
"alnum_prop": 0.634575569358178,
"repo_name": "TerrasAppSolutions/seeg-mapbiomas-workspace",
"id": "76789a6eef5a8efccf387f01bb2920ec68a177e0",
"size": "4830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/Vendor/Shellscripts/buildvrt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2980"
},
{
"name": "CSS",
"bytes": "67586"
},
{
"name": "Dockerfile",
"bytes": "2507"
},
{
"name": "HTML",
"bytes": "238878"
},
{
"name": "JavaScript",
"bytes": "2182379"
},
{
"name": "PHP",
"bytes": "8418950"
},
{
"name": "Python",
"bytes": "3165"
},
{
"name": "Shell",
"bytes": "8648"
},
{
"name": "TSQL",
"bytes": "42857"
}
],
"symlink_target": ""
} |
import os
import unittest
from gitviewfs_objects import Directory
import dir_structure.shallow
from tests.structs.shallow import paths
from tests.structs.shallow.utils import BaseDefaultDirStructTest,\
BaseDefaultDirStructIntegrationTest
class RootDirPathTest(BaseDefaultDirStructTest):
def test_path(self):
self.assertPathIs(paths.ROOT_DIR, Directory)
class RootDirTest(unittest.TestCase):
def test_get_items_names(self):
dir_struct = dir_structure.shallow.Shallow()
root_dir = dir_struct.get_root_dir()
items = root_dir.get_items_names()
self.assertItemsEqual(['HEAD', 'branches', 'commits', 'trees', 'blobs'], items)
class RootDirIntegrationTest(BaseDefaultDirStructIntegrationTest):
def test_list(self):
items = os.listdir(self.mountpoint)
self.assertItemsEqual(['HEAD', 'branches', 'commits', 'trees', 'blobs'], items)
| {
"content_hash": "8b551cdf5fae7d66bb2d6efd1b694607",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 26.875,
"alnum_prop": 0.7674418604651163,
"repo_name": "erdavila/gitviewfs",
"id": "98c52aaa10966aede25f9aedfe1c371a619c4b5a",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/structs/shallow/test_root_dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59815"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.utils import timezone
from happenings.models import Event
from happenings.templatetags.happenings_tags import show_calendar
from happenings.utils.common import now
class ShowCalendarTemplateTagTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.url = reverse('calendar:list')
self.url_qs = reverse('calendar:list')
def test_show_calendar_no_events(self):
req = self.factory.get(self.url)
cal = show_calendar({}, req)
self.assertIn(str(now.month), cal)
self.assertIn(str(now.year), cal)
self.assertNotIn('calendar-event', cal)
def test_mini_calendar(self):
user = User.objects.create_user(
'foo', 'bar@example.com', 'secret'
)
event = Event.objects.create(
start_date=timezone.now(),
end_date=timezone.now(),
all_day=True,
created_by=user,
title="The big event",
description="Amazing event",
repeat="NEVER",
)
event2 = Event.objects.create(
start_date=timezone.now() + timezone.timedelta(days=1),
end_date=timezone.now() + timezone.timedelta(days=1),
all_day=True,
created_by=user,
title="The other event",
description="Incredible event",
repeat="NEVER",
)
event.save()
event2.save()
req = self.factory.get(self.url, {'cal_category': 'birthday'})
cal = show_calendar({}, req, mini=True)
self.assertIn(str(now.month), cal)
self.assertIn(str(now.year), cal)
self.assertNotIn(event.title, cal)
self.assertNotIn(event2.title, cal)
| {
"content_hash": "16fffc4f464d51db2d7fd62785549fe9",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 70,
"avg_line_length": 34.01754385964912,
"alnum_prop": 0.6173285198555957,
"repo_name": "wreckage/django-happenings",
"id": "7aad9c8b40e4f49b84f7411528ea42a588b5dae2",
"size": "1939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_template_tags/test_show_calendar.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2106"
},
{
"name": "HTML",
"bytes": "14341"
},
{
"name": "JavaScript",
"bytes": "6350"
},
{
"name": "Python",
"bytes": "271009"
}
],
"symlink_target": ""
} |
import os
import subprocess
def is_program_installed(program_name):
'''Determines if a given program is installed by checking if calling
subprocess with its program executable as parameter yields a file not
found error. If subprocess terminates with file not found or some
different error, we assume that the given program is not installed
on the system.
Arguments:
program_name -- Name of the executable
Returns:
True if the program is installed, False otherwise
'''
is_installed = True
try:
FNULL = open(os.devnull, 'w')
subprocess.call([program_name], stdout=FNULL, stderr=subprocess.STDOUT)
FNULL.close()
except OSError as e:
if e.errno == os.errno.ENOENT:
# not found
is_installed = False
else:
# something else went wrong
is_installed = False
return is_installed | {
"content_hash": "bdfd0c79e2ce9e6bb8023015ac8422a6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 32.10344827586207,
"alnum_prop": 0.6466165413533834,
"repo_name": "mguenther/pdfextract",
"id": "bb7f071d902a2380eb9526c43a251eab17284185",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfextract/validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8248"
}
],
"symlink_target": ""
} |
"""Pilot layout."""
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.plugins.indicators import control
from makani.gs.monitor2.apps.plugins.indicators import ground_station
class PilotLayout(base.BaseLayout):
"""The pilot layout."""
_NAME = 'Pilot'
_DESIRED_VIEW_COLS = 12
_ORDER_HORIZONTALLY = False
# Derived class should set the _MODE.
_MODE = '<unset>'
def Initialize(self):
self._AddIndicators('Flight Controller', [
control.FlightPlanIndicator(),
control.FlightModeIndicator(self._MODE),
control.FlightModeGatesIndicator(self._MODE),
control.ControlTimeIndicator(self._MODE),
control.JoystickIndicator(),
control.HoverGainRampScaleIndicator(self._MODE),
ground_station.WindIndicator(),
], properties={'cols': 3})
self._AddIndicators('Kite Position State', [
control.ApparentWindSpeedIndicator(self._MODE),
control.AltitudeIndicator(self._MODE),
control.PayoutIndicator(),
control.LowBoundLoopAltitudeIndicator(self._MODE),
control.CrosswindPlaybookIndicator(),
control.AutoControllerIndicator(),
], properties={'cols': 3})
self._AddIndicators('Throttle', [
control.ThrottleIndicator(from_joystick=True),
], properties={'cols': 2.5})
self._AddBreak()
self._AddIndicators('Off-Tether Position', [
control.GlideslopeChart(self._MODE),
control.TopDownPositionChart(self._MODE),
], properties={'cols': 3})
self._AddIndicators('Constraint Window', [
control.ConstraintWindow(self._MODE),
], properties={'cols': 3})
self._AddBreak()
self._AddIndicators('Aero Angles', [
control.AeroAnglesXYPlot(self._MODE),
], properties={'cols': 3})
self._AddIndicators('Flight Circle', [
control.CrosswindCircleWindow(self._MODE),
], properties={'cols': 3})
self._AddIndicators('Rotor Moments', [
control.RotorPitchYawWindow(),
], properties={'cols': 3})
self._AddBreak()
self._AddIndicators('Trans-in Trajectory', [
control.TransInTrajectoryChart(self._MODE),
], properties={'cols': 3})
self._AddIndicators('Impact Zone', [
control.ImpactZoneChart(self._MODE),
], properties={'cols': 3})
self._AddIndicators('Vessel Altitude', [
ground_station.VesselPositionChart(),
], properties={'cols': 3})
| {
"content_hash": "6e66691e764e8382e33b616eaaee831d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 69,
"avg_line_length": 31.076923076923077,
"alnum_prop": 0.6621287128712872,
"repo_name": "google/makani",
"id": "59ba269f8868c795553c5cb37ade67fe7005c2af",
"size": "3013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gs/monitor2/apps/plugins/layouts/pilot_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
} |
"""
Spyder configuration options.
Note: Leave this file free of Qt related imports, so that it can be used to
quickly load a user config file.
"""
import os
import sys
# Local import
from spyder.config.base import CHECK_ALL, EXCLUDED_NAMES
from spyder.config.fonts import MEDIUM, SANS_SERIF
from spyder.config.utils import IMPORT_EXT
from spyder.config.appearance import APPEARANCE
from spyder.plugins.editor.utils.findtasks import TASKS_PATTERN
from spyder.utils.introspection.module_completion import PREFERRED_MODULES
# =============================================================================
# Main constants
# =============================================================================
# Find in files exclude patterns
EXCLUDE_PATTERNS = ['*.csv, *.dat, *.log, *.tmp, *.bak, *.orig']
# Extensions that should be visible in Spyder's file/project explorers
SHOW_EXT = ['.py', '.ipynb', '.dat', '.pdf', '.png', '.svg', '.md', '.yml',
'.yaml']
# Extensions supported by Spyder (Editor or Variable explorer)
USEFUL_EXT = IMPORT_EXT + SHOW_EXT
# Name filters for file/project explorers (excluding files without extension)
NAME_FILTERS = ['README', 'INSTALL', 'LICENSE', 'CHANGELOG']
NAME_FILTERS += ['*' + _ext for _ext in USEFUL_EXT if _ext not in NAME_FILTERS]
# Port used to detect if there is a running instance and to communicate with
# it to open external files
OPEN_FILES_PORT = 21128
# OS Specific
WIN = os.name == 'nt'
MAC = sys.platform == 'darwin'
LINUX = sys.platform.startswith('linux')
CTRL = "Meta" if MAC else "Ctrl"
# Modules to be preloaded for Rope and Jedi
PRELOAD_MODULES = ', '.join(PREFERRED_MODULES)
# =============================================================================
# Defaults
# =============================================================================
DEFAULTS = [
('main',
{
'opengl': 'software',
'single_instance': True,
'open_files_port': OPEN_FILES_PORT,
'mac_open_file': False,
'normal_screen_resolution': True,
'high_dpi_scaling': False,
'high_dpi_custom_scale_factor': False,
'high_dpi_custom_scale_factors': '1.5',
'vertical_tabs': False,
'prompt_on_exit': False,
'panes_locked': True,
'window/size': (1260, 740),
'window/position': (10, 10),
'window/is_maximized': True,
'window/is_fullscreen': False,
'window/prefs_dialog_size': (1050, 530),
'use_custom_margin': True,
'custom_margin': 0,
'use_custom_cursor_blinking': False,
'show_internal_errors': True,
'check_updates_on_startup': True,
'cursor/width': 2,
'completion/size': (300, 180),
'report_error/remember_token': False,
'show_dpi_message': True,
}),
('toolbar',
{
'enable': True,
'toolbars_visible': True,
'last_visible_toolbars': [],
}),
('statusbar',
{
'show_status_bar': True,
'memory_usage/enable': True,
'memory_usage/timeout': 2000,
'cpu_usage/enable': False,
'cpu_usage/timeout': 2000,
'clock/enable': False,
'clock/timeout': 1000,
'check_updates_on_startup': False,
}),
('quick_layouts',
{
'place_holder': '',
'names': [],
'order': [],
'active': [],
'ui_names': []
}),
('internal_console',
{
'max_line_count': 300,
'working_dir_history': 30,
'working_dir_adjusttocontents': False,
'wrap': True,
'codecompletion/auto': False,
'external_editor/path': 'SciTE',
'external_editor/gotoline': '-goto:',
}),
('main_interpreter',
{
'default': True,
'custom': False,
'umr/enabled': True,
'umr/verbose': True,
'umr/namelist': [],
'custom_interpreters_list': [],
'custom_interpreter': '',
}),
('ipython_console',
{
'show_banner': True,
'completion_type': 0,
'show_calltips': True,
'ask_before_closing': False,
'show_reset_namespace_warning': True,
'buffer_size': 500,
'pylab': True,
'pylab/autoload': False,
'pylab/backend': 0,
'pylab/inline/figure_format': 0,
'pylab/inline/resolution': 72,
'pylab/inline/width': 6,
'pylab/inline/height': 4,
'pylab/inline/bbox_inches': True,
'startup/run_lines': 'import antimony; import rrplugins; import numpy; import scipy; import matplotlib; import roadrunner; import tellurium as te',
'startup/use_run_file': False,
'startup/run_file': '',
'greedy_completer': False,
'jedi_completer': False,
'autocall': 0,
'symbolic_math': False,
'in_prompt': '',
'out_prompt': '',
'show_elapsed_time': False,
'ask_before_restart': True,
# This is True because there are libraries like Pyomo
# that generate a lot of Command Prompts while running,
# and that's extremely annoying for Windows users.
'hide_cmd_windows': True,
'pdb_prevent_closing': True,
'pdb_ignore_lib': False,
'pdb_execute_events': True,
'pdb_use_exclamation_mark': True,
'pdb_stop_first_line': True,
}),
('variable_explorer',
{
'check_all': CHECK_ALL,
'dataframe_format': '.6g', # No percent sign to avoid problems
# with ConfigParser's interpolation
'excluded_names': EXCLUDED_NAMES,
'exclude_private': True,
'exclude_uppercase': True,
'exclude_capitalized': False,
'exclude_unsupported': False,
'exclude_callables_and_modules': True,
'truncate': True,
'minmax': False,
'show_callable_attributes': True,
'show_special_attributes': False
}),
('plots',
{
'mute_inline_plotting': True,
'show_plot_outline': False,
'auto_fit_plotting': True
}),
('editor',
{
'printer_header/font/family': SANS_SERIF,
'printer_header/font/size': MEDIUM,
'printer_header/font/italic': False,
'printer_header/font/bold': False,
'wrap': False,
'wrapflag': True,
'todo_list': True,
'realtime_analysis': True,
'realtime_analysis/timeout': 2500,
'outline_explorer': True,
'line_numbers': True,
'blank_spaces': False,
'edge_line': True,
'edge_line_columns': '79',
'indent_guides': False,
'code_folding': True,
'show_code_folding_warning': True,
'scroll_past_end': False,
'toolbox_panel': True,
'close_parentheses': True,
'close_quotes': True,
'add_colons': True,
'auto_unindent': True,
'indent_chars': '* *',
'tab_stop_width_spaces': 4,
'check_eol_chars': True,
'convert_eol_on_save': False,
'convert_eol_on_save_to': 'LF',
'tab_always_indent': False,
'intelligent_backspace': True,
'automatic_completions': True,
'automatic_completions_after_chars': 3,
'automatic_completions_after_ms': 300,
'completions_hint': True,
'completions_hint_after_ms': 500,
'underline_errors': False,
'highlight_current_line': True,
'highlight_current_cell': True,
'occurrence_highlighting': True,
'occurrence_highlighting/timeout': 1500,
'always_remove_trailing_spaces': False,
'add_newline': False,
'always_remove_trailing_newlines': False,
'show_tab_bar': True,
'show_class_func_dropdown': False,
'max_recent_files': 20,
'save_all_before_run': True,
'focus_to_editor': True,
'run_cell_copy': False,
'onsave_analysis': False,
'autosave_enabled': True,
'autosave_interval': 60,
'docstring_type': 'Numpydoc',
'strip_trailing_spaces_on_modify': False,
}),
('historylog',
{
'enable': True,
'wrap': True,
'go_to_eof': True,
'line_numbers': False,
}),
('help',
{
'enable': True,
'max_history_entries': 20,
'wrap': True,
'connect/editor': False,
'connect/ipython_console': False,
'math': True,
'automatic_import': True,
'plain_mode': False,
'rich_mode': True,
'show_source': False,
'locked': False,
}),
('onlinehelp',
{
'enable': True,
'zoom_factor': .8,
'handle_links': False,
'max_history_entries': 20,
}),
('outline_explorer',
{
'enable': True,
'show_fullpath': False,
'show_all_files': False,
'group_cells': True,
'sort_files_alphabetically': False,
'show_comments': True,
'follow_cursor': True,
'display_variables': False
}),
('project_explorer',
{
'name_filters': NAME_FILTERS,
'show_all': True,
'show_hscrollbar': True,
'max_recent_projects': 10,
'visible_if_project_open': True,
'single_click_to_open': False,
'show_hidden': True,
'size_column': False,
'type_column': False,
'date_column': False
}),
('explorer',
{
'enable': True,
'name_filters': NAME_FILTERS,
'show_hidden': False,
'single_click_to_open': False,
'size_column': False,
'type_column': False,
'date_column': True
}),
('find_in_files',
{
'enable': True,
'supported_encodings': ["utf-8", "iso-8859-1", "cp1252"],
'exclude': EXCLUDE_PATTERNS,
'exclude_regexp': False,
'search_text_regexp': False,
'search_text': [''],
'search_text_samples': [TASKS_PATTERN],
'more_options': False,
'case_sensitive': False,
'exclude_case_sensitive': False,
'max_results': 1000,
}),
('breakpoints',
{
'enable': True,
}),
('completions',
{
'enable': True,
'kite_call_to_action': True,
'enable_code_snippets': True,
'completions_wait_for_ms': 200,
'enabled_providers': {},
'provider_configuration': {},
'request_priorities': {}
}),
('profiler',
{
'enable': True,
}),
('pylint',
{
'enable': True,
'history_filenames': [],
'max_entries': 30,
'project_dir': None,
}),
('workingdir',
{
'working_dir_adjusttocontents': False,
'working_dir_history': 20,
'console/use_project_or_home_directory': False,
'console/use_cwd': True,
'console/use_fixed_directory': False,
'startup/use_fixed_directory': False,
}),
('tours',
{
'enable': True,
'show_tour_message': True,
}),
('shortcuts',
{
# ---- Global ----
# -- In app/spyder.py
'_/close pane': "Shift+Ctrl+F4",
'_/lock unlock panes': "Shift+Ctrl+F5",
'_/use next layout': "Shift+Alt+PgDown",
'_/use previous layout': "Shift+Alt+PgUp",
'_/maximize pane': "Ctrl+Alt+Shift+M",
'_/fullscreen mode': "F11",
'_/save current layout': "Shift+Alt+S",
'_/layout preferences': "Shift+Alt+P",
'_/spyder documentation': "F1",
'_/restart': "Shift+Alt+R",
'_/quit': "Ctrl+Q",
# -- In plugins/editor
'_/file switcher': 'Ctrl+P',
'_/symbol finder': 'Ctrl+Alt+P',
'_/debug': "Ctrl+F5",
'_/debug step over': "Ctrl+F10",
'_/debug continue': "Ctrl+F12",
'_/debug step into': "Ctrl+F11",
'_/debug step return': "Ctrl+Shift+F11",
'_/debug exit': "Ctrl+Shift+F12",
'_/run': "F5",
'_/configure': "Ctrl+F6",
'_/re-run last script': "F6",
# -- In plugins/init
'_/switch to help': "Ctrl+Shift+H",
'_/switch to outline_explorer': "Ctrl+Shift+O",
'_/switch to editor': "Ctrl+Shift+E",
'_/switch to historylog': "Ctrl+Shift+L",
'_/switch to onlinehelp': "Ctrl+Shift+D",
'_/switch to project_explorer': "Ctrl+Shift+P",
'_/switch to ipython_console': "Ctrl+Shift+I",
'_/switch to variable_explorer': "Ctrl+Shift+V",
'_/switch to find_in_files': "Ctrl+Shift+F",
'_/switch to explorer': "Ctrl+Shift+X",
'_/switch to plots': "Ctrl+Shift+G",
'_/switch to pylint': "Ctrl+Shift+C",
'_/switch to profiler': "Ctrl+Shift+R",
# -- In widgets/findreplace.py
'find_replace/find text': "Ctrl+F",
'find_replace/find next': "F3",
'find_replace/find previous': "Shift+F3",
'find_replace/replace text': "Ctrl+R",
'find_replace/hide find and replace': "Escape",
# ---- Editor ----
# -- In widgets/sourcecode/codeeditor.py
'editor/code completion': CTRL+'+Space',
'editor/duplicate line up': (
"Ctrl+Alt+Up" if WIN else "Shift+Alt+Up"),
'editor/duplicate line down': (
"Ctrl+Alt+Down" if WIN else "Shift+Alt+Down"),
'editor/delete line': 'Ctrl+D',
'editor/transform to uppercase': 'Ctrl+Shift+U',
'editor/transform to lowercase': 'Ctrl+U',
'editor/indent': 'Ctrl+]',
'editor/unindent': 'Ctrl+[',
'editor/move line up': "Alt+Up",
'editor/move line down': "Alt+Down",
'editor/go to new line': "Ctrl+Shift+Return",
'editor/go to definition': "Ctrl+G",
'editor/toggle comment': "Ctrl+1",
'editor/blockcomment': "Ctrl+4",
'editor/unblockcomment': "Ctrl+5",
'editor/start of line': "Meta+A",
'editor/end of line': "Meta+E",
'editor/previous line': "Meta+P",
'editor/next line': "Meta+N",
'editor/previous char': "Meta+B",
'editor/next char': "Meta+F",
'editor/previous word': "Ctrl+Left",
'editor/next word': "Ctrl+Right",
'editor/kill to line end': "Meta+K",
'editor/kill to line start': "Meta+U",
'editor/yank': 'Meta+Y',
'editor/rotate kill ring': 'Shift+Meta+Y',
'editor/kill previous word': 'Meta+Backspace',
'editor/kill next word': 'Meta+D',
'editor/start of document': 'Ctrl+Home',
'editor/end of document': 'Ctrl+End',
'editor/undo': 'Ctrl+Z',
'editor/redo': 'Ctrl+Shift+Z',
'editor/cut': 'Ctrl+X',
'editor/copy': 'Ctrl+C',
'editor/paste': 'Ctrl+V',
'editor/delete': 'Del',
'editor/select all': "Ctrl+A",
# -- In widgets/editor.py
'editor/inspect current object': 'Ctrl+I',
'editor/breakpoint': 'F12',
'editor/conditional breakpoint': 'Shift+F12',
'editor/run selection': "F9",
'editor/go to line': 'Ctrl+L',
'editor/go to previous file': CTRL + '+Shift+Tab',
'editor/go to next file': CTRL + '+Tab',
'editor/cycle to previous file': 'Ctrl+PgUp',
'editor/cycle to next file': 'Ctrl+PgDown',
'editor/new file': "Ctrl+N",
'editor/open last closed':"Ctrl+Shift+T",
'editor/open file': "Ctrl+O",
'editor/save file': "Ctrl+S",
'editor/save all': "Ctrl+Alt+S",
'editor/save as': 'Ctrl+Shift+S',
'editor/close all': "Ctrl+Shift+W",
'editor/last edit location': "Ctrl+Alt+Shift+Left",
'editor/previous cursor position': "Alt+Left",
'editor/next cursor position': "Alt+Right",
'editor/previous warning': "Ctrl+Alt+Shift+,",
'editor/next warning': "Ctrl+Alt+Shift+.",
'editor/zoom in 1': "Ctrl++",
'editor/zoom in 2': "Ctrl+=",
'editor/zoom out': "Ctrl+-",
'editor/zoom reset': "Ctrl+0",
'editor/close file 1': "Ctrl+W",
'editor/close file 2': "Ctrl+F4",
'editor/run cell': CTRL + '+Return',
'editor/run cell and advance': 'Shift+Return',
'editor/debug cell': 'Alt+Shift+Return',
'editor/go to next cell': 'Ctrl+Down',
'editor/go to previous cell': 'Ctrl+Up',
'editor/re-run last cell': 'Alt+Return',
'editor/split vertically': "Ctrl+{",
'editor/split horizontally': "Ctrl+_",
'editor/close split panel': "Alt+Shift+W",
'editor/docstring': "Ctrl+Alt+D",
'editor/autoformatting': "Ctrl+Alt+I",
'editor/show in external file explorer': '',
# -- In Breakpoints
'_/switch to breakpoints': "Ctrl+Shift+B",
# ---- Consoles (in widgets/shell) ----
'console/inspect current object': "Ctrl+I",
'console/clear shell': "Ctrl+L",
'console/clear line': "Shift+Escape",
# ---- In Pylint ----
'pylint/run analysis': "F8",
# ---- In Profiler ----
'profiler/run profiler': "F10",
# ---- In widgets/ipythonconsole/shell.py ----
'ipython_console/new tab': "Ctrl+T",
'ipython_console/reset namespace': "Ctrl+Alt+R",
'ipython_console/restart kernel': "Ctrl+.",
'ipython_console/inspect current object': "Ctrl+I",
'ipython_console/clear shell': "Ctrl+L",
'ipython_console/clear line': "Shift+Escape",
# ---- In widgets/arraybuider.py ----
'array_builder/enter array inline': "Ctrl+Alt+M",
'array_builder/enter array table': "Ctrl+M",
# ---- In widgets/variableexplorer/arrayeditor.py ----
'variable_explorer/copy': 'Ctrl+C',
# ---- In widgets/variableexplorer/namespacebrowser.py ----
'variable_explorer/search': 'Ctrl+F',
'variable_explorer/refresh': 'Ctrl+R',
# ---- In widgets/plots/figurebrowser.py ----
'plots/copy': 'Ctrl+C',
'plots/previous figure': 'Ctrl+PgUp',
'plots/next figure': 'Ctrl+PgDown',
'plots/save': 'Ctrl+S',
'plots/save all': 'Ctrl+Alt+S',
'plots/close': 'Ctrl+W',
'plots/close all': 'Ctrl+Shift+W',
'plots/zoom in': "Ctrl++",
'plots/zoom out': "Ctrl+-",
# ---- In widgets/explorer ----
'explorer/copy file': 'Ctrl+C',
'explorer/paste file': 'Ctrl+V',
'explorer/copy absolute path': 'Ctrl+Alt+C',
'explorer/copy relative path': 'Ctrl+Alt+Shift+C',
# ---- In plugins/findinfiles/plugin ----
'find_in_files/find in files': 'Ctrl+Alt+F',
}),
('appearance', APPEARANCE),
]
NAME_MAP = {
# Empty container object means use the rest of defaults
'spyder': [],
# Splitting these files makes sense for projects, we might as well
# apply the same split for the app global config
# These options change on spyder startup or are tied to a specific OS,
# not good for version control
'transient': [
('main', [
'completion/size',
'crash',
'current_version',
'historylog_filename',
'spyder_pythonpath',
'window/position',
'window/prefs_dialog_size',
'window/size',
'window/state',
]
),
('toolbar', [
'last_visible_toolbars',
]
),
('editor', [
'autosave_mapping',
'bookmarks',
'filenames',
'layout_settings',
'recent_files',
'splitter_state',
]
),
('explorer', [
'file_associations',
]),
('find_in_files', [
            'path_history',
'search_text',
'exclude_index',
'search_in_index',
]
),
('main_interpreter', [
'custom_interpreters_list',
'custom_interpreter',
'executable',
]
),
('onlinehelp', [
'zoom_factor',
]
),
('outline_explorer', [
'expanded_state',
'scrollbar_position',
],
),
('project_explorer', [
'current_project_path',
'expanded_state',
'recent_projects',
'max_recent_projects',
'scrollbar_position',
]
),
('quick_layouts', []), # Empty list means use all options
('run', [
'breakpoints',
'configurations',
'defaultconfiguration',
'default/wdir/fixed_directory',
]
),
('workingdir', [
'console/fixed_directory',
'startup/fixed_directory',
]
),
('pylint', [
'history_filenames',
]
),
]
}
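# Illustrative helper (not part of Spyder itself): one way the NAME_MAP split
# above could be queried. Options listed under 'transient' live in the
# transient config file, an empty option list (e.g. 'quick_layouts') claims a
# whole section, and everything else falls back to the main 'spyder' file.
def _config_file_for(section, option):
    for config_name, section_specs in NAME_MAP.items():
        for section_name, options in section_specs:
            if section_name == section and (options == [] or option in options):
                return config_name
    return 'spyder'
# e.g. _config_file_for('main', 'window/size') -> 'transient'
#      _config_file_for('main', 'opengl')      -> 'spyder'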
# =============================================================================
# Config instance
# =============================================================================
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you need to
# do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0
# 2. If you want to *remove* options that are no longer needed in our codebase,
# or if you want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 3.0.0 to 4.0.0
# 3. You don't need to touch this value if you're just adding a new option
CONF_VERSION = '70.1.0'
| {
"content_hash": "8838bec2383dab697a3ae14ff5b12047",
"timestamp": "",
"source": "github",
"line_count": 633,
"max_line_length": 161,
"avg_line_length": 38.79462875197472,
"alnum_prop": 0.459339495866759,
"repo_name": "sys-bio/tellurium",
"id": "0484323b6e2e8337e253c7d077f191c917f497e9",
"size": "24712",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "spyder_mod/Spyder 5.1.5/site-packages/spyder/config/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "19065"
},
{
"name": "Inno Setup",
"bytes": "36260"
},
{
"name": "Jupyter Notebook",
"bytes": "10767"
},
{
"name": "Python",
"bytes": "2401119"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
import re
from .base_linter import BaseLinter, INPUT_METHOD_TEMP_FILE
CONFIG = {
'language': 'Puppet',
'executable': 'puppet',
'lint_args': ['parser', 'validate', '--color=false', '{filename}'],
'test_existence_args': '-V',
'input_method': INPUT_METHOD_TEMP_FILE
}
class Linter(BaseLinter):
def parse_errors(self, view, errors, lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages):
for line in errors.splitlines():
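            # First try Puppet's detailed syntax-error output, which exposes the
            # offending token in the 'near' group; otherwise fall back to the
            # generic "Could not match ..." form. Both patterns also capture the
            # line number when Puppet reports one.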
match = re.match(r'[Ee]rr(or)?: (?P<error>.+?(Syntax error at \'(?P<near>.+?)\'; expected \'.+\')) at /.+?:(?P<line>\d+)?', line)
if not match:
match = re.match(r'[Ee]rr(or)?: (?P<error>.+?(Could not match (?P<near>.+?))?) at /.+?:(?P<line>\d+)?', line)
if match:
error, line = match.group('error'), match.group('line')
lineno = int(line)
near = match.group('near')
if near:
error = '{0}, near "{1}"'.format(error, near)
self.underline_regex(view, lineno, '(?P<underline>{0})'.format(re.escape(near)), lines, errorUnderlines)
self.add_message(lineno, lines, error, errorMessages)
| {
"content_hash": "064bbbbfff3d8902d1cff8f694d7427a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 156,
"avg_line_length": 42.266666666666666,
"alnum_prop": 0.5615141955835962,
"repo_name": "benesch/sublime-linter",
"id": "86c911c278babeab989232831f7c1c1eb1d674ef",
"size": "1268",
"binary": false,
"copies": "1",
"ref": "refs/heads/sublime-text-3",
"path": "sublimelinter/modules/puppet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "810413"
},
{
"name": "Python",
"bytes": "248878"
}
],
"symlink_target": ""
} |
import os, sys
sys.path.append("../")
import SocialObjects
from SocialObjects import *
import workflow
import gateway
import persistence
import tests
import server
| {
"content_hash": "60138c6fd81738aafdaa42930864733e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 27,
"avg_line_length": 18.22222222222222,
"alnum_prop": 0.8109756097560976,
"repo_name": "uoscompsci/PRISONER",
"id": "97002e3753276f3647da38e28b723ce222d5c811",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prisoner/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "835"
},
{
"name": "HTML",
"bytes": "3685"
},
{
"name": "Python",
"bytes": "215679"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
import json
import six
from oslo_utils import timeutils as tu
from senlin.common import exception
from senlin.db.sqlalchemy import api as db_api
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit.db import shared
UUID1 = shared.UUID1
UUID2 = shared.UUID2
UUID3 = shared.UUID3
class DBAPINodeTest(base.SenlinTestCase):
def setUp(self):
super(DBAPINodeTest, self).setUp()
self.ctx = utils.dummy_context()
self.profile = shared.create_profile(self.ctx)
self.cluster = shared.create_cluster(self.ctx, self.profile)
def test_node_create(self):
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(0, len(nodes))
res = shared.create_node(self.ctx, self.cluster, self.profile)
node = db_api.node_get(self.ctx, res.id)
self.assertIsNotNone(node)
self.assertEqual('test_node_name', node.name)
self.assertEqual(UUID1, node.physical_id)
self.assertEqual(1, node.index)
self.assertIsNone(node.role)
self.assertIsNone(node.created_time)
self.assertIsNone(node.updated_time)
self.assertIsNone(node.deleted_time)
self.assertEqual('ACTIVE', node.status)
self.assertEqual('create complete', node.status_reason)
self.assertEqual('{"foo": "123"}', json.dumps(node.metadata))
self.assertEqual('{"key1": "value1"}', json.dumps(node.data))
self.assertEqual(self.cluster.id, node.cluster_id)
self.assertEqual(self.profile.id, node.profile_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
def test_node_get(self):
res = shared.create_node(self.ctx, self.cluster, self.profile)
node = db_api.node_get(self.ctx, res.id)
self.assertIsNotNone(node)
node = db_api.node_get(self.ctx, UUID2)
self.assertIsNone(node)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
def test_node_get_show_deleted(self):
res = shared.create_node(self.ctx, self.cluster, self.profile)
node_id = res.id
node = db_api.node_get(self.ctx, node_id)
self.assertIsNotNone(node)
db_api.node_delete(self.ctx, node_id)
node = db_api.node_get(self.ctx, node_id)
self.assertIsNone(node)
node = db_api.node_get(self.ctx, node_id, show_deleted=False)
self.assertIsNone(node)
node = db_api.node_get(self.ctx, node_id, show_deleted=True)
self.assertEqual(node_id, node.id)
def test_node_get_by_name(self):
shared.create_node(self.ctx, self.cluster, self.profile)
node = db_api.node_get_by_name(self.ctx, 'test_node_name')
self.assertIsNotNone(node)
self.assertEqual('test_node_name', node.name)
self.assertEqual(self.cluster.id, node.cluster_id)
res = db_api.node_get_by_name(self.ctx, 'BogusName')
self.assertIsNone(res)
def test_node_get_by_name_show_deleted(self):
node_name = 'test_node_name'
shared.create_node(self.ctx, self.cluster, self.profile,
name=node_name)
node = db_api.node_get_by_name(self.ctx, node_name)
self.assertIsNotNone(node)
node_id = node.id
db_api.node_delete(self.ctx, node_id)
res = db_api.node_get_by_name(self.ctx, node_name)
self.assertIsNone(res)
res = db_api.node_get_by_name(self.ctx, node_name, show_deleted=False)
self.assertIsNone(res)
res = db_api.node_get_by_name(self.ctx, node_name, show_deleted=True)
self.assertEqual(node_id, res.id)
def test_node_get_by_short_id(self):
node_id1 = 'same-part-unique-part'
node_id2 = 'same-part-part-unique'
shared.create_node(self.ctx, None, self.profile,
id=node_id1, name='node-1')
shared.create_node(self.ctx, None, self.profile,
id=node_id2, name='node-2')
for x in range(len('same-part-')):
self.assertRaises(exception.MultipleChoices,
db_api.node_get_by_short_id,
self.ctx, node_id1[:x])
res = db_api.node_get_by_short_id(self.ctx, node_id1[:11])
self.assertEqual(node_id1, res.id)
res = db_api.node_get_by_short_id(self.ctx, node_id2[:11])
self.assertEqual(node_id2, res.id)
res = db_api.node_get_by_short_id(self.ctx, 'non-existent')
self.assertIsNone(res)
def test_node_get_by_short_id_show_deleted(self):
node_id = 'this-is-a-unique-id'
shared.create_node(self.ctx, None, self.profile, id=node_id)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5])
self.assertEqual(node_id, res.id)
res = db_api.node_get_by_short_id(self.ctx, node_id[:7])
self.assertEqual(node_id, res.id)
db_api.node_delete(self.ctx, node_id)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5])
self.assertIsNone(res)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5],
show_deleted=False)
self.assertIsNone(res)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5],
show_deleted=True)
self.assertEqual(node_id, res.id)
def test_node_get_all(self):
values = [{'name': 'node1'}, {'name': 'node2'}, {'name': 'node3'}]
[shared.create_node(self.ctx, None, self.profile, **v) for v in values]
nodes = db_api.node_get_all(self.ctx)
self.assertEqual(3, len(nodes))
names = [node.name for node in nodes]
[self.assertIn(val['name'], names) for val in values]
def test_node_get_all_with_cluster_id(self):
values = [{'name': 'node1'}, {'name': 'node2'}, {'name': 'node3'}]
for v in values:
shared.create_node(self.ctx, self.cluster, self.profile, **v)
shared.create_node(self.ctx, None, self.profile, name='node0')
nodes = db_api.node_get_all(self.ctx, cluster_id=self.cluster.id)
self.assertEqual(3, len(nodes))
names = [node.name for node in nodes]
[self.assertIn(val['name'], names) for val in values]
def test_node_get_all_show_deleted(self):
values = [{'id': 'node1'}, {'id': 'node2'}, {'id': 'node3'}]
for v in values:
shared.create_node(self.ctx, self.cluster, self.profile, **v)
db_api.node_delete(self.ctx, 'node2')
nodes = db_api.node_get_all(self.ctx)
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, show_deleted=False)
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, show_deleted=True)
self.assertEqual(3, len(nodes))
def test_node_get_all_with_limit_marker(self):
node_ids = ['node1', 'node2', 'node3']
for v in node_ids:
shared.create_node(self.ctx, self.cluster, self.profile,
id=v, init_time=tu.utcnow())
nodes = db_api.node_get_all(self.ctx, limit=1)
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all(self.ctx, limit=2)
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, limit=5)
self.assertEqual(3, len(nodes))
nodes = db_api.node_get_all(self.ctx, marker='node1')
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, marker='node2')
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all(self.ctx, marker='node3')
self.assertEqual(0, len(nodes))
nodes = db_api.node_get_all(self.ctx, limit=1, marker='node1')
self.assertEqual(1, len(nodes))
def test_node_get_all_used_sort_keys(self):
node_ids = ['node1', 'node2', 'node3']
for v in node_ids:
shared.create_node(self.ctx, self.cluster, self.profile, id=v)
mock_paginate = self.patchobject(db_api.utils, 'paginate_query')
sort_keys = ['index', 'name', 'created_time', 'updated_time',
'deleted_time', 'status']
db_api.node_get_all(self.ctx, sort_keys=sort_keys)
args = mock_paginate.call_args[0]
used_sort_keys = set(args[3])
expected_keys = set(['index', 'name', 'created_time', 'updated_time',
'deleted_time', 'status', 'id'])
self.assertEqual(expected_keys, used_sort_keys)
def test_node_get_all_sort_keys_wont_change(self):
sort_keys = ['id']
db_api.node_get_all(self.ctx, sort_keys=sort_keys)
self.assertEqual(['id'], sort_keys)
def test_node_get_all_sort_keys_and_dir(self):
values = [{'id': '001', 'name': 'node1', 'status': 'ACTIVE'},
{'id': '002', 'name': 'node3', 'status': 'ERROR'},
{'id': '003', 'name': 'node2', 'status': 'UPDATING'}]
for v in values:
shared.create_node(self.ctx, self.cluster, self.profile, **v)
nodes = db_api.node_get_all(self.ctx, sort_keys=['name', 'status'],
sort_dir='asc')
self.assertEqual(3, len(nodes))
# Sorted by name
self.assertEqual('001', nodes[0].id)
self.assertEqual('003', nodes[1].id)
self.assertEqual('002', nodes[2].id)
nodes = db_api.node_get_all(self.ctx, sort_keys=['status', 'name'],
sort_dir='asc')
self.assertEqual(3, len(nodes))
# Sorted by statuses (ascending)
self.assertEqual('001', nodes[0].id)
self.assertEqual('002', nodes[1].id)
self.assertEqual('003', nodes[2].id)
nodes = db_api.node_get_all(self.ctx, sort_keys=['status', 'name'],
sort_dir='desc')
self.assertEqual(3, len(nodes))
# Sorted by statuses (descending)
self.assertEqual('003', nodes[0].id)
self.assertEqual('002', nodes[1].id)
self.assertEqual('001', nodes[2].id)
def test_node_get_all_default_sort_dir(self):
nodes = [shared.create_node(self.ctx, None, self.profile,
init_time=tu.utcnow())
for x in range(3)]
results = db_api.node_get_all(self.ctx, sort_dir='asc')
self.assertEqual(3, len(results))
self.assertEqual(nodes[0].id, results[0].id)
self.assertEqual(nodes[1].id, results[1].id)
self.assertEqual(nodes[2].id, results[2].id)
def test_node_get_all_with_filters(self):
shared.create_node(self.ctx, None, self.profile, name='node1')
shared.create_node(self.ctx, None, self.profile, name='node2')
filters = {'name': ['node1', 'nodex']}
results = db_api.node_get_all(self.ctx, filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('node1', results[0]['name'])
filters = {'name': 'node1'}
results = db_api.node_get_all(self.ctx, filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('node1', results[0]['name'])
def test_node_get_all_with_empty_filters(self):
shared.create_node(self.ctx, None, self.profile, name='node1')
shared.create_node(self.ctx, None, self.profile, name='node2')
filters = None
results = db_api.node_get_all(self.ctx, filters=filters)
self.assertEqual(2, len(results))
def test_node_get_all_with_project_safe(self):
shared.create_node(self.ctx, None, self.profile, name='node1')
shared.create_node(self.ctx, None, self.profile, name='node2')
self.ctx.project = 'a-different-project'
results = db_api.node_get_all(self.ctx, project_safe=False)
self.assertEqual(2, len(results))
self.ctx.project = 'a-different-project'
results = db_api.node_get_all(self.ctx)
self.assertEqual(0, len(results))
results = db_api.node_get_all(self.ctx, project_safe=True)
self.assertEqual(0, len(results))
def test_node_get_by_cluster(self):
cluster1 = shared.create_cluster(self.ctx, self.profile)
node0 = shared.create_node(self.ctx, None, self.profile)
node1 = shared.create_node(self.ctx, self.cluster, self.profile)
node2 = shared.create_node(self.ctx, self.cluster, self.profile)
node3 = shared.create_node(self.ctx, cluster1, self.profile)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(2, len(nodes))
self.assertEqual(set([node1.id, node2.id]),
set([nodes[0].id, nodes[1].id]))
nodes = db_api.node_get_all_by_cluster(self.ctx, None)
self.assertEqual(1, len(nodes))
self.assertEqual(node0.id, nodes[0].id)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(1, len(nodes))
self.assertEqual(node3.id, nodes[0].id)
def test_node_get_by_cluster_show_deleted(self):
node0 = shared.create_node(self.ctx, self.cluster, self.profile)
node1 = shared.create_node(self.ctx, self.cluster, self.profile)
nodes = db_api.node_get_all_by_cluster(self.ctx,
self.cluster.id)
self.assertEqual(2, len(nodes))
self.assertEqual(set([node0.id, node1.id]),
set([nodes[0].id, nodes[1].id]))
db_api.node_delete(self.ctx, node1.id)
nodes = db_api.node_get_all_by_cluster(self.ctx,
self.cluster.id)
self.assertEqual(1, len(nodes))
self.assertEqual(node0.id, nodes[0].id)
nodes = db_api.node_get_all_by_cluster(self.ctx,
self.cluster.id,
show_deleted=True)
self.assertEqual(2, len(nodes))
self.assertEqual(set([node0.id, node1.id]),
set([nodes[0].id, nodes[1].id]))
def test_node_get_by_name_and_cluster(self):
node_name = 'test_node_007'
shared.create_node(self.ctx, self.cluster, self.profile,
name=node_name)
node = db_api.node_get_by_name_and_cluster(self.ctx,
node_name,
self.cluster.id)
self.assertIsNotNone(node)
self.assertEqual(node_name, node.name)
self.assertEqual(self.cluster.id, node.cluster_id)
node = db_api.node_get_by_name_and_cluster(self.ctx, 'not-exist',
self.cluster.id)
self.assertIsNone(node)
node = db_api.node_get_by_name_and_cluster(self.ctx, node_name,
'BogusClusterID')
self.assertIsNone(node)
def test_node_get_by_physical_id(self):
shared.create_node(self.ctx, self.cluster, self.profile,
physical_id=UUID1)
node = db_api.node_get_by_physical_id(self.ctx, UUID1)
self.assertIsNotNone(node)
self.assertEqual(UUID1, node.physical_id)
node = db_api.node_get_by_physical_id(self.ctx, UUID2)
self.assertIsNone(node)
def test_node_update(self):
node = shared.create_node(self.ctx, self.cluster, self.profile)
new_attributes = {
'name': 'new node name',
'status': 'bad status',
'role': 'a new role',
}
db_api.node_update(self.ctx, node.id, new_attributes)
node = db_api.node_get(self.ctx, node.id)
self.assertEqual('new node name', node.name)
self.assertEqual('bad status', node.status)
self.assertEqual('a new role', node.role)
def test_node_update_not_found(self):
new_attributes = {'name': 'new_name'}
ex = self.assertRaises(exception.NodeNotFound,
db_api.node_update,
self.ctx, 'BogusId', new_attributes)
self.assertEqual('The node (BogusId) could not be found.',
six.text_type(ex))
def test_node_update_cluster_status_updated(self):
cluster = db_api.cluster_get(self.ctx, self.cluster.id)
self.assertEqual('INIT', cluster.status)
node = shared.create_node(self.ctx, self.cluster, self.profile)
new_attributes = {
'name': 'new_name',
'status': 'ERROR',
'status_reason': 'Something is wrong',
}
db_api.node_update(self.ctx, node.id, new_attributes)
node = db_api.node_get(self.ctx, node.id)
self.assertEqual('new_name', node.name)
self.assertEqual('ERROR', node.status)
self.assertEqual('Something is wrong', node.status_reason)
cluster = db_api.cluster_get(self.ctx, self.cluster.id)
self.assertEqual('WARNING', cluster.status)
reason = 'Node new_name: Something is wrong'
self.assertEqual(reason, cluster.status_reason)
def test_node_migrate_from_none(self):
node_orphan = shared.create_node(self.ctx, None, self.profile)
timestamp = tu.utcnow()
node = db_api.node_migrate(self.ctx, node_orphan.id, self.cluster.id,
timestamp)
cluster = db_api.cluster_get(self.ctx, self.cluster.id)
self.assertEqual(timestamp, node.updated_time)
self.assertEqual(self.cluster.id, node.cluster_id)
self.assertEqual(2, cluster.next_index)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
def test_node_migrate_to_none(self):
node = shared.create_node(self.ctx, self.cluster, self.profile)
timestamp = tu.utcnow()
node_new = db_api.node_migrate(self.ctx, node.id, None, timestamp)
self.assertEqual(timestamp, node_new.updated_time)
self.assertIsNone(node_new.cluster_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(0, len(nodes))
def test_node_migrate_between_clusters(self):
cluster1 = shared.create_cluster(self.ctx, self.profile)
cluster2 = shared.create_cluster(self.ctx, self.profile)
node = shared.create_node(self.ctx, cluster1, self.profile)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id)
self.assertEqual(0, len(nodes))
self.assertEqual(2, cluster1.next_index)
self.assertEqual(1, cluster2.next_index)
timestamp = tu.utcnow()
node_new = db_api.node_migrate(self.ctx, node.id, cluster2.id,
timestamp)
cluster1 = db_api.cluster_get(self.ctx, cluster1.id)
cluster2 = db_api.cluster_get(self.ctx, cluster2.id)
self.assertEqual(timestamp, node_new.updated_time)
self.assertEqual(cluster2.id, node_new.cluster_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(0, len(nodes))
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id)
self.assertEqual(1, len(nodes))
self.assertEqual(2, cluster1.next_index)
self.assertEqual(2, cluster2.next_index)
# Migrate it back!
timestamp = tu.utcnow()
node_new = db_api.node_migrate(self.ctx, node.id, cluster1.id,
timestamp)
cluster1 = db_api.cluster_get(self.ctx, cluster1.id)
cluster2 = db_api.cluster_get(self.ctx, cluster2.id)
self.assertEqual(timestamp, node_new.updated_time)
self.assertEqual(cluster1.id, node_new.cluster_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id)
self.assertEqual(0, len(nodes))
self.assertEqual(3, cluster1.next_index)
self.assertEqual(2, cluster2.next_index)
def test_node_delete(self):
node = shared.create_node(self.ctx, self.cluster, self.profile)
node_id = node.id
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
db_api.node_delete(self.ctx, node_id)
res = db_api.node_get(self.ctx, node_id)
self.assertIsNone(res)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(0, len(nodes))
def test_node_delete_not_found(self):
node_id = 'BogusNodeID'
res = db_api.node_delete(self.ctx, node_id)
self.assertIsNone(res)
res = db_api.node_get(self.ctx, node_id)
self.assertIsNone(res)
| {
"content_hash": "2bafd31df6de4a4bb8981edeb5480218",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 79,
"avg_line_length": 41.007782101167315,
"alnum_prop": 0.5931777208463801,
"repo_name": "Alzon/senlin",
"id": "603920a6fdc5d7aa24626d411b1ef5d7d481fdf9",
"size": "21627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senlin/tests/unit/db/test_node_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1931099"
},
{
"name": "Shell",
"bytes": "16531"
}
],
"symlink_target": ""
} |
import os
import unittest
from unittest.mock import Mock, PropertyMock, patch
from airflow.providers.apache.hive.operators.hive_to_samba import Hive2SambaOperator
from airflow.utils.operator_helpers import context_to_airflow_vars
from tests.providers.apache.hive import DEFAULT_DATE, TestHiveEnvironment
class TestHive2SambaOperator(TestHiveEnvironment):
def setUp(self):
self.kwargs = dict(
hql='hql',
destination_filepath='destination_filepath',
samba_conn_id='samba_default',
hiveserver2_conn_id='hiveserver2_default',
task_id='test_hive_to_samba_operator',
)
super().setUp()
@patch('airflow.providers.apache.hive.operators.hive_to_samba.SambaHook')
@patch('airflow.providers.apache.hive.operators.hive_to_samba.HiveServer2Hook')
@patch('airflow.providers.apache.hive.operators.hive_to_samba.NamedTemporaryFile')
def test_execute(self, mock_tmp_file, mock_hive_hook, mock_samba_hook):
type(mock_tmp_file).name = PropertyMock(return_value='tmp_file')
mock_tmp_file.return_value.__enter__ = Mock(return_value=mock_tmp_file)
context = {}
Hive2SambaOperator(**self.kwargs).execute(context)
mock_hive_hook.assert_called_once_with(hiveserver2_conn_id=self.kwargs['hiveserver2_conn_id'])
mock_hive_hook.return_value.to_csv.assert_called_once_with(
hql=self.kwargs['hql'],
csv_filepath=mock_tmp_file.name,
hive_conf=context_to_airflow_vars(context))
mock_samba_hook.assert_called_once_with(samba_conn_id=self.kwargs['samba_conn_id'])
mock_samba_hook.return_value.push_from_local.assert_called_once_with(
self.kwargs['destination_filepath'], mock_tmp_file.name)
@unittest.skipIf(
'AIRFLOW_RUNALL_TESTS' not in os.environ,
"Skipped because AIRFLOW_RUNALL_TESTS is not set")
def test_hive2samba(self):
op = Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
| {
"content_hash": "ce0affb15529a21e09b163ecb6bb4152",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 102,
"avg_line_length": 44.36538461538461,
"alnum_prop": 0.6757693974859125,
"repo_name": "mtagle/airflow",
"id": "58f88149af8c4f246bf6560e40679f3e4c36f78b",
"size": "3094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/apache/hive/operators/test_hive_to_samba.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""sqlitemagic provices a simple magic for interacting with SQLite
databases stored on disk.
Usage:
%%sqlite filename.db
select personal, family from person;
produces:
Alan|Turing
Grace|Hopper
"""
# This file is copyright 2013 by Greg Wilson: see
# https://github.com/gvwilson/sqlitemagic/blob/master/LICENSE
# for the license.
# Inspired by https://github.com/tkf/ipython-sqlitemagic.
import sqlite3
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, HTML
@magics_class
class SqliteMagic(Magics):
'''Provide the 'sqlite' calling point.'''
@cell_magic
def sqlite(self, filename, query):
connection = sqlite3.connect(filename)
cursor = connection.cursor()
try:
cursor.execute(query)
results = cursor.fetchall()
display(HTML(self.tablify(results)))
except Exception, e:
import sys
print >> sys.stderr, "exception", e
cursor.close()
connection.close()
def tablify(self, rows):
return '<table>\n' + '\n'.join(self.rowify(r) for r in rows) + '\n</table>'
def rowify(self, row):
return '<tr>' + ''.join('<td>' + str(r) + '</td>' for r in row) + '</tr>'
def load_ipython_extension(ipython):
ipython.register_magics(SqliteMagic)
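# Example (for illustration): for rows fetched as
# [('Alan', 'Turing'), ('Grace', 'Hopper')], tablify() produces
#   <table>
#   <tr><td>Alan</td><td>Turing</td></tr>
#   <tr><td>Grace</td><td>Hopper</td></tr>
#   </table>
# which display(HTML(...)) renders as a two-row table in the notebook.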
| {
"content_hash": "a35f27b8d6015566c82d064ca15945d5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 83,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.6533834586466165,
"repo_name": "selimnairb/2014-02-25-swctest",
"id": "4a1e83d51b1345997baa3a1056c62bd3b3998093",
"size": "1330",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "novice/sql/sqlitemagic.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8114"
},
{
"name": "HTML",
"bytes": "871618"
},
{
"name": "JavaScript",
"bytes": "3491"
},
{
"name": "Makefile",
"bytes": "14449"
},
{
"name": "Perl",
"bytes": "131309"
},
{
"name": "Python",
"bytes": "280525"
},
{
"name": "R",
"bytes": "56547"
},
{
"name": "Shell",
"bytes": "1795"
},
{
"name": "Smarty",
"bytes": "3768"
},
{
"name": "TeX",
"bytes": "14234"
}
],
"symlink_target": ""
} |
def down_slope(x, left, right):
return float(float(right - x) / float(right - left))
def up_slope(x, left, right):
return float(float(x - left) / float(right - left))
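# Usage sketch (added for illustration): these helpers give the falling and
# rising edges of a membership-style function over [left, right]; the result
# stays within [0, 1] only while x lies inside that interval.
if __name__ == "__main__":
    assert down_slope(2.5, 0, 10) == 0.75  # (10 - 2.5) / (10 - 0)
    assert up_slope(2.5, 0, 10) == 0.25    # (2.5 - 0) / (10 - 0)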
| {
"content_hash": "dbf787bd72601731b9d8fb5fa79a23bd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 29.5,
"alnum_prop": 0.6384180790960452,
"repo_name": "JakeCowton/Pok-e-Lol",
"id": "fbffc0296a3ca55acdabb07e37dcb76d1e9bda0b",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzy/slope_calculator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47867"
}
],
"symlink_target": ""
} |
from .test import *
def test(suite='raam.test',verbosity=2):
"""
Runs all tests from the module.
Parameters
----------
verbosity : int (optional)
Test output verbosity
"""
suite = unittest.TestLoader().loadTestsFromNames([suite])
#suite = unittest.TestLoader().loadTestsFromTestCase(raam.test.MatrixConstruction)
unittest.TextTestRunner(verbosity=verbosity).run(suite)
| {
"content_hash": "0e3a4a5f5fb64924cbc1d65ef4efdec8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 28,
"alnum_prop": 0.6714285714285714,
"repo_name": "marekpetrik/RAAM",
"id": "c9349a325b396f55f6d5adcfcbf9c25eb3fe390f",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raam/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144083"
},
{
"name": "Shell",
"bytes": "738"
}
],
"symlink_target": ""
} |
from .exceptions import OperationDeclarationException
from .memory import MEMORY, MemoryValue
TOKENS = {}
class Operation(type):
def __init__(cls, name, bases, dct):
if "TOKEN" in dct:
if dct["TOKEN"] not in TOKENS:
TOKENS[dct["TOKEN"]] = cls()
else:
raise OperationDeclarationException(f"Operation token {dct['TOKEN']} already in use")
else:
raise OperationDeclarationException("Operation must have TOKEN attribute")
if "execute" not in dct:
raise OperationDeclarationException("Operation must declare execute method")
super().__init__(name, bases, dct)
class LoadOperation(metaclass=Operation):
TOKEN = "LOAD"
def execute(self, arguments):
MEMORY.r.value = MEMORY.get_value_at_address(arguments[0]).value
class OutOperation(metaclass=Operation):
TOKEN = "OUT"
def execute(self, arguments):
print(f"{arguments[0]} -> {MEMORY.get_value_at_address(arguments[0]).value}")
class CompareOperation(metaclass=Operation):
TOKEN = "COMPARE"
def execute(self, arguments):
value1 = MEMORY.get_value_at_address(arguments[0]).value
value2 = MEMORY.r.value
MEMORY.gt.value = int(value1 > value2)
MEMORY.lt.value = int(value1 < value2)
MEMORY.eq.value = int(value1 == value2)
class StoreOperation(metaclass=Operation):
TOKEN = "STORE"
def execute(self, arguments):
MEMORY.set_value(MEMORY.r.value, arguments[0])
class InitOperation(metaclass=Operation):
TOKEN = "INIT"
def execute(self, arguments):
MEMORY.set_value(arguments[1], arguments[0])
class JumpOperation(metaclass=Operation):
TOKEN = "JUMP"
def execute(self, arguments):
MEMORY.pc.value = arguments[0]
class AddOperation(metaclass=Operation):
TOKEN = "ADD"
def execute(self, arguments):
if len(arguments) == 1:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.r.value += value
elif len(arguments) == 2:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.get_value_at_address(arguments[1]).value += value
elif len(arguments) == 3:
value1 = MEMORY.get_value_at_address(arguments[0]).value
value2 = MEMORY.get_value_at_address(arguments[1]).value
MEMORY.set_value(value1 + value2, arguments[2])
class SubtractOperation(metaclass=Operation):
TOKEN = "SUBTRACT"
def execute(self, arguments):
if len(arguments) == 1:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.r.value -= value
elif len(arguments) == 2:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.get_value_at_address(arguments[1]).value -= value
elif len(arguments) == 3:
value1 = MEMORY.get_value_at_address(arguments[0]).value
value2 = MEMORY.get_value_at_address(arguments[1]).value
MEMORY.set_value(value1 - value2, arguments[2])
class MultiplyOperation(metaclass=Operation):
TOKEN = "MULTIPLY"
def execute(self, arguments):
if len(arguments) == 1:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.r.value *= value
elif len(arguments) == 2:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.get_value_at_address(arguments[1]).value *= value
elif len(arguments) == 3:
value1 = MEMORY.get_value_at_address(arguments[0]).value
value2 = MEMORY.get_value_at_address(arguments[1]).value
MEMORY.set_value(value1 * value2, arguments[2])
class DivideOperation(metaclass=Operation):
TOKEN = "DIVIDE"
def execute(self, arguments):
if len(arguments) == 1:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.r.value /= value
elif len(arguments) == 2:
value = MEMORY.get_value_at_address(arguments[0]).value
MEMORY.get_value_at_address(arguments[1]).value /= value
elif len(arguments) == 3:
value1 = MEMORY.get_value_at_address(arguments[0]).value
value2 = MEMORY.get_value_at_address(arguments[1]).value
MEMORY.set_value(value1 / value2, arguments[2])
class JumpGTOperation(metaclass=Operation):
TOKEN = "JUMPGT"
def execute(self, arguments):
if MEMORY.gt.value == 1:
MEMORY.pc.value = arguments[0]
class HaltOperation(metaclass=Operation):
TOKEN = "HALT"
def execute(self, arguments):
MEMORY.pc.value = MEMORY.get_max_address() + 1
class ClearOperation(metaclass=Operation):
TOKEN = "CLEAR"
def execute(self, arguments):
MEMORY.set_value(0, arguments[0])
class IncrementOperation(metaclass=Operation):
TOKEN = "INCREMENT"
def execute(self, arguments):
MEMORY.set_value(MEMORY.get_value(arguments[0]) + 1, arguments[0])
class DecrementOperation(metaclass=Operation):
TOKEN = "DECREMENT"
def execute(self, arguments):
MEMORY.set_value(MEMORY.get_value(arguments[0]) - 1, arguments[0])
class JumpLTOperation(metaclass=Operation):
TOKEN = "JUMPLT"
def execute(self, arguments):
if MEMORY.lt.value == 1:
MEMORY.pc.value = arguments[0]
class JumpEQOperation(metaclass=Operation):
TOKEN = "JUMPEQ"
def execute(self, arguments):
if MEMORY.eq.value == 1:
MEMORY.pc.value = arguments[0]
class JumpNEQOperation(metaclass=Operation):
TOKEN = "JUMPNEQ"
def execute(self, arguments):
if MEMORY.eq.value == 0:
MEMORY.pc.value = arguments[0]
class InOperation(metaclass=Operation):
TOKEN = "IN"
def execute(self, arguments):
MEMORY.set_value(int(input("Value: ")), arguments[0])
class AnchorOperation(metaclass=Operation):
TOKEN = "ANCHOR"
def execute(self, arguments):
# Handling of this operation occurs in the interpreter during the loading stage
pass
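# Illustration only (not part of the original module): because the Operation
# metaclass fills TOKENS with one singleton instance per declared operation,
# an interpreter can dispatch a decoded instruction with a single dictionary
# lookup. The (token, arguments) instruction shape is assumed here.
def dispatch(token, arguments):
    try:
        operation = TOKENS[token]
    except KeyError:
        raise ValueError(f"Unknown operation token: {token}")
    operation.execute(arguments)
# e.g. dispatch("INIT", [0, 5]) should store 5 at address 0, following INIT's
# address-then-value argument order above.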
| {
"content_hash": "5fb134ef0c84e261255ff94ce8f610ea",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 101,
"avg_line_length": 29.689320388349515,
"alnum_prop": 0.6355461085676913,
"repo_name": "nint8835/InvitationASM",
"id": "54dcd82b3683547f99f6398de465f9b6ff81e282",
"size": "6116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InvitationASM/operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17551"
}
],
"symlink_target": ""
} |
"""Frontend handler tests."""
import os
import shutil
import tempfile
import unittest
from google.cloud import ndb
import frontend_handlers
from osv import models
from osv import tests
class FrontendHandlerTest(unittest.TestCase):
"""Frontend handler tests."""
def setUp(self):
tests.reset_emulator()
self.maxDiff = None # pylint: disable=invalid-name
self.tmp_dir = tempfile.mkdtemp()
tests.mock_datetime(self)
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def test_ecosystem_counts(self):
"""Test ecosystem counts aggregates correctly updates."""
models.Bug(
id='BLAH-0',
db_id='BLAH-0',
status=1,
public=True,
source='test',
affected_packages=[{
'package': {
'ecosystem': 'PyPI',
'name': 'blah',
},
}]).put()
models.Bug(
id='BLAH-1',
db_id='BLAH-1',
status=1,
public=True,
source='test',
affected_packages=[{
'package': {
'ecosystem': 'Debian:3.1',
'name': 'blah',
},
}, {
'package': {
'ecosystem': 'Debian:7',
'name': 'blah',
},
}]).put()
models.Bug(
id='BLAH-2',
db_id='BLAH-2',
status=1,
public=True,
source='test',
affected_packages=[{
'package': {
'ecosystem': 'Debian:8',
'name': 'blah',
},
}]).put()
# Invalid entries.
models.Bug(
id='BLAH-3',
db_id='BLAH-3',
status=2,
public=True,
source='test',
affected_packages=[{
'package': {
'ecosystem': 'Debian:8',
'name': 'blah',
},
}]).put()
models.Bug(
id='BLAH-4',
db_id='BLAH-4',
status=1,
public=False,
source='test',
affected_packages=[{
'package': {
'ecosystem': 'Debian:8',
'name': 'blah',
},
}]).put()
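    # Debian:3.1, Debian:7 and Debian:8 all roll up under a single 'Debian'
    # key, and each bug counts once per ecosystem, so only BLAH-1 and BLAH-2
    # contribute to it; the non-active and non-public bugs above are excluded.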
counts = frontend_handlers.osv_get_ecosystem_counts()
self.assertDictEqual({'Debian': 2, 'PyPI': 1}, counts)
if __name__ == '__main__':
os.system('pkill -f datastore')
ds_emulator = tests.start_datastore_emulator()
try:
with ndb.Client().context() as context:
context.set_memcache_policy(False)
context.set_cache_policy(False)
unittest.main()
finally:
# TODO(ochang): Cleaner way of properly cleaning up processes.
os.system('pkill -f datastore')
| {
"content_hash": "3ca7553c7a79937f130b75ce89ccda43",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 66,
"avg_line_length": 23.464912280701753,
"alnum_prop": 0.49794392523364484,
"repo_name": "google/osv.dev",
"id": "b382e85b33c0fdde38947bcecdbd01d31b1d29e7",
"size": "3250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcp/appengine/frontend_handlers_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "10995"
},
{
"name": "Go",
"bytes": "133088"
},
{
"name": "HTML",
"bytes": "30598"
},
{
"name": "JavaScript",
"bytes": "5795"
},
{
"name": "Makefile",
"bytes": "1325"
},
{
"name": "Python",
"bytes": "359843"
},
{
"name": "SCSS",
"bytes": "20439"
},
{
"name": "Shell",
"bytes": "17923"
}
],
"symlink_target": ""
} |
"""
Translates the MedleyDB Melody and Instrument Activation annotations to a set
of JAMS files.
The original data is found online at the following URL:
http://marl.smusic.nyu.edu/medleydb
Example:
./medleydb_parser.py MedleyDB/ [-o MedleyDB_JAMS/]
"""
__author__ = "Rachel M. Bittner"
__copyright__ = "Copyright 2014, Music and Audio Research Lab (MARL)"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "rachel.bittner@nyu.edu"
import argparse
import json
import logging
import os
import sys
import time
import yaml
sys.path.append("..")
import pyjams
import pyjams.util as U
MEL1 = "The f0 curve of predominant melodic line drawn from a single source"
MEL2 = "The f0 curve of predominant melodic line drawn from multiple sources"
MEL3 = "The f0 curves of all melodic lines drawn from multiple sources"
MELODY_DEFS = {1: MEL1, 2: MEL2, 3: MEL3}
def fill_file_metadata(jam, artist, title):
"""Fills the song-level metadata into the JAMS jam."""
jam.file_metadata.artist = artist
jam.file_metadata.title = title
def fill_genre_annotation_metadata(annot):
"""Fills the annotation metadata."""
annot.annotation_metadata.corpus = "MedleyDB"
annot.annotation_metadata.version = "1.0"
annot.annotation_metadata.annotation_tools = ""
annot.annotation_metadata.annotation_rules = ""
annot.annotation_metadata.validation = "None"
annot.annotation_metadata.data_source = "Manual Annotation"
annot.annotation_metadata.curator = pyjams.Curator(name="Rachel Bittner",
email='rachel.bittner@nyu.edu')
annot.annotation_metadata.annotator = {}
def fill_melody_annotation_metadata(annot, mel_type):
"""Fills the annotation metadata."""
annot.annotation_metadata.corpus = "MedleyDB"
annot.annotation_metadata.version = "1.0"
annot.annotation_metadata.annotation_tools = "Tony"
annot.annotation_metadata.annotation_rules = MELODY_DEFS[mel_type]
annot.annotation_metadata.validation = "Manual Validation"
annot.annotation_metadata.data_source = "Manual Annotation"
annot.annotation_metadata.curator = pyjams.Curator(name="Rachel Bittner",
email='rachel.bittner@nyu.edu')
annot.annotation_metadata.annotator = {}
def fill_instid_annotation_metadata(annot):
"""Fills the annotation metadata."""
annot.annotation_metadata.corpus = "MedleyDB"
annot.annotation_metadata.version = "1.0"
annot.annotation_metadata.annotation_tools = ""
annot.annotation_metadata.annotation_rules = ""
annot.annotation_metadata.validation = "None"
annot.annotation_metadata.data_source = "Automatic Annotation"
annot.annotation_metadata.curator = pyjams.Curator(name="Juan P. Bello",
email='jpbello@nyu.edu')
annot.annotation_metadata.annotator = {}
def fill_melody_annotation(annot_fpath, melody_annot, mel_type):
"""Fill a melody annotation with data from annot_fpath."""
times, values = U.read_lab(annot_fpath, 2, delimiter=",")
U.fill_timeseries_annotation_data(times, values, None, melody_annot)
fill_melody_annotation_metadata(melody_annot, mel_type)
def fill_instid_annotation(annot_fpath, instid_annot):
"""Fill an instrument id annotation with data from annot_fpath."""
start_times, end_times, inst_labels = U.read_lab(annot_fpath, 3,
delimiter=",", header=True)
U.fill_range_annotation_data(start_times, end_times, inst_labels,
instid_annot)
fill_instid_annotation_metadata(instid_annot)
def create_JAMS(dataset_dir, trackid, out_file):
"""Creates a JAMS file given the Isophonics lab file."""
metadata_file = os.path.join(dataset_dir, 'Audio', trackid,
'%s_METADATA.yaml' % trackid)
with open(metadata_file, 'r') as f_in:
metadata = yaml.load(f_in)
# New JAMS and annotation
jam = pyjams.JAMS()
# Global file metadata
fill_file_metadata(jam, metadata['artist'], metadata['title'])
# Create Genre Annotation
genre_annot = jam.genre.create_annotation()
U.fill_observation_annotation_data([metadata['genre']], [1], [""],
genre_annot)
fill_genre_annotation_metadata(genre_annot)
# Create Melody Annotations
melody_path = os.path.join(dataset_dir, 'Annotations', 'Melody_Annotations')
melody1_fpath = os.path.join(melody_path, 'MELODY1',
"%s_MELODY1.csv" % trackid)
if os.path.exists(melody1_fpath):
melody1_annot = jam.melody.create_annotation()
fill_melody_annotation(melody1_fpath, melody1_annot, 1)
melody2_fpath = os.path.join(melody_path, 'MELODY2',
"%s_MELODY2.csv" % trackid)
if os.path.exists(melody2_fpath):
melody2_annot = jam.melody.create_annotation()
fill_melody_annotation(melody2_fpath, melody2_annot, 2)
# Create SourceID Annotation
instid_fpath = os.path.join(dataset_dir, 'Annotations',
'Instrument_Activations', 'SOURCEID',
"%s_SOURCEID.lab" % trackid)
if os.path.exists(instid_fpath):
instid_annot = jam.source.create_annotation()
fill_instid_annotation(instid_fpath, instid_annot)
# jam.file_metadata.duration = end_times[-1]
# Save JAMS
with open(out_file, "w") as fp:
json.dump(jam, fp, indent=2)
def process(in_dir, out_dir):
"""Converts MedleyDB Annotations into JAMS format, and saves
them in the out_dir folder."""
# Collect all trackid's.
yaml_files = U.find_with_extension(os.path.join(in_dir, 'Audio'), 'yaml')
trackids = [U.filebase(y).replace("_METADATA", "") for y in yaml_files]
U.smkdirs(out_dir)
for trackid in trackids:
jams_file = os.path.join(out_dir, "%s.jams" % trackid)
#Create a JAMS file for this track
create_JAMS(in_dir, trackid, jams_file)
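# Hedged sketch (an illustrative helper, not part of the original parser): the
# metadata path that create_JAMS() above expects for a track, mirroring its
# os.path.join() call; dataset_dir and trackid are whatever process() passes in.
def _example_metadata_path(dataset_dir, trackid):
    """E.g. <dataset_dir>/Audio/<trackid>/<trackid>_METADATA.yaml"""
    return os.path.join(dataset_dir, 'Audio', trackid,
                        '%s_METADATA.yaml' % trackid)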
def main():
"""Main function to convert the dataset into JAMS."""
parser = argparse.ArgumentParser(
description="Converts the MARL-Chords dataset to the JAMS format",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("in_dir",
action="store",
help="Isophonics main folder")
parser.add_argument("-o",
action="store",
dest="out_dir",
default="outJAMS",
help="Output JAMS folder")
args = parser.parse_args()
start_time = time.time()
# Setup the logger
logging.basicConfig(format='%(asctime)s: %(message)s', level=logging.INFO)
# Run the parser
process(args.in_dir, args.out_dir)
# Done!
logging.info("Done! Took %.2f seconds.", time.time() - start_time)
if __name__ == '__main__':
main()
| {
"content_hash": "f09eb655dca7f39692f69c0d2d1fafe6",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 86,
"avg_line_length": 36.184615384615384,
"alnum_prop": 0.6374716553287982,
"repo_name": "urinieto/jams",
"id": "faacd4bad3af3b6c19b5163b14353a39960e6878",
"size": "7078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsers/medleydb_parser.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Matlab",
"bytes": "24899"
},
{
"name": "Python",
"bytes": "105220"
}
],
"symlink_target": ""
} |
"""VirtualHelixItem for the PropertyView.
Attributes:
KEY_COL (int): :class:`QTreeWidgetItem` column that will display property keys
VAL_COL (int): :class:`QTreeWidgetItem` column that will display property values
"""
from typing import (
List
)
from PyQt5.QtCore import (
Qt,
QModelIndex
)
from PyQt5.QtWidgets import (
QTreeWidgetItem,
QDoubleSpinBox,
QSpinBox,
QWidget,
QStyleOptionViewItem
)
from cadnano.proxies.cnenum import (
ItemEnum,
EnumType
)
from cadnano.controllers import VirtualHelixItemController
from .cnpropertyitem import CNPropertyItem
from cadnano.cntypes import (
VirtualHelixT,
NucleicAcidPartT,
KeyT,
ValueT
)
KEY_COL = 0
VAL_COL = 1
class VirtualHelixSetItem(CNPropertyItem):
"""VirtualHelixItem class for the PropertyView.
"""
_GROUPNAME = "helices"
def __init__(self, **kwargs):
"""
Args:
model_part (NucleicAcidPart): The model part
parent (:class:`PropertyEditorWidget`): the :class:`PropertyEditorWidget`
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for
description and related methods.
key (str, optional): Default is ``None``
"""
super(VirtualHelixSetItem, self).__init__(**kwargs)
if self._key == "name":
for outline_vh in self.outlineViewObjList():
self._controller_list.append(VirtualHelixItemController(self, outline_vh.part(), True, False))
# end def
### PUBLIC SUPPORT METHODS ###
def itemType(self) -> EnumType:
"""Overrides AbstractPropertyPartItem.itemType
Returns:
ItemEnum: VIRTUALHELIX
"""
return ItemEnum.VIRTUALHELIX
# end def
# SLOTS
def partVirtualHelixPropertyChangedSlot(self, sender: NucleicAcidPartT,
id_num: int,
virtual_helix: VirtualHelixT,
keys: KeyT,
values: ValueT):
"""
Args:
sender: Model object that emitted the signal.
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
            keys: Keys of the properties that changed.
            values: The corresponding new property values.
"""
if virtual_helix in self.outlineViewObjSet():
for key, val in zip(keys, values):
# print("change slot", key, val)
self.setValue(key, val)
# end def
def partVirtualHelixResizedSlot(self, sender: NucleicAcidPartT,
id_num: int,
virtual_helix: VirtualHelixT):
# print("resize slot")
if virtual_helix in self.outlineViewObjSet():
val = virtual_helix.getSize()
self.setValue('length', int(val))
# end def
def partVirtualHelixRemovingSlot(self, sender: NucleicAcidPartT,
id_num: int,
virtual_helix: VirtualHelixT,
neighbors: List[int]):
"""
Args:
sender (obj): Model object that emitted the signal.
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
            neighbors (list): ID numbers of the neighboring virtual helices.
"""
if virtual_helix in self.outlineViewObjSet():
self.disconnectSignals()
self.parent().removeChild(self)
# end def
def partVirtualHelixRemovedSlot(self, sender: NucleicAcidPartT, id_num: int):
"""
Args:
sender: Model object that emitted the signal.
id_num: VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
"""
# end def
def configureEditor(self, parent_qw: QWidget,
option: QStyleOptionViewItem,
model_index: QModelIndex) -> QWidget:
"""
Args:
            parent_qw: Parent widget for the editor.
            option: Style options used when rendering the item.
            model_index: Index of the item being edited.
Returns:
            the widget used to edit the item specified by model_index
"""
cn_m = self.outlineViewObj()
key = self.key()
if key == 'eulerZ':
editor = QDoubleSpinBox(parent_qw)
tpb, _ = cn_m.getTwistPerBase()
editor.setSingleStep(tpb)
editor.setDecimals(1)
editor.setRange(0, 359)
elif key == 'scamZ':
editor = QDoubleSpinBox(parent_qw)
tpb, _ = cn_m.getTwistPerBase()
editor.setSingleStep(tpb)
editor.setDecimals(1)
editor.setRange(0, 359)
elif key == 'length':
editor = QSpinBox(parent_qw)
bpr, length = cn_m.getProperty(['bases_per_repeat', 'length'])
editor.setRange(length, 4*length)
editor.setSingleStep(bpr)
elif key == 'z' and cn_m.part().isZEditable():
editor = QDoubleSpinBox(parent_qw)
bw = cn_m.part().baseWidth()
editor.setSingleStep(bw)
editor.setRange(-bw*21, bw*21)
else:
editor = CNPropertyItem.configureEditor(self, parent_qw, option, model_index)
return editor
# end def
def updateCNModel(self):
"""Notify the cadnano model that a property may need updating.
This method should be called by the item model dataChangedSlot.
"""
value = self.data(1, Qt.DisplayRole)
key = self._key
u_s = self.treeWidget().undoStack()
u_s.beginMacro("Multi Property VH Edit: %s" % key)
if key == 'length':
# print("Property view 'length' updating")
for vh in self.outlineViewObjList():
# print("vh", vh.idNum(), value, vh.getSize())
if value != vh.getSize():
vh.setSize(value)
elif key == 'z':
# print("Property view 'z' updating", key, value)
for vh in self.outlineViewObjList():
if value != vh.getZ():
vh.setZ(value)
else:
for vh in self.outlineViewObjList():
if value != vh.getProperty(key):
vh.setProperty(key, value)
u_s.endMacro()
# end def
# end class
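# Minimal sketch (an illustration, not a cadnano API guarantee) of the
# undo-macro pattern used by updateCNModel() above: per-item property writes
# are grouped into one named macro so a single undo reverts the whole edit.
def _example_grouped_edit(undo_stack, items, key, value):
    undo_stack.beginMacro("Multi Property VH Edit: %s" % key)
    for item in items:
        if value != item.getProperty(key):
            item.setProperty(key, value)
    undo_stack.endMacro()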
| {
"content_hash": "b7fd21dd7ffa160fe443ad05554db0d2",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 110,
"avg_line_length": 34.558510638297875,
"alnum_prop": 0.552100969678313,
"repo_name": "scholer/cadnano2.5",
"id": "7efa4cc782189f0531e576deaf0fabf725074ef4",
"size": "6521",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cadnano/views/propertyview/virtualhelixitem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2617"
},
{
"name": "Python",
"bytes": "1624263"
},
{
"name": "QMake",
"bytes": "3719"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import sys
import pkg_resources
import argcomplete
import traceback
import subprocess
import click
from click.exceptions import ClickException
from dateutil import parser
from .pydl4j import set_config, get_config
from .pydl4j import validate_config, is_docker_available
from .pydl4j import _maven_build
if sys.version_info[0] == 2:
input = raw_input
_CONFIG = get_config()
DEFAULT_DL4J_VERSION = _CONFIG['dl4j_version']
DEFAULT_BACKEND = _CONFIG['nd4j_backend']
DEFAULT_DATAVEC = _CONFIG['datavec']
DEFAULT_SPARK = _CONFIG['spark']
DEFAULT_SPARK_MAJOR = _CONFIG['spark_version']
DEFAULT_SCALA_VERSION = _CONFIG['scala_version']
DEFAULT_SPARK_DETAILS = 'y'
def to_bool(string):
if type(string) is bool:
return string
return True if string[0] in ["Y", "y"] else False
class CLI(object):
def __init__(self):
self.var_args = None
self.command = None
def command_dispatcher(self, args=None):
desc = ('pydl4j, a system to manage your DL4J dependencies from Python.\n')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'-v', '--version', action='version',
version=pkg_resources.get_distribution("pydl4j").version,
help='Print pydl4j version'
)
subparsers = parser.add_subparsers(title='subcommands', dest='command')
subparsers.add_parser('init', help='Initialize pydl4j')
subparsers.add_parser('install', help='Install jars for pydl4j')
argcomplete.autocomplete(parser)
args = parser.parse_args(args)
self.var_args = vars(args)
if not args.command:
parser.print_help()
return
self.command = args.command
if self.command == 'init':
self.init()
return
if self.command == 'install':
self.install()
return
def init(self):
click.echo(click.style(u"""\n██████╗ ██╗ ██╗██████╗ ██╗██╗ ██╗ ██╗
██╔══██╗╚██╗ ██╔╝██╔══██╗██║██║ ██║ ██║
██████╔╝ ╚████╔╝ ██║ ██║██║███████║ ██║
██╔═══╝ ╚██╔╝ ██║ ██║██║╚════██║██ ██║
██║ ██║ ██████╔╝███████╗██║╚█████╔╝
╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚════╝ \n""", fg='blue', bold=True))
click.echo(click.style("pydl4j", bold=True) +
" is a system to manage your DL4J dependencies from Python!\n")
# DL4J version
dl4j_version = input("Which DL4J version do you want to use for your Python projects? (default '%s'): " %
DEFAULT_DL4J_VERSION) or DEFAULT_DL4J_VERSION
# TODO: check if input is valid
# ND4J backend
backend = input("Which backend would you like to use ('cpu' or 'gpu')? (default '%s'): " %
DEFAULT_BACKEND) or DEFAULT_BACKEND
backend = backend.lower()
# DataVec usage
datavec = input(
"Do you need DL4J DataVec for ETL? (default 'y') [y/n]: ") or DEFAULT_DATAVEC
datavec = to_bool(datavec)
# DL4J core usage
DEFAULT_DL4J = 'y'
dl4j_core = input(
"Do you want to work with DeepLearning4J from Python? (default 'y') [y/n]: ") or DEFAULT_DL4J
dl4j_core = to_bool(dl4j_core)
# Spark
spark = input(
"Do you need Spark for distributed computation in your application? (default 'y') [y/n]: ") or DEFAULT_SPARK
spark = to_bool(spark)
spark_version = DEFAULT_SPARK_MAJOR
scala_version = DEFAULT_SCALA_VERSION
if spark:
spark_details = input("We use Spark {} and Scala {} by default, is this OK for you? (default 'y') [y/n]: ".format(DEFAULT_SPARK_MAJOR,
DEFAULT_SCALA_VERSION)) or DEFAULT_SPARK_DETAILS
if not spark_details[0] in ["Y", "y"]:
                spark_version = input("Which major Spark release would you like to use? (default '%s'): " %
DEFAULT_SPARK_MAJOR) or DEFAULT_SPARK_MAJOR
scala_version = input("Which Scala version would you like to use? (default '%s'): " %
DEFAULT_SCALA_VERSION) or DEFAULT_SCALA_VERSION
cli_out = {
'dl4j_version': dl4j_version,
'nd4j_backend': backend,
'dl4j_core': dl4j_core,
'datavec': datavec,
'spark': spark,
'spark_version': spark_version,
'scala_version': scala_version
}
validate_config(cli_out)
formatted_json = json.dumps(cli_out, sort_keys=False, indent=2)
click.echo("\nThis is your current settings file " +
click.style("config.json", bold=True) + ":\n")
click.echo(click.style(formatted_json, fg="green", bold=True))
confirm = input(
"\nDoes this look good? (default 'y') [y/n]: ") or 'yes'
if not to_bool(confirm):
click.echo(
"" + click.style("Please initialize pydl4j once again", fg="red", bold=True))
return
set_config(cli_out)
def install(self):
if is_docker_available():
use_docker = input(
"Docker available on your system. Would you like to use docker for installation> (default 'y')[y/n]: ") or 'yes'
if to_bool(use_docker):
click.echo(click.style(
"Docker is running, starting installation.", fg="green", bold=True))
click.echo(click.style("========\n\nNote that this might take some time to complete.\n" +
"We will first pull a docker container with Maven, then install all dependencies selected with 'pydl4j init'.\n" +
"After completion you can start using DL4J from Python.\n\n========", fg="green", bold=False))
_maven_build(use_docker=True)
else:
click.echo(click.style("========\n\nNote that this might take some time to complete.\n" +
"After completion you can start using DL4J from Python.\n\n========", fg="green", bold=False))
_maven_build(use_docker=False)
else:
click.echo(
"" + click.style("Could not detect docker on your system.", fg="red", bold=True))
click.echo(click.style("========\n\nNote that this might take some time to complete.\n" +
"After completion you can start using DL4J from Python.\n\n========", fg="green", bold=False))
_maven_build(use_docker=False)
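# Illustrative example (hypothetical answers; the version strings are
# placeholders, not recommendations) of the settings dict that CLI.init()
# above assembles and hands to validate_config()/set_config():
_EXAMPLE_CLI_OUT = {
    'dl4j_version': '1.0.0-beta',
    'nd4j_backend': 'cpu',
    'dl4j_core': True,
    'datavec': True,
    'spark': True,
    'spark_version': '2',
    'scala_version': '2.11',
}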
def handle():
try:
cli = CLI()
sys.exit(cli.command_dispatcher())
except KeyboardInterrupt:
sys.exit()
except Exception as e:
click.echo(click.style("Error: ", fg='red', bold=True))
traceback.print_exc()
sys.exit()
if __name__ == '__main__':
handle()
| {
"content_hash": "250db800b3c2058ea6e7d942d36913f6",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 174,
"avg_line_length": 37.8563829787234,
"alnum_prop": 0.5420823380637909,
"repo_name": "RobAltena/deeplearning4j",
"id": "f48d4e01e0a31e00d018236a5dc54962f47f9f04",
"size": "8311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydl4j/pydl4j/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2469"
},
{
"name": "C",
"bytes": "144275"
},
{
"name": "C#",
"bytes": "138404"
},
{
"name": "C++",
"bytes": "16954560"
},
{
"name": "CMake",
"bytes": "77377"
},
{
"name": "CSS",
"bytes": "10363"
},
{
"name": "Cuda",
"bytes": "2324886"
},
{
"name": "Dockerfile",
"bytes": "1329"
},
{
"name": "FreeMarker",
"bytes": "77045"
},
{
"name": "HTML",
"bytes": "38914"
},
{
"name": "Java",
"bytes": "36293636"
},
{
"name": "JavaScript",
"bytes": "436278"
},
{
"name": "PureBasic",
"bytes": "12256"
},
{
"name": "Python",
"bytes": "325018"
},
{
"name": "Ruby",
"bytes": "4558"
},
{
"name": "Scala",
"bytes": "355054"
},
{
"name": "Shell",
"bytes": "80490"
},
{
"name": "Smarty",
"bytes": "900"
},
{
"name": "Starlark",
"bytes": "931"
},
{
"name": "TypeScript",
"bytes": "80252"
}
],
"symlink_target": ""
} |
# This code is part of pyCMPL
#
# Copyright (C) 2013
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# pyCMPL is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# pyCMPL is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyCMPL is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python -i
import os
import sys
from pyCmpl import *
def pyCmplShell():
if sys.version_info[:2] == (2,7):
print "CMPL\'s Python API"
sys.ps1 = "pyCmpl> "
sys.ps2 = "....... "
pyCmplAnswer=""
while pyCmplAnswer!="quit()" and pyCmplAnswer!="exit()" :
try:
pyCmplAnswer = raw_input("pyCmpl> ")
if pyCmplAnswer!="quit()" and pyCmplAnswer!="exit()":
exec(pyCmplAnswer)
except CmplException, e:
print e.msg
except:
print "Python error : " +str(sys.exc_info()[1])
else:
print "ERROR - pyCmpl only supports Python 2.7"
exit(0)
if __name__ == "__main__":
pyCmplShell()
| {
"content_hash": "7ea70bf66a096179f7bc257078e0b949",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 77,
"avg_line_length": 27.8,
"alnum_prop": 0.6502490315439956,
"repo_name": "Mangara/ArboralExplorer",
"id": "c0cef33080a857ee23c3e2d318e6b79ded3c1db2",
"size": "1880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Cmpl/pyCmpl/lib/pyCmpl/pyCmplShell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2773"
},
{
"name": "Java",
"bytes": "140503"
},
{
"name": "Python",
"bytes": "194573"
},
{
"name": "Shell",
"bytes": "6500"
}
],
"symlink_target": ""
} |
import os
import urllib
import datetime
import csv
import unicodecsv
def daterange(start, stop, step_days=1):
current = start
step = datetime.timedelta(step_days)
if step_days > 0:
while current < stop:
yield current
current += step
elif step_days < 0:
while current > stop:
yield current
current += step
else:
raise ValueError("daterange() step_days argument must not be zero")
step = 7
date_format = '%Y-%m-%d'
DOWNLOADS_DIR = './cache'
startRange = datetime.date(2014, 12, 29)
endRange = datetime.date(2015, 12, 31)
with open('../processing/poseidon/data/quakes-sample.csv', 'wb') as result:
a = unicodecsv.writer(result, encoding='utf-8')
i = 0
result.write("time,latitude,longitude,depth,mag,dmin,rms\r\n")
for i in daterange(startRange, endRange, step):
start = i.strftime(date_format)
end = (i+datetime.timedelta(days=step-1)).strftime(date_format)
query = os.path.join(DOWNLOADS_DIR, start + "_" + end + ".csv")
previousDate = None
x = 0
with open(query, "rb") as source:
rdr = csv.reader( source )
wtr = csv.writer( result )
next(rdr)
for row in rdr:
if (x > 20):
#currentDate = datetime.datetime.strptime( row[0], "%Y-%m-%dT%H:%M:%S.%fZ" )
#if (previousDate):
# if (previousDate.date() < currentDate.date()):
wtr.writerow( (row[0], row[1], row[2], row[3], row[4], row[8], row[9]) )
x = 0
x += 1
#previousDate = datetime.datetime.strptime( row[0], "%Y-%m-%dT%H:%M:%S.%fZ" )
| {
"content_hash": "bfec184f2e5f272e1145fb5711487d40",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 81,
"avg_line_length": 26.854545454545455,
"alnum_prop": 0.6479350033852403,
"repo_name": "struct78/fault-trace",
"id": "3db9505279ba3d31963a6cd90c06f61621bc3499",
"size": "1477",
"binary": false,
"copies": "1",
"ref": "refs/heads/blackhole",
"path": "src/python/random-sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Processing",
"bytes": "74651"
},
{
"name": "Python",
"bytes": "7557"
},
{
"name": "Shell",
"bytes": "1612"
}
],
"symlink_target": ""
} |
from .importer import BlackduckCRImporter
from dojo.models import Finding
class BlackduckComponentRiskParser(object):
"""
Can import as exported from Blackduck:
- from a zip file containing a security.csv, sources.csv and components.csv
"""
def get_scan_types(self):
return ["Blackduck Component Risk"]
def get_label_for_scan_types(self, scan_type):
return "Blackduck Component Risk"
def get_description_for_scan_types(self, scan_type):
return "Upload the zip file containing the security.csv and files.csv."
def get_findings(self, filename, test):
"""
Function initializes the parser with a file and returns the items.
:param filename: Input in Defect Dojo
:param test:
"""
components, securities, sources = self.import_data(filename)
return self.ingest_findings(components, securities, sources, test)
def import_data(self, filename) -> (dict, dict, dict):
"""
Calls the Importer from dojo/tools/blackduck_component_risk/importer to
parse through the zip file and export needed information from the
three relevant files (security, source and components).
:param filename: Name of the zipfile. Passed in via Defect Dojo
:return: Returns a tuple of dictionaries, Components and Securities.
"""
importer = BlackduckCRImporter()
components, securities, sources = importer.parse_findings(filename)
return components, securities, sources
def ingest_findings(self, components, securities, sources, test):
"""
Takes the components and securities from the importer that parsed the zip file, and
iterates over them, creating findings.
:param components: Dictionary containing all components from the components csv
:param securities: Dictionary containing all security vulnerabilities for each component
:param sources: Dictionary containing all sources data from the sources csv
:param test:
:return:
"""
items = []
# License Risk
license_risk = []
for component_id, component in components.items():
source = {}
# Find the sources.csv data for this component
for id, src in sources.items():
if id in component_id:
source = src
if component.get('Component policy status') == "In Violation":
# We have us a license risk:
title = self.license_title(component)
description = self.license_description(component, source)
severity = "High"
mitigation = self.license_mitigation(component)
impact = "N/A"
references = self.license_references(component)
finding = Finding(title=title,
test=test,
description=description,
severity=severity,
mitigation=mitigation,
impact=impact,
references=references,
static_finding=True,
unique_id_from_tool=component_id)
license_risk.append(finding)
elif "None" not in self.license_severity(component):
# We have a license risk for review, but not directly "In Violation"
title = "Review " + self.license_title(component)
description = self.license_description(component, source)
severity = self.license_severity(component)
mitigation = self.license_mitigation(component, False)
impact = "N/A"
references = self.license_references(component)
finding = Finding(title=title,
test=test,
description=description,
severity=severity,
mitigation=mitigation,
impact=impact,
references=references,
static_finding=True,
unique_id_from_tool=component_id)
license_risk.append(finding)
items.extend(license_risk)
# Security Risk
security_risk = []
for component_id, vulns in securities.items():
title = self.security_title(vulns)
description = self.security_description(vulns)
severity = self.security_severity(vulns)
mitigation = self.security_mitigation(vulns)
impact = self.security_impact(vulns)
references = self.security_references(vulns)
file_path = self.security_filepath(vulns)
finding = Finding(title=title,
test=test,
description=description,
severity=severity,
mitigation=mitigation,
impact=impact,
references=references,
static_finding=True,
file_path=file_path,
unique_id_from_tool=component_id)
security_risk.append(finding)
items.extend(security_risk)
return items
def license_title(self, component):
"""
        Uses the Component name and Component version name. The Origin id is sometimes blank,
        whereas the component name and version name appear to always be populated.
:param component: Dictionary containing all components.
:return:
"""
return "License Risk: {}:{}".format(component.get('Component name'),
component.get('Component version name'))
def license_description(self, component, source):
"""
Pulls out all important information from the components CSV regarding the License in use.
        :param component: Dictionary containing all components.
        :param source: Dictionary containing the sources.csv data for this component.
        :return:
"""
desc = "**License Name:** {} \n".format(component.get('License names'))
desc += "**License Families:** {} \n".format(component.get('License families'))
desc += "**License Usage:** {} \n".format(component.get('Usage'))
desc += "**License Origin name:** {} \n".format(component.get('Origin name'))
desc += "**License Origin id:** {} \n".format(component.get('Origin id'))
desc += "**Match type:** {}\n".format(component.get('Match type'))
try:
desc += "**Path:** {}\n".format(source.get('Path'))
desc += "**Archive context:** {}\n".format(source.get('Archive context'))
desc += "**Scan:** {}\n".format(source.get('Scan'))
except KeyError:
desc += "**Path:** Unable to find path in source data."
desc += "**Archive context:** Unable to find archive context in source data."
desc += "**Scan:** Unable to find scan in source data."
return desc
def license_mitigation(self, component, violation=True):
"""
Uses Component name and Component version name to display the package.
:param component: Dictionary containing all components.
:param violation: Boolean indicating if this is a violation or for review
:return:
"""
mit = ""
if violation:
mit = "Package has a license that is In Violation and should not be used: {}:{}. ".format(
component.get('Component name'), component.get('Component version name')
)
mit += "Please use another component with an acceptable license."
else:
mit = "Package has a potential license risk and should be reviewed: {}:{}. ".format(
component.get('Component name'), component.get('Component version name')
)
mit += "A legal review may indicate that another component should be used with an acceptable license."
return mit
def license_references(self, component):
return "**Project:** {}\n".format(component.get('Project path'))
def security_title(self, vulns):
"""
Creates the Title using the Component name and Component version name.
These should be identical for each vuln in the list.
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
title = "Security Risk: {}:{}".format(vulns[0]["Component name"],
vulns[0]["Component version name"])
return title
def security_description(self, vulns):
"""
        Markdown-formatted description that displays information about each CVE found in the
csv file for a given component.
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
desc = "#Vulnerabilities \nThis component version contains the following " \
"vulnerabilities:\n\n"
for vuln in vulns:
desc += "###{} \n".format(vuln["Vulnerability id"])
desc += "**Base Score:** {} \n**Exploitability:** {} \n**Impact:** {}\n".format(
vuln["Base score"], vuln["Exploitability"], vuln["Impact"]
)
# Not all have a URL
if vuln["URL"] != "":
desc += "**URL:** [{}]({})\n".format(vuln["Vulnerability id"],
vuln["URL"])
desc += "**Description:** {}\n".format(vuln["Description"])
return desc
def license_severity(self, component):
"""
        Maps the all-caps "License Risk" value from the CSV to the case that Defect Dojo
        expects, defaulting to "None" when the value is missing or unrecognized.
        :param component: Dictionary containing all components.
        :return:
"""
map = {"HIGH": "High", "MEDIUM": "Medium", "LOW": "Low", "INFO": "Info",
"CRITICAL": "Critical", "OK": "None"}
sev = "None"
try:
sev = map[component.get('License Risk')]
except KeyError:
sev = "None"
return sev
def security_severity(self, vulns):
"""
        Iterates over the base scores of all vulnerabilities and picks the max. A map is used to
        translate the all-caps format of the CSV to the case that Defect Dojo expects.
        (A .lower() or case-insensitive comparison could be used instead.)
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
map = {"HIGH": "High", "MEDIUM": "Medium", "LOW": "Low", "INFO": "Info",
"CRITICAL": "Critical"}
max_severity = 0.0
sev = "Info"
for vuln in vulns:
if float(vuln["Base score"]) > max_severity:
max_severity = float(vuln["Base score"])
sev = map[vuln["Security Risk"]]
return sev
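    # Worked illustration (made-up scores, not from a real export): for vulns
    # with ("Base score", "Security Risk") pairs of (4.3, "MEDIUM") and
    # (9.8, "CRITICAL"), the loop above keeps 9.8 as the max and the method
    # returns "Critical".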
def security_mitigation(self, vulns):
"""
        Mitigation is always to update the package; that is the entire point of Blackduck:
        identifying when projects are using vulnerable versions of components. The component
        is identified as name:version_name.
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
mit = "Update component {}:{} to a secure version".format(
vulns[0]["Component name"], vulns[0]["Component version name"]
)
return mit
def security_impact(self, vulns):
"""
        Each vuln has an impact rating; iterate over them and return the largest value.
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
max_impact = 0.0
for vuln in vulns:
if float(vuln["Impact"]) > max_impact:
max_impact = float(vuln["Impact"])
return max_impact
def security_references(self, vulns):
"""
        Collects the URL fields from the csv. Not all findings have a URL, so a reference
        entry is only created for those that do.
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
references = "**Project:** {}\n".format(vulns[0]["Project path"])
for vuln in vulns:
if vuln["URL"] != "":
references += "{}: [{}]({})\n".format(vuln["Vulnerability id"], vuln["URL"],
vuln["URL"])
return references
def security_filepath(self, vulns):
"""
        The origin name (maven, github, npmjs, etc.) and the component origin id are used. However,
        not all items have an origin id, so to still match as closely as possible,
        "component_name/version" is used as a fallback.
1. origin:component_origin_id
2. origin:component_name/version
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
if vulns[0]["Component origin id"] == "":
component_key = "{}/{}".format(vulns[0]["Component name"],
vulns[0]["Component version name"])
else:
component_key = vulns[0]["Component origin id"]
return "{}:{}".format(vulns[0]["Component origin name"], component_key)
| {
"content_hash": "e858fb97fbfed5d2ddbcfb9bf99ca0c3",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 114,
"avg_line_length": 45.800664451827245,
"alnum_prop": 0.5608588423037865,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "5711fd3c8cf1259f2a974b1c91eaad7a63e79c43",
"size": "13813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/tools/blackduck_component_risk/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import sys
import os
import warnings
import ruamel.yaml as yaml
__author__ = "Pymatgen Development Team"
__email__ ="pymatgen@googlegroups.com"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ ="shyuep@gmail.com"
__version__ = "2018.6.11"
SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml")
def _load_pmg_settings():
try:
with open(SETTINGS_FILE, "rt") as f:
d = yaml.safe_load(f)
except IOError:
# If there are any errors, default to using environment variables
# if present.
d = {}
for k, v in os.environ.items():
if k.startswith("PMG_"):
d[k] = v
elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]:
d["PMG_" + k] = v
clean_d = {}
for k, v in d.items():
if not k.startswith("PMG_"):
warnings.warn('With effect from pmg 5.0, all pymatgen settings are'
' prefixed with a "PMG_". E.g., "PMG_VASP_PSP_DIR" '
'instead of "VASP_PSP_DIR".')
clean_d["PMG_" + k] = v
else:
clean_d[k] = v
return clean_d
SETTINGS = _load_pmg_settings()
# Order of imports is important on some systems to avoid
# failures when loading shared libraries.
# import spglib
# from . import optimization, util
# del(spglib, optimization, util)
# Useful aliases for commonly used objects and modules.
# Allows from pymatgen import <class> for quick usage.
from pymatgen.core import *
from .electronic_structure.core import Spin, Orbital
from .ext.matproj import MPRester
from monty.json import MontyEncoder, MontyDecoder, MSONable
def get_structure_from_mp(formula):
"""
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
"""
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure
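# Hedged usage sketch (assumes PMG_MAPI_KEY is configured and network access is
# available; "Fe2O3" is only an illustrative formula):
def _example_query_mp():
    structure = get_structure_from_mp("Fe2O3")
    return structure.composition.reduced_formula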
if sys.version_info < (3, 5):
warnings.warn("""
Pymatgen will drop Py2k support from v2019.1.1. Please consult the documentation
at https://www.pymatgen.org for more details.""")
| {
"content_hash": "4a49dcf191b60399d05703c29fb72f86",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 80,
"avg_line_length": 31.597701149425287,
"alnum_prop": 0.6184066933430338,
"repo_name": "nisse3000/pymatgen",
"id": "a6b747320f3205f83859c8aa0cd4975a39375d29",
"size": "2749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6934548"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
try:
from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import ssl
import threading
import pytest
from requests.compat import urljoin
def prepare_url(value):
# Issue #1483: Make sure the URL always has a trailing slash
httpbin_url = value.url.rstrip("/") + "/"
def inner(*suffix):
return urljoin(httpbin_url, "/".join(suffix))
return inner
@pytest.fixture
def httpbin(httpbin):
return prepare_url(httpbin)
@pytest.fixture
def httpbin_secure(httpbin_secure):
return prepare_url(httpbin_secure)
@pytest.fixture
def nosan_server(tmp_path_factory):
# delay importing until the fixture in order to make it possible
# to deselect the test via command-line when trustme is not available
import trustme
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only commonName, no subjectAltName
server_cert = ca.issue_cert(common_name="localhost")
ca_bundle = str(tmpdir / "ca.pem")
ca.cert_pem.write_to_path(ca_bundle)
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
server_cert.configure_cert(context)
server = HTTPServer(("localhost", 0), SimpleHTTPRequestHandler)
server.socket = context.wrap_socket(server.socket, server_side=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
yield "localhost", server.server_address[1], ca_bundle
server.shutdown()
server_thread.join()
| {
"content_hash": "52d98a0df2e013e7477c1912139eb193",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 27.25862068965517,
"alnum_prop": 0.7235926628716003,
"repo_name": "psf/requests",
"id": "530a4c2a5f9917d62ffed2e2eecfdb8164655740",
"size": "1581",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "796"
},
{
"name": "Python",
"bytes": "340075"
}
],
"symlink_target": ""
} |
import boto3
import datetime
import json
kinesis = boto3.client("kinesis")
payload = {
"network": "array_of_things_chicago",
"meta_id": 0,
"node_id": "0000001e0610ba72",
"sensor": "tmp421",
"data": {"temperature": 10.0},
"datetime": str(datetime.datetime.now()),
}
kinesis.put_record(**{
"StreamName": "ValidationStream",
"PartitionKey": "arbitrary",
"Data": json.dumps(payload)
})
print(payload)
| {
"content_hash": "bbd1bfe04c0b245ff043e54ef9100781",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 22.17391304347826,
"alnum_prop": 0.5450980392156862,
"repo_name": "UrbanCCD-UChicago/plenario-lambdas",
"id": "20b5ea634514a682f59dd80276450e372dc7f004",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/observation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "45524"
},
{
"name": "Python",
"bytes": "19641"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scattersmith.textfont", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
| {
"content_hash": "7706ccb8123eb19ff4a13b7009ac5e87",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 33.57142857142857,
"alnum_prop": 0.597872340425532,
"repo_name": "plotly/plotly.py",
"id": "2d7df6248cde2122bdd03c7e67e0b64f0749cff8",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattersmith/textfont/_color.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import collections
srcfile = os.path.realpath(__file__)
deployDir = os.path.split(srcfile)[0]
rootDirProject = os.path.split(deployDir)[0]
electionConfig = rootDirProject + "/_configFiles_/handlerConfigFile.json"
try:
jsonFile = open(electionConfig, 'r+')
jsonData = json.load(jsonFile, object_pairs_hook=collections.OrderedDict)
jsonData["deployment"] = True
jsonFile.seek(0)
json.dump(jsonData, jsonFile, indent = 4)
jsonFile.truncate()
jsonFile.close()
except IOError:
print('Handler configuration file missing or corrupted ("deployment" field not found)')
| {
"content_hash": "2fbdcc2bd44e5c70b588a5b48923623d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 91,
"avg_line_length": 29.904761904761905,
"alnum_prop": 0.7340764331210191,
"repo_name": "escapin/ElectionManager",
"id": "6f1dc4f8f92425c011453a67f1cb9ca4548689c2",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deployment/configDeployment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "65239"
},
{
"name": "HTML",
"bytes": "17136"
},
{
"name": "JavaScript",
"bytes": "595387"
},
{
"name": "Makefile",
"bytes": "3276"
},
{
"name": "Python",
"bytes": "74572"
},
{
"name": "Shell",
"bytes": "3463"
}
],
"symlink_target": ""
} |
import contextlib
import threading
from oslo_db.sqlalchemy import session
from oslo_log import log as logging
from sqlalchemy import MetaData
from trove.common import cfg
from trove.common.i18n import _
from trove.db.sqlalchemy import mappers
_FACADE = None
_LOCK = threading.Lock()
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def configure_db(options, models_mapper=None):
facade = _create_facade(options)
if models_mapper:
models_mapper.map(facade)
else:
from trove.backup import models as backup_models
from trove.cluster import models as cluster_models
from trove.conductor import models as conductor_models
from trove.configuration import models as configurations_models
from trove.datastore import models as datastores_models
from trove.dns import models as dns_models
from trove.extensions.mysql import models as mysql_models
from trove.extensions.security_group import models as secgrp_models
from trove.guestagent import models as agent_models
from trove.instance import models as base_models
from trove.module import models as module_models
from trove.quota import models as quota_models
model_modules = [
base_models,
datastores_models,
dns_models,
mysql_models,
agent_models,
quota_models,
backup_models,
secgrp_models,
configurations_models,
conductor_models,
cluster_models,
module_models
]
models = {}
for module in model_modules:
models.update(module.persisted_models())
mappers.map(get_engine(), models)
def _create_facade(options):
global _LOCK, _FACADE
# TODO(mvandijk): Refactor this once oslo.db spec is implemented:
# https://specs.openstack.org/openstack/oslo-specs/specs/kilo/
# make-enginefacade-a-facade.html
if _FACADE is None:
with _LOCK:
if _FACADE is None:
conf = CONF.database
# pop the deprecated config option 'query_log'
if conf.query_log:
if conf.connection_debug < 50:
conf['connection_debug'] = 50
LOG.warning(_('Configuration option "query_log" has been '
                                  'deprecated. Use "connection_debug" '
'instead. Setting connection_debug = '
'%(debug_level)s instead.'),
conf.get('connection_debug'))
# TODO(mvandijk): once query_log is removed,
# use enginefacade.from_config() instead
database_opts = dict(CONF.database)
database_opts.pop('query_log')
_FACADE = session.EngineFacade(
options['database']['connection'],
**database_opts
)
return _FACADE
def _check_facade():
if _FACADE is None:
msg = _("***The Database has not been setup!!!***")
LOG.exception(msg)
raise RuntimeError(msg)
def get_facade():
_check_facade()
return _FACADE
def get_engine(use_slave=False):
_check_facade()
return _FACADE.get_engine(use_slave=use_slave)
def get_session(**kwargs):
return get_facade().get_session(**kwargs)
def raw_query(model, **kwargs):
return get_session(**kwargs).query(model)
def clean_db():
engine = get_engine()
meta = MetaData()
meta.bind = engine
meta.reflect()
with contextlib.closing(engine.connect()) as con:
trans = con.begin()
for table in reversed(meta.sorted_tables):
if table.name != "migrate_version":
con.execute(table.delete())
trans.commit()
def drop_db(options):
if options:
_create_facade(options)
engine = get_engine()
meta = MetaData()
meta.bind = engine
meta.reflect()
meta.drop_all()
| {
"content_hash": "4cd71b8438f9c56443b1591b6b8eff36",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 78,
"avg_line_length": 30.470149253731343,
"alnum_prop": 0.5919666911584619,
"repo_name": "zhangg/trove",
"id": "188b7d5123425cf07880c352b23ee7a9ba6b7ad0",
"size": "4719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/db/sqlalchemy/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4546016"
},
{
"name": "Shell",
"bytes": "145524"
}
],
"symlink_target": ""
} |
import os
import sys
from scipy.io import loadmat
homepath = os.path.join('..', '..')
if homepath not in sys.path:
sys.path.insert(0, homepath)
import dlearn.stats as stats
from dlearn.utils.serialize import save_data
def compute_stats(output, target):
n = target.shape[1]
ret = [0] * n
for j in xrange(n):
o = output[:, j].ravel()
t = target[:, j].ravel()
fpr, tpr, thresh = stats.roc(o, t)
auc = stats.auc(fpr, tpr)
ret[j] = (auc, fpr, tpr, thresh)
return ret
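# Hedged sketch (tiny made-up arrays: two samples, one attribute column) of how
# compute_stats() is called; it returns one (auc, fpr, tpr, thresh) per column.
def _example_compute_stats():
    import numpy as np
    output = np.array([[0.9], [0.1]])
    target = np.array([[1], [0]])
    return compute_stats(output, target)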
def show_stats(ret):
import matplotlib.pyplot as plt
n_cols = 4
n_rows = len(ret) // n_cols + 1
for j, (auc, fpr, tpr, thresh) in enumerate(ret):
# Plot stats
plt.subplot(n_rows, n_cols, j + 1)
plt.plot(fpr, tpr)
plt.title('AUC = {:.2f}%'.format(auc * 100))
plt.show()
matdata = loadmat('svm_result_mix.mat')
target = matdata['targets']
output = matdata['outputs']
ret = compute_stats(output, target)
save_data(ret, 'stats_attr_svm_mix.pkl')
show_stats(ret)
| {
"content_hash": "f8b571d9a02fa9daa02b044ef4869722",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 53,
"avg_line_length": 21.791666666666668,
"alnum_prop": 0.5994263862332696,
"repo_name": "Cysu/dlearn",
"id": "56b5f1562579062cb52dd8eab11f46728c4c053e",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/human_sar/evaluate_svm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1125"
},
{
"name": "Python",
"bytes": "126320"
},
{
"name": "Shell",
"bytes": "7881"
}
],
"symlink_target": ""
} |
import os
import shutil
import stat
import tempfile
import threading
import time
import unittest
from collections import namedtuple
from pyspark import SparkConf, SparkFiles, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, SPARK_HOME
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
            post_parallelize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallelize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
# When thread is pinned, job group should be set for each thread for now.
                # Local properties do not seem to be inherited the way they are on the Scala side.
if os.environ.get("PYSPARK_PIN_THREAD", "false").lower() == "true":
sc.setJobGroup('test_progress_api', '', True)
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
def test_forbid_insecure_gateway(self):
# Fail immediately if you try to create a SparkContext
# with an insecure gateway
parameters = namedtuple('MockGatewayParameters', 'auth_token')(None)
mock_insecure_gateway = namedtuple('MockJavaGateway', 'gateway_parameters')(parameters)
with self.assertRaises(ValueError) as context:
SparkContext(gateway=mock_insecure_gateway)
self.assertIn("insecure Py4j gateway", str(context.exception))
def test_resources(self):
"""Test the resources are empty by default."""
with SparkContext() as sc:
resources = sc.resources
self.assertEqual(len(resources), 0)
class ContextTestsWithResources(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
self.tempFile = tempfile.NamedTemporaryFile(delete=False)
self.tempFile.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [\\"0\\"]}')
self.tempFile.close()
# create temporary directory for Worker resources coordination
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
os.chmod(self.tempFile.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
stat.S_IROTH | stat.S_IXOTH)
conf = SparkConf().set("spark.test.home", SPARK_HOME)
conf = conf.set("spark.resources.dir", self.tempdir.name)
conf = conf.set("spark.driver.resource.gpu.amount", "1")
conf = conf.set("spark.driver.resource.gpu.discoveryScript", self.tempFile.name)
self.sc = SparkContext('local-cluster[2,1,1024]', class_name, conf=conf)
def test_resources(self):
"""Test the resources are available."""
resources = self.sc.resources
self.assertEqual(len(resources), 1)
self.assertTrue('gpu' in resources)
self.assertEqual(resources['gpu'].name, 'gpu')
self.assertEqual(resources['gpu'].addresses, ['0'])
def tearDown(self):
os.unlink(self.tempFile.name)
shutil.rmtree(self.tempdir.name)
self.sc.stop()
if __name__ == "__main__":
from pyspark.tests.test_context import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| {
"content_hash": "c96be6117fb3b85fb58d423d77763c9f",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 95,
"avg_line_length": 40.40338983050847,
"alnum_prop": 0.6421679671113348,
"repo_name": "darionyaphet/spark",
"id": "edfea42bed71d8a6f38a0ccda21a8cb7cc1ea8b4",
"size": "12703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/tests/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "49712"
},
{
"name": "Batchfile",
"bytes": "31400"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26852"
},
{
"name": "Dockerfile",
"bytes": "9127"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4154533"
},
{
"name": "JavaScript",
"bytes": "209928"
},
{
"name": "Makefile",
"bytes": "7776"
},
{
"name": "PLSQL",
"bytes": "8614"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3194369"
},
{
"name": "R",
"bytes": "1188507"
},
{
"name": "Roff",
"bytes": "36450"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32470257"
},
{
"name": "Shell",
"bytes": "209294"
},
{
"name": "TSQL",
"bytes": "474891"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
from ramrod import base, errors, utils
from ramrod.options import DEFAULT_UPDATE_OPTIONS
from ramrod.cybox import Cybox_2_0_Updater
# relative
from . import register_updater
from .base import BaseSTIXUpdater, STIXVocab
class MotivationVocab(STIXVocab):
OLD_TYPES = ('MotivationVocab-1.0',)
NEW_TYPE = 'MotivationVocab-1.0.1'
VOCAB_REFERENCE = 'http://stix.mitre.org/XMLSchema/default_vocabularies/1.0.1/stix_default_vocabularies.xsd#MotivationVocab-1.0.1'
VOCAB_NAME = 'STIX Default Motivation Vocabulary'
TERMS = {
"Ideological - Anti-Establisment": "Ideological - Anti-Establishment",
}
class PlanningAndOperationalSupportVocab(STIXVocab):
OLD_TYPES = ('PlanningAndOperationalSupportVocab-1.0',)
NEW_TYPE = 'PlanningAndOperationalSupportVocab-1.0.1'
    VOCAB_REFERENCE = 'http://stix.mitre.org/XMLSchema/default_vocabularies/1.0.1/stix_default_vocabularies.xsd#PlanningAndOperationalSupportVocab-1.0.1'
VOCAB_NAME = 'STIX Default Planning and Operational Support Vocabulary'
TERMS = {
"Planning - Open-Source Intelligence (OSINT) Gethering": "Planning - Open-Source Intelligence (OSINT) Gathering",
"Planning ": "Planning"
}
class DisallowedMAEC(base.DisallowedFields):
CTX_TYPES = {
"MAEC4.0InstanceType": "http://stix.mitre.org/extensions/Malware#MAEC4.0-1"
}
class DisallowedMalware(base.DisallowedFields):
"""A ``ttp:Malware`` field **must** contain at least one child. If all
children are instances of the MAEC Malware Extension, they will be removed
and leave the parent ``ttp:Malware`` instance with no children, rendering
it schema-invalid.
This flags the ``ttp:Malware`` field as disallowed if it contains only
MAEC Malware Extension instances.
"""
XPATH = ".//ttp:Malware"
NS_MAEC_EXT = "http://stix.mitre.org/extensions/Malware#MAEC4.0-1"
@classmethod
def _check_maec(cls, node):
"""Returns ``True`` if every child node is an instance of the MAEC
Malware extension.
"""
try:
namespaces = (utils.get_ext_namespace(x) for x in utils.iterchildren(node))
return all(ns == cls.NS_MAEC_EXT for ns in namespaces)
except KeyError:
# At least one node didn't contain an xsi:type attribute
return False
@classmethod
def _interrogate(cls, nodes):
return [x for x in nodes if cls._check_maec(x)]
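    # Worked illustration (hypothetical content): a ttp:Malware element whose
    # children all carry an xsi:type bound to the MAEC namespace above is
    # returned by _interrogate() and therefore reported as untranslatable.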
class DisallowedCAPEC(base.DisallowedFields):
CTX_TYPES = {
"CAPEC2.5InstanceType": "http://stix.mitre.org/extensions/AP#CAPEC2.5-1"
}
class DisallowedAttackPatterns(base.DisallowedFields):
"""A ``ttp:Attack_Patterns`` field **must** contain at least one child. If
all children are instances of the CAPEC Attack Pattern Extension, they will
be removed and leave the parent ``ttp:Attack_Patterns`` instance with no
children, rendering it schema-invalid.
This flags the ``ttp:Attack_Patterns`` field as disallowed if it contains
only CAPEC Attack Pattern Extension instances.
"""
XPATH = ".//ttp:Attack_Patterns"
NS_CAPEC_EXT = "http://stix.mitre.org/extensions/AP#CAPEC2.5-1"
@classmethod
def _check_capec(cls, node):
"""Returns ``True`` if every child node is an instance of the CAPEC
Attack Pattern extension.
"""
try:
namespaces = (utils.get_ext_namespace(x) for x in utils.iterchildren(node))
return all(ns == cls.NS_CAPEC_EXT for ns in namespaces)
except KeyError:
# At least one node didn't contain an xsi:type attribute
return False
@classmethod
def _interrogate(cls, nodes):
return [x for x in nodes if cls._check_capec(x)]
@register_updater
class STIX_1_0_Updater(BaseSTIXUpdater):
"""Updates STIX v1.0 content to STIX v1.0.1.
The following fields and types are translated:
* ``MotivationVocab-1.0`` upgraded to ``MotivationVocab-1.0.1``
* ``PlanningAndOperationalSupportVocab-1.0`` updated to
``PlanningAndOperationalSupportVocab 1.0.1``
The following fields and types **cannot** be translated:
* MAEC 4.0 Malware extension instances
* CAPEC 2.5 Attack Pattern extension instances
* ``TTP:Malware`` nodes that contain only MAEC Malware_Instance
children
* ``TTP:Attack_Patterns`` nodes that contain only CAPEC Attack Pattern
instance children
"""
VERSION = '1.0'
NSMAP = {
'campaign': 'http://stix.mitre.org/Campaign-1',
'stix-capec': 'http://stix.mitre.org/extensions/AP#CAPEC2.5-1',
'ciqAddress': 'http://stix.mitre.org/extensions/Address#CIQAddress3.0-1',
'stix-ciq': 'http://stix.mitre.org/extensions/Identity#stix-ciq3.0-1',
'coa': 'http://stix.mitre.org/CourseOfAction-1',
'et': 'http://stix.mitre.org/ExploitTarget-1',
'genericStructuredCOA': 'http://stix.mitre.org/extensions/StructuredCOA#Generic-1',
'genericTM': 'http://stix.mitre.org/extensions/TestMechanism#Generic-1',
'incident': 'http://stix.mitre.org/Incident-1',
'indicator': 'http://stix.mitre.org/Indicator-2',
'stix-maec': 'http://stix.mitre.org/extensions/Malware#MAEC4.0-1',
'marking': 'http://data-marking.mitre.org/Marking-1',
'stix-openioc': 'http://stix.mitre.org/extensions/TestMechanism#OpenIOC2010-1',
'stix-oval': 'http://stix.mitre.org/extensions/TestMechanism#OVAL5.10-1',
'simpleMarking': 'http://data-marking.mitre.org/extensions/MarkingStructure#Simple-1',
'snortTM': 'http://stix.mitre.org/extensions/TestMechanism#Snort-1',
'stix': 'http://stix.mitre.org/stix-1',
'stixCommon': 'http://stix.mitre.org/common-1',
'stixVocabs': 'http://stix.mitre.org/default_vocabularies-1',
'ta': 'http://stix.mitre.org/ThreatActor-1',
'tlpMarking': 'http://data-marking.mitre.org/extensions/MarkingStructure#TLP-1',
'ttp': 'http://stix.mitre.org/TTP-1',
'stix-cvrf': 'http://stix.mitre.org/extensions/Vulnerability#CVRF-1',
'yaraTM': 'http://stix.mitre.org/extensions/TestMechanism#YARA-1'
}
DISALLOWED_NAMESPACES = (
'http://stix.mitre.org/extensions/AP#CAPEC2.5-1',
'http://stix.mitre.org/extensions/Malware#MAEC4.0-1',
)
DISALLOWED = (
DisallowedCAPEC,
DisallowedMAEC,
DisallowedMalware,
DisallowedAttackPatterns,
)
# STIX v1.0.1 NS => STIX v1.0.1 SCHEMALOC
UPDATE_SCHEMALOC_MAP = {
'http://data-marking.mitre.org/Marking-1': 'http://stix.mitre.org/XMLSchema/data_marking/1.0.1/data_marking.xsd',
'http://data-marking.mitre.org/extensions/MarkingStructure#Simple-1': 'http://stix.mitre.org/XMLSchema/extensions/marking/simple_marking/1.0.1/simple_marking.xsd',
'http://data-marking.mitre.org/extensions/MarkingStructure#TLP-1': 'http://stix.mitre.org/XMLSchema/extensions/marking/tlp/1.0.1/tlp.xsd',
'http://stix.mitre.org/Campaign-1': 'http://stix.mitre.org/XMLSchema/campaign/1.0.1/campaign.xsd',
'http://stix.mitre.org/CourseOfAction-1': 'http://stix.mitre.org/XMLSchema/course_of_action/1.0.1/course_of_action.xsd',
'http://stix.mitre.org/ExploitTarget-1': 'http://stix.mitre.org/XMLSchema/exploit_target/1.0.1/exploit_target.xsd',
'http://stix.mitre.org/Incident-1': 'http://stix.mitre.org/XMLSchema/incident/1.0.1/incident.xsd',
'http://stix.mitre.org/Indicator-2': 'http://stix.mitre.org/XMLSchema/indicator/2.0.1/indicator.xsd',
'http://stix.mitre.org/TTP-1': 'http://stix.mitre.org/XMLSchema/ttp/1.0.1/ttp.xsd',
'http://stix.mitre.org/ThreatActor-1': 'http://stix.mitre.org/XMLSchema/threat_actor/1.0.1/threat_actor.xsd',
'http://stix.mitre.org/common-1': 'http://stix.mitre.org/XMLSchema/common/1.0.1/stix_common.xsd',
'http://stix.mitre.org/default_vocabularies-1': 'http://stix.mitre.org/XMLSchema/default_vocabularies/1.0.1/stix_default_vocabularies.xsd',
'http://stix.mitre.org/extensions/AP#CAPEC2.6-1': 'http://stix.mitre.org/XMLSchema/extensions/attack_pattern/capec_2.6.1/1.0.1/capec_2.6.1.xsd',
'http://stix.mitre.org/extensions/Address#CIQAddress3.0-1': 'http://stix.mitre.org/XMLSchema/extensions/address/ciq_address_3.0/1.0.1/ciq_address_3.0.xsd',
'http://stix.mitre.org/extensions/Identity#CIQIdentity3.0-1': 'http://stix.mitre.org/XMLSchema/extensions/identity/ciq_identity_3.0/1.0.1/ciq_identity_3.0.xsd',
'http://stix.mitre.org/extensions/Malware#MAEC4.0-1': 'http://stix.mitre.org/XMLSchema/extensions/malware/maec_4.0.1/1.0.1/maec_4.0.1.xsd',
'http://stix.mitre.org/extensions/StructuredCOA#Generic-1': 'http://stix.mitre.org/XMLSchema/extensions/structured_coa/generic/1.0.1/generic.xsd',
'http://stix.mitre.org/extensions/TestMechanism#Generic-1': 'http://stix.mitre.org/XMLSchema/extensions/test_mechanism/generic/1.0.1/generic.xsd',
'http://stix.mitre.org/extensions/TestMechanism#OVAL5.10-1': 'http://stix.mitre.org/XMLSchema/extensions/test_mechanism/oval_5.10/1.0.1/oval_5.10.xsd',
'http://stix.mitre.org/extensions/TestMechanism#OpenIOC2010-1': 'http://stix.mitre.org/XMLSchema/extensions/test_mechanism/open_ioc_2010/1.0.1/open_ioc_2010.xsd',
'http://stix.mitre.org/extensions/TestMechanism#Snort-1': 'http://stix.mitre.org/XMLSchema/extensions/test_mechanism/snort/1.0.1/snort.xsd',
'http://stix.mitre.org/extensions/TestMechanism#YARA-1': 'http://stix.mitre.org/XMLSchema/extensions/test_mechanism/yara/1.0.1/yara.xsd',
'http://stix.mitre.org/extensions/Vulnerability#CVRF-1': 'http://stix.mitre.org/XMLSchema/extensions/vulnerability/cvrf_1.1/1.0.1/cvrf_1.1.xsd',
'http://stix.mitre.org/stix-1': 'http://stix.mitre.org/XMLSchema/core/1.0.1/stix_core.xsd',
}
UPDATE_VOCABS = (
MotivationVocab,
PlanningAndOperationalSupportVocab,
)
CYBOX_UPDATER = Cybox_2_0_Updater
def __init__(self):
super(STIX_1_0_Updater, self).__init__()
self._init_cybox_updater()
def _init_cybox_updater(self):
super(STIX_1_0_Updater, self)._init_cybox_updater()
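# Restrict the CybOX updater to the XPath locations where CybOX content is embedded in a STIX document.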
selectors = (
"//stix:Observables | "
"//incident:Structured_Description | "
"//ttp:Observable_Characterization"
)
updater = self._cybox_updater # noqa
updater.XPATH_ROOT_NODES = selectors
updater.XPATH_VERSIONED_NODES = selectors
def _get_duplicates(self, root):
"""The STIX v1.0.1 schema does not enforce ID uniqueness, so this
overrides the default ``_get_duplicates()`` by immediately returning.
Note:
This assumes that `root` is schema-valid.
"""
pass
def _get_disallowed(self, root, options=None):
"""Finds all xml entities under `root` that cannot be updated.
Note:
This checks for both untranslatable STIX and CybOX entities.
Args:
root: The top-level xml node
Returns:
A list of untranslatable items.
"""
disallowed = []
for klass in self.DISALLOWED:
found = klass.find(root)
disallowed.extend(found)
disallowed_cybox = self._cybox_updater._get_disallowed(root) # noqa
if disallowed_cybox:
disallowed.extend(disallowed_cybox)
return disallowed
def _clean_disallowed(self, disallowed, options):
"""Removes the `disallowed` nodes from the source document.
Args:
disallowed: A list of nodes to remove from the source document.
Returns:
A list of `disallowed` node copies.
"""
removed = []
for node in disallowed:
dup = utils.copy_xml_element(node)
utils.remove_xml_element(node)
removed.append(dup)
return removed
def _update_versions(self, root):
"""Updates the versions of versioned nodes under `root` to align with
STIX v1.0.1 versions.
"""
nodes = self._get_versioned_nodes(root)
for node in nodes:
name = utils.get_localname(node)
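# Indicator structures are versioned separately (2.0 -> 2.0.1); all other versioned nodes move to 1.0.1.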
if name == "Indicator":
node.attrib['version'] = '2.0.1'
else:
node.attrib['version'] = '1.0.1'
def _update_cybox(self, root, options):
"""Updates the CybOX content found under the `root` node.
Returns:
An updated `root` node. This may be a new ``etree._Element``
instance.
"""
updated = self._cybox_updater._update(root, options) # noqa
return updated
def check_update(self, root, options=None):
"""Determines if the input document can be upgraded.
Args:
root: The XML document. This can be a filename, a file-like object,
an instance of ``etree._Element`` or an instance of
``etree._ElementTree``.
options (optional): A ``ramrod.UpdateOptions`` instance. If
``None``, ``ramrod.DEFAULT_UPDATE_OPTIONS`` will be used.
Raises:
.UnknownVersionError: If the input document does not have a
version.
.InvalidVersionError: If the version of the input document
does not match the `VERSION` class-level attribute value.
.UpdateError: If the input document contains fields which
cannot be updated or constructs with non-unique IDs are discovered.
"""
root = utils.get_etree_root(root)
options = options or DEFAULT_UPDATE_OPTIONS
if options.check_versions:
self._check_version(root)
self._cybox_updater._check_version(root) # noqa
disallowed = self._get_disallowed(root)
if not disallowed:
return
raise errors.UpdateError(
message="Found untranslatable fields in source document.",
disallowed=disallowed
)
def _update(self, root, options):
updated = self._update_cybox(root, options)
updated = self._update_namespaces(updated)
self._update_schemalocs(updated)
self._update_versions(updated)
if options.update_vocabularies:
self._update_vocabs(updated)
return updated
| {
"content_hash": "066311d28022ba3a94378c782084f590",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 171,
"avg_line_length": 41.81104651162791,
"alnum_prop": 0.6500034763262185,
"repo_name": "STIXProject/stix-ramrod",
"id": "34adde36ac40cade7c928b80fa93aaab8593e91f",
"size": "14499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ramrod/stix/stix_1_0.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "292338"
}
],
"symlink_target": ""
} |
""" Run bot logic
If --Run Task-- is selected, the task will be sent to tw_daemon.py.
Then tw_daemon will run the main twitter logic (twitter_daemon.py)
"""
import yaml # pip install pyyaml is needed
import logging
import pika # pip install pika is needed
import sys
SERVER_IP = "localhost" # if no config
#logging.basicConfig(level = logging.DEBUG)
class YamlConfig(object):
""" get and ste config """
def __init(self):
""" Constructor """
pass
def get_config(self):
""" read config.yaml and return the configured server IP """
logging.info("Read config.yaml file.")
with open('config.yaml', 'r') as config_file:
config = yaml.load(config_file)
logging.info("SERVER_IP init")
SERVER_IP = config["parameters"]["server_ip"]
return SERVER_IP
class SendCommand(object):
""" Send command message to the queue for tw_server.py"""
def __init__(self):
""" Constructor """
self.task_queue = "task_queue" # ===> config ?
self.result_queue = "server_return_queue" # ===> config ?
self.task = "test task" # to do variable
# connection initialization
logging.info("Connect to server ...")
self.connection = pika.BlockingConnection(pika.ConnectionParameters(SERVER_IP))
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.result_queue)
logging.info("connection established.")
def __del__(self):
""" Destructor """
logging.info("Close connection.")
self.connection.close()
def _consume(self):
# wait for result from tw_server queue="result_queue"
print " [x] Waiting for answer from server"
print " [x] To EXIT press Ctrl+C"
self.channel.basic_consume(self._on_response, queue=self.result_queue, no_ack=True)
try:
self.channel.start_consuming()
except KeyboardInterrupt:
print "\nExit"
sys.exit(0)
def _on_response(self, ch, method, properties, body):
print "Daemon response: {0}".format(body)
def send_task(self, task):
""" send the task message to the broker on the task queue """
self.task = task
logging.info("send message(task)")
# send task for tw_server queue="task_queue"
self.channel.basic_publish(exchange='', routing_key=self.task_queue, body=self.task)
print " [x] Sent {0}".format(self.task)
self._consume()
conf = YamlConfig()
SERVER_IP = conf.get_config()
sender = SendCommand()
select = True
while select:
print("""
1. Run Task.
2. Print Help.
0. Exit.
""")
select = raw_input("Select ")
if select=="1":
print ("\t\t--> post_tweet message\n\t\t--> search q")
task = raw_input("Enter task: ")
task_list = task.split(' ')
if task_list[0] != "post_tweet" and task_list[0] != "search" or len(task_list) != 2:
print "Not valid command"
break
print "\n\tRun task:"
sender.send_task(task)
elif select=="2":
print "\n\tPrint help: If --Run Task-- is selected, the task will be sent to tw_daemon.py."
print "\tThen tw_deamon will run main twitter logic (twitter_daemon.py)"
print "\t\t commands for daemon:"
print ("\t\t--> post_tweet message\n\t\t--> search q")
elif select=="0":
print "\n\tExit."
break
elif select!="":
print ("\n\tNot Valid Choice")
| {
"content_hash": "675b2e96e2785070b425bd656efa2ced",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 100,
"avg_line_length": 35.56603773584906,
"alnum_prop": 0.5949602122015916,
"repo_name": "merzlyakov-me/Twitter_Bot",
"id": "da578416e2022a0b75807de2b73189da922bad95",
"size": "3808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/tw_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6378"
},
{
"name": "Shell",
"bytes": "3454"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('taskmanager', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('name', models.CharField(verbose_name='name', max_length=100, help_text='Enter the project name')),
('color', models.CharField(verbose_name='color', validators=[django.core.validators.RegexValidator('(^#[0-9a-fA-F]{3}$)|(^#[0-9a-fA-F]{6}$)')], default='#fff', max_length=7, help_text='Enter the hex color code, like #ccc or #cccccc')),
('user', models.ForeignKey(verbose_name='user', related_name='profjects', to='taskmanager.Profile')),
],
options={
'ordering': ('user', 'name'),
'verbose_name': 'Project',
'verbose_name_plural': 'Projects',
},
),
migrations.AlterUniqueTogether(
name='project',
unique_together=set([('user', 'name')]),
),
]
| {
"content_hash": "0bfb22d81d538cef6ae7d41904e7b113",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 251,
"avg_line_length": 39.5625,
"alnum_prop": 0.5647709320695102,
"repo_name": "memnonila/taskbuster",
"id": "756b2f87569593cd2e91679a4b5c5bb79d16970d",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskbuster/apps/taskmanager/migrations/0002_auto_20150708_1158.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "240"
},
{
"name": "HTML",
"bytes": "6674"
},
{
"name": "JavaScript",
"bytes": "244"
},
{
"name": "Python",
"bytes": "22556"
}
],
"symlink_target": ""
} |
"""Default high score mode."""
| {
"content_hash": "c736d3a0bc6debcaf44b200e28ab4fa7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.6451612903225806,
"repo_name": "missionpinball/mpf",
"id": "87352e235d5faaed6a63ac1bb498ad42062c1fc8",
"size": "31",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpf/modes/high_score/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "4019"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "4532953"
}
],
"symlink_target": ""
} |
import testtools
from neutron.agent.linux import keepalived
from neutron.tests import base
# Keepalived user guide:
# http://www.keepalived.org/pdf/UserGuide.pdf
class KeepalivedGetFreeRangeTestCase(base.BaseTestCase):
def test_get_free_range(self):
free_range = keepalived.get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=['169.254.0.0/24',
'169.254.1.0/24',
'169.254.2.0/24'],
size=24)
self.assertEqual('169.254.3.0/24', free_range)
def test_get_free_range_without_excluded(self):
free_range = keepalived.get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=[],
size=20)
self.assertEqual('169.254.0.0/20', free_range)
def test_get_free_range_excluded_out_of_parent(self):
free_range = keepalived.get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=['255.255.255.0/24'],
size=24)
self.assertEqual('169.254.0.0/24', free_range)
def test_get_free_range_not_found(self):
tiny_parent_range = '192.168.1.0/24'
huge_size = 8
with testtools.ExpectedException(ValueError):
keepalived.get_free_range(
parent_range=tiny_parent_range,
excluded_ranges=[],
size=huge_size)
class KeepalivedConfBaseMixin(object):
def _get_config(self):
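# Build a two-instance keepalived configuration shared by the test cases below.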
config = keepalived.KeepalivedConf()
instance1 = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
'169.254.192.0/18',
advert_int=5)
instance1.set_authentication('AH', 'pass123')
instance1.track_interfaces.append("eth0")
instance1.set_notify('master', '/tmp/script.sh')
vip_address1 = keepalived.KeepalivedVipAddress('192.168.1.0/24',
'eth1')
vip_address2 = keepalived.KeepalivedVipAddress('192.168.2.0/24',
'eth2')
vip_address3 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
'eth2')
vip_address_ex = keepalived.KeepalivedVipAddress('192.168.55.0/24',
'eth10')
instance1.vips.append(vip_address1)
instance1.vips.append(vip_address2)
instance1.vips.append(vip_address3)
instance1.vips.append(vip_address_ex)
virtual_route = keepalived.KeepalivedVirtualRoute("0.0.0.0/0",
"192.168.1.1",
"eth1")
instance1.virtual_routes.append(virtual_route)
instance2 = keepalived.KeepalivedInstance('MASTER', 'eth4', 2,
'169.254.192.0/18',
mcast_src_ip='224.0.0.1')
instance2.track_interfaces.append("eth4")
vip_address1 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
'eth6')
instance2.vips.append(vip_address1)
instance2.vips.append(vip_address2)
instance2.vips.append(vip_address_ex)
config.add_instance(instance1)
config.add_instance(instance2)
return config
class KeepalivedConfTestCase(base.BaseTestCase,
KeepalivedConfBaseMixin):
expected = """vrrp_instance VR_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 50
advert_int 5
authentication {
auth_type AH
auth_pass pass123
}
track_interface {
eth0
}
virtual_ipaddress {
169.254.0.1/24 dev eth0
}
virtual_ipaddress_excluded {
192.168.1.0/24 dev eth1
192.168.2.0/24 dev eth2
192.168.3.0/24 dev eth2
192.168.55.0/24 dev eth10
}
virtual_routes {
0.0.0.0/0 via 192.168.1.1 dev eth1
}
notify_master "/tmp/script.sh"
}
vrrp_instance VR_2 {
state MASTER
interface eth4
virtual_router_id 2
priority 50
mcast_src_ip 224.0.0.1
track_interface {
eth4
}
virtual_ipaddress {
169.254.0.2/24 dev eth4
}
virtual_ipaddress_excluded {
192.168.2.0/24 dev eth2
192.168.3.0/24 dev eth6
192.168.55.0/24 dev eth10
}
}"""
def test_config_generation(self):
config = self._get_config()
self.assertEqual(self.expected, config.get_config_str())
def test_config_with_reset(self):
config = self._get_config()
self.assertEqual(self.expected, config.get_config_str())
config.reset()
self.assertEqual('', config.get_config_str())
def test_get_existing_vip_ip_addresses_returns_list(self):
config = self._get_config()
instance = config.get_instance(1)
current_vips = sorted(instance.get_existing_vip_ip_addresses('eth2'))
self.assertEqual(['192.168.2.0/24', '192.168.3.0/24'], current_vips)
class KeepalivedStateExceptionTestCase(base.BaseTestCase):
def test_state_exception(self):
instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
'169.254.192.0/18')
invalid_notify_state = 'a seal walks'
self.assertRaises(keepalived.InvalidNotifyStateException,
instance.set_notify,
invalid_notify_state, '/tmp/script.sh')
invalid_vrrp_state = 'into a club'
self.assertRaises(keepalived.InvalidInstanceStateException,
keepalived.KeepalivedInstance,
invalid_vrrp_state, 'eth0', 33, '169.254.192.0/18')
invalid_auth_type = '[hip, hip]'
instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
'169.254.192.0/18')
self.assertRaises(keepalived.InvalidAuthenticationTypeExecption,
instance.set_authentication,
invalid_auth_type, 'some_password')
class KeepalivedInstanceTestCase(base.BaseTestCase,
KeepalivedConfBaseMixin):
def test_generate_primary_vip(self):
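# The primary VIP encodes the VRRP router id (42) as the host octet within 169.254.0.0/24, as asserted below.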
instance = keepalived.KeepalivedInstance('MASTER', 'ha0', 42,
'169.254.192.0/18')
self.assertEqual('169.254.0.42/24',
str(instance._generate_primary_vip()))
def test_remove_adresses_by_interface(self):
config = self._get_config()
instance = config.get_instance(1)
instance.remove_vips_vroutes_by_interface('eth2')
instance.remove_vips_vroutes_by_interface('eth10')
expected = """vrrp_instance VR_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 50
advert_int 5
authentication {
auth_type AH
auth_pass pass123
}
track_interface {
eth0
}
virtual_ipaddress {
169.254.0.1/24 dev eth0
}
virtual_ipaddress_excluded {
192.168.1.0/24 dev eth1
}
virtual_routes {
0.0.0.0/0 via 192.168.1.1 dev eth1
}
notify_master "/tmp/script.sh"
}
vrrp_instance VR_2 {
state MASTER
interface eth4
virtual_router_id 2
priority 50
mcast_src_ip 224.0.0.1
track_interface {
eth4
}
virtual_ipaddress {
169.254.0.2/24 dev eth4
}
virtual_ipaddress_excluded {
192.168.2.0/24 dev eth2
192.168.3.0/24 dev eth6
192.168.55.0/24 dev eth10
}
}"""
self.assertEqual(expected, config.get_config_str())
def test_build_config_no_vips(self):
expected = """vrrp_instance VR_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 50
virtual_ipaddress {
169.254.0.1/24 dev eth0
}
}"""
instance = keepalived.KeepalivedInstance(
'MASTER', 'eth0', 1, '169.254.192.0/18')
self.assertEqual(expected, '\n'.join(instance.build_config()))
class KeepalivedVipAddressTestCase(base.BaseTestCase):
def test_vip_with_scope(self):
vip = keepalived.KeepalivedVipAddress('fe80::3e97:eff:fe26:3bfa/64',
'eth1',
'link')
self.assertEqual('fe80::3e97:eff:fe26:3bfa/64 dev eth1 scope link',
vip.build_config())
class KeepalivedVirtualRouteTestCase(base.BaseTestCase):
def test_virtual_route_with_dev(self):
route = keepalived.KeepalivedVirtualRoute('0.0.0.0/0', '1.2.3.4',
'eth0')
self.assertEqual('0.0.0.0/0 via 1.2.3.4 dev eth0',
route.build_config())
def test_virtual_route_without_dev(self):
route = keepalived.KeepalivedVirtualRoute('50.0.0.0/8', '1.2.3.4')
self.assertEqual('50.0.0.0/8 via 1.2.3.4', route.build_config())
| {
"content_hash": "6a47059ad70a7508902471d1e42f8b66",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 77,
"avg_line_length": 33.226618705035975,
"alnum_prop": 0.5524520948359857,
"repo_name": "cloudbase/neutron-virtualbox",
"id": "624ccd2f851ea68a81ee024ae256fc4d061a4ff0",
"size": "9844",
"binary": false,
"copies": "1",
"ref": "refs/heads/virtualbox_agent",
"path": "neutron/tests/unit/agent/linux/test_keepalived.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "8448838"
},
{
"name": "Shell",
"bytes": "12510"
}
],
"symlink_target": ""
} |
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
# GLOBAL SETTINGS
RNG_SEED = 8
ENVIRONMENT = "CartPole-v0"
# ENVIRONMENT = "CartPole-v1"
MAX_EPISODES = 1000
HIDDEN_LAYER = True
HIDDEN_SIZE = 6
DISPLAY_WEIGHTS = False # Help debug weight update
RENDER = False # Render the generation representative
gamma = 0.99 # Discount per step
alpha = 0.02205 # Learning rate
# Upload to OpenAI
UPLOAD = False
EPISODE_INTERVAL = 50 # Generate a video at this interval
SESSION_FOLDER = "/tmp/CartPole-experiment-1"
API_KEY = ""
SUCCESS_THRESHOLD = 195
# SUCCESS_THRESHOLD = 475
CONSECUTIVE_TARGET = 100
def record_interval(n):
global EPISODE_INTERVAL
return n % EPISODE_INTERVAL == 0
env = gym.make(ENVIRONMENT)
if UPLOAD:
env = wrappers.Monitor(env, SESSION_FOLDER, video_callable=record_interval)
env.seed(RNG_SEED)
np.random.seed(RNG_SEED)
tf.set_random_seed(RNG_SEED)
input_size = env.observation_space.shape[0]
try:
output_size = env.action_space.shape[0]
except AttributeError:
output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
w_init = tf.contrib.layers.xavier_initializer()
if HIDDEN_LAYER:
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
else:
dist_W = tf.get_variable("W1", shape=[input_size, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
dist = tf.tanh(tf.matmul(x, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
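# Select the log-probability of the first action and use it as the logit of a Bernoulli policy over the two discrete actions.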
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
optimizer = tf.train.RMSPropOptimizer(alpha)
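# REINFORCE update: minimizing -(expected return * log-probability) performs gradient ascent on expected return.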
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render:
environment.render()
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
obs, reward, done, info = environment.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
global HIDDEN_LAYER
if HIDDEN_LAYER:
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
else:
w1 = session.run(dist_W)
b1 = session.run(dist_B)
print(w1, b1)
returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, RENDER and not UPLOAD)
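# Reward-to-go: subtract the return accumulated before each step from the episode's discounted return to weight each log-probability.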
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
returns = returns[-CONSECUTIVE_TARGET:]
mean_returns = np.mean(returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
env.close()
if UPLOAD:
gym.upload(SESSION_FOLDER, api_key=API_KEY)
| {
"content_hash": "ff4dcd7d5916d415eb8828393c121262",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 30.678571428571427,
"alnum_prop": 0.650523864959255,
"repo_name": "GitYiheng/reinforcement_learning_test",
"id": "10e095abcb7f3a6fa1e85c07fda4ac70b7db7f49",
"size": "4295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test01_cartpendulum/Feb/t3_cartpole_mc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14810"
},
{
"name": "HTML",
"bytes": "15405"
},
{
"name": "JavaScript",
"bytes": "51050"
},
{
"name": "Jupyter Notebook",
"bytes": "3492256"
},
{
"name": "Python",
"bytes": "1033931"
},
{
"name": "Shell",
"bytes": "3108"
}
],
"symlink_target": ""
} |
import re
import unittest
import subprocess32 as subprocess
import Gaffer
import GafferTest
class StatsApplicationTest( GafferTest.TestCase ) :
def test( self ) :
script = Gaffer.ScriptNode()
script["frameRange"]["start"].setValue( 10 )
script["frameRange"]["end"].setValue( 50 )
script["variables"].addChild( Gaffer.NameValuePlug( "test", 20.5, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
script["n"] = GafferTest.AddNode()
script["b"] = Gaffer.Box()
script["b"]["n"] = GafferTest.AddNode()
script["fileName"].setValue( self.temporaryDirectory() + "/script.gfr" )
script.save()
o = subprocess.check_output( [ "gaffer", "stats", script["fileName"].getValue() ] )
self.assertTrue( Gaffer.About.versionString() in o )
self.assertTrue( re.search( r"frameRange\.start\s*10", o ) )
self.assertTrue( re.search( r"frameRange\.end\s*50", o ) )
self.assertTrue( re.search( r"framesPerSecond\s*24.0", o ) )
self.assertTrue( re.search( r"test\s*20.5", o ) )
self.assertTrue( re.search( r"AddNode\s*2", o ) )
self.assertTrue( re.search( r"Box\s*1", o ) )
self.assertTrue( re.search( r"Total\s*3", o ) )
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "710bb5d6fe810649f25d1ab12051f364",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 133,
"avg_line_length": 32.54054054054054,
"alnum_prop": 0.6727574750830565,
"repo_name": "appleseedhq/gaffer",
"id": "b9b49ba3ba877bf00a45b88c9448708e2c1271fa",
"size": "3007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferTest/StatsApplicationTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
import unittest
from contains_digits import contains_digits
class Test_Digits_Containning(unittest.TestCase):
def test_containning_digits(self):
self.assertTrue(contains_digits(297876954274, [2, 7, 5, 9]))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "57e48477e9436143315b1139dda431fe",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 22.5,
"alnum_prop": 0.6962962962962963,
"repo_name": "sevgo/Programming101",
"id": "ee3fdc4237dd1dc3178a55b6920372f0d3bd5f7d",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "week1/the_real_deal/contains_digits_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "81618"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
print ('------------------------error 404 ------------------------------------------')
url = 'https://api.ordnancesurvey.co.uk/places/v1/addresses/find?'
values = {'query':'''ORDNANCE SURVEY, 4, ADANAC DRIVE, NURSLING, SOUTHAMPTON, SO1 0AS''',
'dataset':'DPA,LPI',
'key':'INSERT_YOUR_API_KEY_HERE'}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
try:
f = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code == 401:
print '401 not authorized'
elif e.code == 404:
print '404 not found'
elif e.code == 503:
print 'service unavailable'
else:
print 'unknown error: '
else:
print 'success'
response=f.read()
for line in response.splitlines():
word_lst = line.split(':')
for word in word_lst:
if '"ADDRESS" ' in word: print(line)
if 'COORDINATE' in word: print(line)
if 'UPRN' in word: print(line)
f.close()
print ('------------------------error 401 ------------------------------------------')
url = 'https://api.ordnancesurvey.co.uk/places/v1/addresses/find?'
values = {'query':'''ORDNANCE SURVEY, 4, ADANAC DRIVE, NURSLING, SOUTHAMPTON, SO1 0AS''',
'dataset':'DPA,LPI',
'key':'INSERT_YOUR_API_KEY_HERE'}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
try:
f = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code == 401:
print '401 not authorized'
elif e.code == 404:
print '404 not found'
elif e.code == 503:
print 'service unavailable'
else:
print 'unknown error: '
else:
print 'success'
response=f.read()
for line in response.splitlines():
word_lst = line.split(':')
for word in word_lst:
if '"ADDRESS" ' in word: print(line)
if 'COORDINATE' in word: print(line)
if 'UPRN' in word: print(line)
f.close()
print ('------------------------second try ------------------------------------------')
url = 'https://api.ordnancesurvey.co.uk/places/v1/addresses/find'
values = {'query':'ORDNANCE SURVEY, 4, ADANAC DRIVE NURSLING SOUTHAMPTON SO1 0AS',
'dataset':'DPA,LPI',
'key':'INSERT_YOUR_API_KEY_HERE'}
data = urllib.urlencode(values)
full_url = url + '?' + data
print full_url
try:
f = urllib2.urlopen(full_url)
except urllib2.HTTPError, e:
if e.code == 401:
print '401 not authorized'
elif e.code == 404:
print '404 not found'
elif e.code == 503:
print 'service unavailable'
else:
print 'unknown error: '
else:
print 'success'
response=f.read()
response_count = 0
for line in response.splitlines():
word_lst = line.split(':')
for word in word_lst:
if response_count < 10:
if 'UPRN' in word:
response_count = response_count + 1
print '-'*80
print(line)
if '"ADDRESS" ' in word: print(line)
if 'COORDINATE' in word: print(line)
if 'MATCH' in word: print(line)
f.close()
| {
"content_hash": "87cc51b2ceee6abc5f6a7d35885d03bf",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 89,
"avg_line_length": 35,
"alnum_prop": 0.45795148247978434,
"repo_name": "GusSmith21/OS-Places",
"id": "238408d5c21b2f33319e412caa2d115affd246b7",
"size": "3710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python3/urlib2_find.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5208"
},
{
"name": "HTML",
"bytes": "3384"
},
{
"name": "JavaScript",
"bytes": "56629"
},
{
"name": "Python",
"bytes": "13367"
}
],
"symlink_target": ""
} |
import os
import sys
import time
from magnolia.utility import *
from magnolia.utility import LOG as L
from magnolia.script.sinoalice import testcase_normal
class TestCase(testcase_normal.TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
L.info("*** Start TestCase : %s *** " % __file__)
def test_1(self):
L.info("*** Capture ***")
try:
self.minicap_start(); time.sleep(2)
"""
self.assertTrue(self.reinstall()); time.sleep(2)
self.assertTrue(self.maintenance())
self.assertTrue(self.terms())
self.assertTrue(self.initial_gacha())
self.assertTrue(self.name())
self.assertTrue(self.download())
self.assertTrue(self.select()); time.sleep(5)
self.assertTrue(self.first_sweep()); time.sleep(5)
self.assertTrue(self.message_skip())
self.assertTrue(self.box()); time.sleep(2)
self.assertTrue(self.login_bonus()); time.sleep(2)
self.assertTrue(self.box()); time.sleep(5)
self.assertTrue(self.gacha()); time.sleep(2)
self.assertTrue(self.inherit())
"""
self.minicap_finish(); time.sleep(2)
except Exception as e:
L.warning(type(e).__name__ + ": " + str(e))
#L.warning(traceback.print_exc())
self.minicap_finish(); time.sleep(2)
self.minicap_create_video()
self.fail()
@classmethod
def tearDownClass(cls):
L.info("*** End TestCase : %s *** " % __file__)
| {
"content_hash": "8f93bb15cf4ecd88740393679baafb2b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 62,
"avg_line_length": 35.787234042553195,
"alnum_prop": 0.5618311533888228,
"repo_name": "setsulla/stir",
"id": "9296ab5cafd205b08daf2a7551536a4f9f9b5510",
"size": "1682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/magnolia/script/sinoalice/test_org.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176459"
}
],
"symlink_target": ""
} |
'''Displays a helpful help message full of help.'''
__matcher__ = '%NICK.*help'
def respond(brain, user, message, groups):
yield 'name\t| description\t| regex'
for script in brain.bot_info['scripts']:
name = script.__name__.split('.')[-1]
regex = script.__matcher__.replace('%NICK', brain.bot_info['nick'])
doc = script.__doc__ if script.__doc__ is not None else '\t\t'
yield '%s\t| %s\t| %s' % (name, doc, regex)
| {
"content_hash": "711c88b4ce8747e8b822e4c482f72208",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 38.083333333333336,
"alnum_prop": 0.5820568927789934,
"repo_name": "iDevy/yaubot",
"id": "5c4e7a9c1beb0ee4dd622c1105ea9bc932f156e0",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaubot/scripts/help.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10452"
}
],
"symlink_target": ""
} |
from django.urls import path
from . import views
urlpatterns = [
path('district-create', views.district_create),
path('districts-load', views.districts_load),
path('district-edit', views.district_edit),
path('district-save-limit', views.district_save_limit),
]
| {
"content_hash": "a8bac32f24ff731d4fd35ed3f5ef0555",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 59,
"avg_line_length": 27.9,
"alnum_prop": 0.7096774193548387,
"repo_name": "moodpulse/l2",
"id": "e52b5ce7c358c450643eef5c0636178ec31d38e3",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/districts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38747"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "238498"
},
{
"name": "JavaScript",
"bytes": "425946"
},
{
"name": "Makefile",
"bytes": "1515"
},
{
"name": "Python",
"bytes": "3710422"
},
{
"name": "SCSS",
"bytes": "48493"
},
{
"name": "Shell",
"bytes": "1815"
},
{
"name": "TypeScript",
"bytes": "98237"
},
{
"name": "Vue",
"bytes": "1980612"
}
],
"symlink_target": ""
} |
from benchexec.tools.sv_benchmarks_util import get_data_model_from_task, ILP32, LP64
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Deagle, an SMT-based concurrent program verification tool.
Project URL: https://github.com/Misasasa/Deagle
"""
def executable(self, tool_locator):
return tool_locator.find_executable("deagle")
def name(self):
return "Deagle"
def version(self, executable):
return self._version_from_tool(executable, arg="--version")
def cmdline(self, executable, options, task, rlimits):
data_model_param = get_data_model_from_task(task, {ILP32: "--32", LP64: "--64"})
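# Default to the 32-bit data model when the task does not specify one.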
if not data_model_param:
data_model_param = "--32"
if data_model_param not in options:
options += [data_model_param]
return [executable] + options + [task.single_input_file]
def determine_result(self, run):
if run.output.any_line_contains("SUCCESSFUL"):
status = result.RESULT_TRUE_PROP
elif run.output.any_line_contains("FAILED"):
status = result.RESULT_FALSE_REACH
elif run.exit_code.value == 1:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_ERROR
return status
| {
"content_hash": "65f8fc64364b10e2a0b0313b6ee2901e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 34.1,
"alnum_prop": 0.6407624633431085,
"repo_name": "sosy-lab/benchexec",
"id": "ad0bbbc59c5f790d9bb8d9216f75c0ceb2191d2b",
"size": "1594",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "benchexec/tools/deagle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3210"
},
{
"name": "CSS",
"bytes": "609"
},
{
"name": "Dockerfile",
"bytes": "3164"
},
{
"name": "Gnuplot",
"bytes": "5032"
},
{
"name": "HTML",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "75586"
},
{
"name": "Jinja",
"bytes": "285"
},
{
"name": "PHP",
"bytes": "4241"
},
{
"name": "Python",
"bytes": "1218836"
},
{
"name": "Roff",
"bytes": "3145"
},
{
"name": "SCSS",
"bytes": "25181"
},
{
"name": "Shell",
"bytes": "7671"
},
{
"name": "TeX",
"bytes": "7458"
}
],
"symlink_target": ""
} |
import os, sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_dja.settings")
# Add the lib/ directory to the system path
sys.path.append("lib")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "80b96f61b75b6cdffdbc748b108ce097",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 29.7,
"alnum_prop": 0.6868686868686869,
"repo_name": "jradd/Django_web_dja",
"id": "3982e3d98ae3a7d93d8d5ce159736a3566029577",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11300"
},
{
"name": "JavaScript",
"bytes": "5336"
},
{
"name": "Python",
"bytes": "32052"
},
{
"name": "Shell",
"bytes": "8144"
}
],
"symlink_target": ""
} |
from unittest import mock
from oslo_config import cfg
from oslo_utils import units
from ironic_inspector import node_cache
from ironic_inspector.plugins import base
from ironic_inspector.plugins import standard as std_plugins
from ironic_inspector import process
from ironic_inspector.test import base as test_base
from ironic_inspector import utils
CONF = cfg.CONF
@mock.patch('ironic_inspector.common.ironic.get_client', new=mock.Mock())
class TestSchedulerHook(test_base.NodeTest):
def setUp(self):
super(TestSchedulerHook, self).setUp()
self.hook = std_plugins.SchedulerHook()
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0,
node=self.node)
def test_hook_loadable_by_name(self):
CONF.set_override('processing_hooks', 'scheduler', 'processing')
ext = base.processing_hooks_manager()['scheduler']
self.assertIsInstance(ext.obj, std_plugins.SchedulerHook)
@mock.patch.object(node_cache.NodeInfo, 'patch', autospec=True)
def test_ok(self, mock_patch):
patch = [
{'path': '/properties/cpus', 'value': '4', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
{'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'},
]
self.hook.before_update(self.data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch', autospec=True)
def test_no_overwrite(self, mock_patch):
CONF.set_override('overwrite_existing', False, 'processing')
self.node.properties = {
'memory_mb': '4096',
'cpu_arch': 'i686'
}
patch = [
{'path': '/properties/cpus', 'value': '4', 'op': 'add'},
]
self.hook.before_update(self.data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch', autospec=True)
def test_missing_cpu(self, mock_patch):
self.data['inventory']['cpu'] = {'count': 'none'}
patch = [
{'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'},
]
self.hook.before_update(self.data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch', autospec=True)
def test_missing_memory(self, mock_patch):
# We require physical_mb, not total
self.data['inventory']['memory'] = {'total': 42}
patch = [
{'path': '/properties/cpus', 'value': '4', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
]
self.hook.before_update(self.data, self.node_info)
self.assertCalledWithPatch(patch, mock_patch)
@mock.patch.object(node_cache.NodeInfo, 'patch', autospec=True)
def test_no_data(self, mock_patch):
self.data['inventory']['cpu'] = {}
self.data['inventory']['memory'] = {}
self.hook.before_update(self.data, self.node_info)
del self.data['inventory']['cpu']
del self.data['inventory']['memory']
self.hook.before_update(self.data, self.node_info)
self.assertFalse(mock_patch.called)
class TestValidateInterfacesHookLoad(test_base.NodeTest):
def test_hook_loadable_by_name(self):
CONF.set_override('processing_hooks', 'validate_interfaces',
'processing')
ext = base.processing_hooks_manager()['validate_interfaces']
self.assertIsInstance(ext.obj, std_plugins.ValidateInterfacesHook)
class TestValidateInterfacesHookBeforeProcessing(test_base.NodeTest):
def setUp(self):
super(TestValidateInterfacesHookBeforeProcessing, self).setUp()
self.hook = std_plugins.ValidateInterfacesHook()
def test_no_interfaces(self):
self.assertRaisesRegex(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {})
self.assertRaisesRegex(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_processing, {'inventory': {}})
del self.inventory['interfaces']
self.assertRaisesRegex(utils.Error,
'No network interfaces',
self.hook.before_processing, self.data)
def test_only_pxe(self):
self.hook.before_processing(self.data)
self.assertEqual(self.pxe_interfaces, self.data['interfaces'])
self.assertEqual([self.pxe_mac], self.data['macs'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_only_pxe_mac_format(self):
self.data['boot_interface'] = self.pxe_mac
self.hook.before_processing(self.data)
self.assertEqual(self.pxe_interfaces, self.data['interfaces'])
self.assertEqual([self.pxe_mac], self.data['macs'])
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_only_pxe_not_found(self):
self.data['boot_interface'] = 'aa:bb:cc:dd:ee:ff'
self.assertRaisesRegex(utils.Error, 'No suitable interfaces',
self.hook.before_processing, self.data)
def test_only_pxe_no_boot_interface(self):
del self.data['boot_interface']
self.hook.before_processing(self.data)
self.active_interfaces[self.pxe_iface_name]['pxe'] = False
self.all_interfaces[self.pxe_iface_name]['pxe'] = False
self.assertEqual(self.active_interfaces, self.data['interfaces'])
self.assertEqual(sorted(i['mac'] for i in
self.active_interfaces.values()),
sorted(self.data['macs']))
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_only_active(self):
CONF.set_override('add_ports', 'active', 'processing')
self.hook.before_processing(self.data)
self.assertEqual(self.active_interfaces, self.data['interfaces'])
self.assertEqual(sorted(i['mac'] for i in
self.active_interfaces.values()),
sorted(self.data['macs']))
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
def test_all(self):
CONF.set_override('add_ports', 'all', 'processing')
self.hook.before_processing(self.data)
self.assertEqual(self.all_interfaces, self.data['interfaces'])
self.assertEqual(sorted(i['mac'] for i in
self.all_interfaces.values()),
sorted(self.data['macs']))
self.assertEqual(self.all_interfaces, self.data['all_interfaces'])
@mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True)
def test_disabled_bad_conf(self, mock_create_port):
CONF.set_override('add_ports', 'disabled', 'processing')
CONF.set_override('keep_ports', 'added', 'processing')
self.assertRaisesRegex(utils.Error, 'Configuration error:',
self.hook.__init__)
mock_create_port.assert_not_called()
@mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True)
def test_disabled(self, mock_create_port):
CONF.set_override('add_ports', 'disabled', 'processing')
CONF.set_override('keep_ports', 'all', 'processing')
self.hook.before_processing(self.data)
self.assertEqual(self.active_interfaces, self.data['interfaces'])
mock_create_port.assert_not_called()
def test_malformed_interfaces(self):
self.inventory['interfaces'] = [
# no name
{'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1'},
# empty
{},
]
self.assertRaisesRegex(utils.Error, 'No interfaces supplied',
self.hook.before_processing, self.data)
def test_skipped_interfaces(self):
CONF.set_override('add_ports', 'all', 'processing')
self.inventory['interfaces'] = [
# local interface (by name)
{'name': 'lo', 'mac_address': '11:11:11:11:11:11',
'ipv4_address': '1.1.1.1'},
# local interface (by IP address)
{'name': 'em1', 'mac_address': '22:22:22:22:22:22',
'ipv4_address': '127.0.0.1'},
# no MAC provided
{'name': 'em3', 'ipv4_address': '2.2.2.2'},
# malformed MAC provided
{'name': 'em4', 'mac_address': 'foobar',
'ipv4_address': '2.2.2.2'},
]
self.assertRaisesRegex(utils.Error, 'No suitable interfaces found',
self.hook.before_processing, self.data)
def test_interfaces_with_ipv6_addresses_only(self):
CONF.set_override('add_ports', 'all', 'processing')
self.inventory['interfaces'] = [
# loopback interface (by IPv6 address)
{'name': 'em2', 'mac_address': '33:33:33:33:33:33',
'ipv6_address': '::1'},
# interface with local-link address
{'name': 'em3', 'mac_address': '44:44:44:44:44:44',
'ipv6_address': 'fe80::4644:44ff:fe44:4444'},
# interface with local-link address with suffix
{'name': 'em4', 'mac_address': '55:55:55:55:55:55',
'ipv6_address': 'fe80::5755:55ff:fe55:5555%em4'},
# interface with ULA address
{'name': 'em5', 'mac_address': '66:66:66:66:66:66',
'ipv6_address': 'fd00::1111:2222:6666'},
]
self.hook.before_processing(self.data)
interfaces = self.data['interfaces']
self.assertEqual(interfaces['em3']['mac'], '44:44:44:44:44:44')
self.assertEqual(interfaces['em3']['ip'], 'fe80::4644:44ff:fe44:4444')
self.assertEqual(interfaces['em4']['mac'], '55:55:55:55:55:55')
self.assertEqual(interfaces['em4']['ip'], 'fe80::5755:55ff:fe55:5555')
self.assertEqual(interfaces['em5']['mac'], '66:66:66:66:66:66')
self.assertEqual(interfaces['em5']['ip'], 'fd00::1111:2222:6666')
@mock.patch.object(node_cache.NodeInfo, 'delete_port', autospec=True)
@mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True)
class TestValidateInterfacesHookBeforeUpdateDeletion(test_base.NodeTest):
def setUp(self):
super(TestValidateInterfacesHookBeforeUpdateDeletion, self).setUp()
self.hook = std_plugins.ValidateInterfacesHook()
self.interfaces_to_create = sorted(self.valid_interfaces.values(),
key=lambda i: i['mac'])
self.existing_ports = [mock.Mock(spec=['address', 'id'],
address=a)
for a in (self.macs[1],
'44:44:44:44:44:44')]
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0,
node=self.node,
ports=self.existing_ports)
def test_keep_all(self, mock_create_ports, mock_delete_port):
self.hook.before_update(self.data, self.node_info)
# NOTE(dtantsur): dictionary ordering is not defined
mock_create_ports.assert_called_once_with(self.node_info, mock.ANY)
self.assertEqual(self.interfaces_to_create,
sorted(mock_create_ports.call_args[0][1],
key=lambda i: i['mac']))
self.assertFalse(mock_delete_port.called)
def test_keep_present(self, mock_create_ports, mock_delete_port):
CONF.set_override('keep_ports', 'present', 'processing')
self.data['all_interfaces'] = self.all_interfaces
self.hook.before_update(self.data, self.node_info)
mock_create_ports.assert_called_once_with(self.node_info, mock.ANY)
self.assertEqual(self.interfaces_to_create,
sorted(mock_create_ports.call_args[0][1],
key=lambda i: i['mac']))
mock_delete_port.assert_called_once_with(self.node_info,
self.existing_ports[1])
def test_keep_added(self, mock_create_ports, mock_delete_port):
CONF.set_override('keep_ports', 'added', 'processing')
self.data['macs'] = [self.pxe_mac]
self.hook.before_update(self.data, self.node_info)
mock_create_ports.assert_called_once_with(self.node_info, mock.ANY)
self.assertEqual(self.interfaces_to_create,
sorted(mock_create_ports.call_args[0][1],
key=lambda i: i['mac']))
mock_delete_port.assert_any_call(self.node_info,
self.existing_ports[0])
mock_delete_port.assert_any_call(self.node_info,
self.existing_ports[1])
def test_active_do_not_delete(self, mock_create_ports, mock_delete_port):
CONF.set_override('permit_active_introspection', True, 'processing')
CONF.set_override('keep_ports', 'present', 'processing')
self.data['all_interfaces'] = self.all_interfaces
self.node_info.node().provision_state = 'active'
self.hook.before_update(self.data, self.node_info)
mock_create_ports.assert_called_once_with(self.node_info, mock.ANY)
self.assertFalse(mock_delete_port.called)
@mock.patch.object(node_cache.NodeInfo, 'patch_port', autospec=True)
@mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True)
class TestValidateInterfacesHookBeforeUpdatePXEEnabled(test_base.NodeTest):
def setUp(self):
super(TestValidateInterfacesHookBeforeUpdatePXEEnabled, self).setUp()
self.hook = std_plugins.ValidateInterfacesHook()
# Note(milan) assumes the ordering of self.macs from test_base.NodeTest
# where the first item '11:22:33:44:55:66' is the MAC of the
# self.pxe_iface_name 'eth1', the "real" PXE interface
sorted_interfaces = sorted(self.valid_interfaces.values(),
key=lambda i: i['mac'])
self.existing_ports = [
mock.Mock(spec=['address', 'uuid', 'is_pxe_enabled'],
address=iface['mac'], is_pxe_enabled=True)
for iface in sorted_interfaces
]
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0,
node=self.node,
ports=self.existing_ports)
def test_fix_is_pxe_enabled(self, mock_create_ports, mock_patch_port):
self.hook.before_update(self.data, self.node_info)
# Note(milan) there are just 2 self.valid_interfaces, 'eth1' and 'ib0'
# eth1 is the PXE booting interface and eth1.mac < ib0.mac
mock_patch_port.assert_called_once_with(
self.node_info, self.existing_ports[1],
[{'op': 'replace', 'path': '/pxe_enabled', 'value': False}])
def test_active_do_not_modify(self, mock_create_ports, mock_patch_port):
CONF.set_override('permit_active_introspection', True, 'processing')
self.node_info.node().provision_state = 'active'
self.hook.before_update(self.data, self.node_info)
self.assertFalse(mock_patch_port.called)
def test_no_overwrite(self, mock_create_ports, mock_patch_port):
CONF.set_override('overwrite_existing', False, 'processing')
self.hook.before_update(self.data, self.node_info)
self.assertFalse(mock_patch_port.called)
class TestRootDiskSelection(test_base.NodeTest):
def setUp(self):
super(TestRootDiskSelection, self).setUp()
self.hook = std_plugins.RootDiskSelectionHook()
self.inventory['disks'] = [
{'model': 'Model 1', 'size': 20 * units.Gi, 'name': '/dev/sdb'},
{'model': 'Model 2', 'size': 5 * units.Gi, 'name': '/dev/sda'},
{'model': 'Model 3', 'size': 10 * units.Gi, 'name': '/dev/sdc'},
{'model': 'Model 4', 'size': 4 * units.Gi, 'name': '/dev/sdd'},
{'model': 'Too Small', 'size': 1 * units.Gi, 'name': '/dev/sde'},
{'model': 'Floppy', 'size': 0, 'name': '/dev/sdf'},
{'model': 'Model 3', 'size': 20 * units.Gi, 'name': '/dev/sdg'},
]
self.matched = self.inventory['disks'][2].copy()
self.node_info = mock.Mock(spec=node_cache.NodeInfo,
_state='foo',
uuid=self.uuid,
**{'node.return_value': self.node})
def test_no_hints(self):
del self.data['root_disk']
self.hook.before_update(self.data, self.node_info)
self.assertEqual(0, self.data['local_gb'])
self.assertNotIn('root_disk', self.data)
self.node_info.update_properties.assert_called_once_with(local_gb='0')
def test_no_hints_no_overwrite(self):
CONF.set_override('overwrite_existing', False, 'processing')
del self.data['root_disk']
self.hook.before_update(self.data, self.node_info)
self.assertEqual(0, self.data['local_gb'])
self.assertNotIn('root_disk', self.data)
self.assertFalse(self.node_info.update_properties.called)
def test_no_inventory(self):
self.node.properties['root_device'] = {'model': 'foo'}
del self.data['inventory']
del self.data['root_disk']
self.assertRaisesRegex(utils.Error,
'Hardware inventory is empty or missing',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertNotIn('root_disk', self.data)
self.assertFalse(self.node_info.update_properties.called)
def test_no_disks(self):
self.node.properties['root_device'] = {'size': 10}
self.inventory['disks'] = []
self.assertRaisesRegex(utils.Error,
'No disks satisfied root device hints',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertFalse(self.node_info.update_properties.called)
def test_one_that_matches_on_skip_list(self):
self.node.properties['root_device'] = {'size': 10}
self.node.properties['skip_block_devices'] = [{'size': 10}]
self.assertRaisesRegex(utils.Error,
'No disks satisfied root device hints',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertFalse(self.node_info.update_properties.called)
def test_first_match_on_skip_list_use_second(self):
self.node.properties['root_device'] = {'model': 'Model 3'}
self.node.properties['skip_block_devices'] = [{'size': 10}]
second = self.inventory['disks'][6].copy()
self.hook.before_update(self.data, self.node_info)
self.assertEqual(second, self.data['root_disk'])
self.assertEqual(19, self.data['local_gb'])
self.node_info.update_properties.assert_called_once_with(local_gb='19')
def test_one_matches(self):
self.node.properties['root_device'] = {'size': 10}
self.hook.before_update(self.data, self.node_info)
self.assertEqual(self.matched, self.data['root_disk'])
self.assertEqual(9, self.data['local_gb'])
self.node_info.update_properties.assert_called_once_with(local_gb='9')
def test_local_gb_without_spacing(self):
CONF.set_override('disk_partitioning_spacing', False, 'processing')
self.node.properties['root_device'] = {'size': 10}
self.hook.before_update(self.data, self.node_info)
self.assertEqual(self.matched, self.data['root_disk'])
self.assertEqual(10, self.data['local_gb'])
self.node_info.update_properties.assert_called_once_with(local_gb='10')
def test_zero_size(self):
self.node.properties['root_device'] = {'name': '/dev/sdf'}
self.hook.before_update(self.data, self.node_info)
self.assertEqual(self.inventory['disks'][5], self.data['root_disk'])
self.assertEqual(0, self.data['local_gb'])
self.node_info.update_properties.assert_called_once_with(local_gb='0')
def test_all_match(self):
self.node.properties['root_device'] = {'size': 10,
'model': 'Model 3'}
self.hook.before_update(self.data, self.node_info)
self.assertEqual(self.matched, self.data['root_disk'])
self.assertEqual(9, self.data['local_gb'])
self.node_info.update_properties.assert_called_once_with(local_gb='9')
def test_one_fails(self):
self.node.properties['root_device'] = {'size': 10,
'model': 'Model 42'}
del self.data['root_disk']
self.assertRaisesRegex(utils.Error,
'No disks satisfied root device hints',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertNotIn('root_disk', self.data)
self.assertFalse(self.node_info.update_properties.called)
def test_size_string(self):
self.node.properties['root_device'] = {'size': '10'}
self.hook.before_update(self.data, self.node_info)
self.assertEqual(self.matched, self.data['root_disk'])
self.assertEqual(9, self.data['local_gb'])
self.node_info.update_properties.assert_called_once_with(local_gb='9')
def test_size_invalid(self):
for bad_size in ('foo', None, {}):
self.node.properties['root_device'] = {'size': bad_size}
self.assertRaisesRegex(utils.Error,
'No disks could be found',
self.hook.before_update,
self.data, self.node_info)
self.assertNotIn('local_gb', self.data)
self.assertFalse(self.node_info.update_properties.called)
class TestRamdiskError(test_base.InventoryTest):
def setUp(self):
super(TestRamdiskError, self).setUp()
self.msg = 'BOOM'
self.bmc_address = '1.2.3.4'
self.data['error'] = self.msg
def test_no_logs(self):
self.assertRaisesRegex(utils.Error,
self.msg,
process.process, self.data)
| {
"content_hash": "b6feab16124c164a40b06fea18a3521f",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 79,
"avg_line_length": 44.82974559686888,
"alnum_prop": 0.5883534136546185,
"repo_name": "openstack/ironic-inspector",
"id": "3f3139a7008a386db8e9a8c53c7b1a056b940da1",
"size": "23454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic_inspector/test/unit/test_plugins_standard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "972"
},
{
"name": "Python",
"bytes": "904664"
},
{
"name": "Roff",
"bytes": "757"
},
{
"name": "Shell",
"bytes": "34350"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def shape(tensor):
static = tensor.get_shape().as_list()
dynamic = tf.unstack(tf.shape(tensor))
assert len(static) == len(dynamic)
combined = [d if s is None else s for s, d in zip(static, dynamic)]
return combined
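# A minimal usage sketch (assumes TF 1.x graph mode, which this helper
# targets): for a placeholder with static shape [None, 128], the result
# mixes the dynamic batch-dimension tensor with the static value 128.
if __name__ == '__main__':
  example = tf.placeholder(tf.float32, [None, 128])
  print(shape(example))  # e.g. [<tf.Tensor ...>, 128]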
| {
"content_hash": "03ccb5faa753fcafa17bff009f1879f9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.7119113573407202,
"repo_name": "google-research/planet",
"id": "e3929b96336722c1f451023a49905bde20ba5d4b",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planet/tools/shape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "251609"
}
],
"symlink_target": ""
} |
def find_slope(points):
dx = points[2] - points[0]
dy = points[3] - points[1]
return str(dy / dx) if dx else 'undefined'
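# A small usage sketch (not part of the kata solution): points arrive as
# [x1, y1, x2, y2]; a vertical segment (dx == 0) yields 'undefined'.
if __name__ == '__main__':
    print(find_slope([1, 2, 3, 6]))  # '2.0' under Python 3 true division
    print(find_slope([5, 1, 5, 9]))  # 'undefined'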
| {
"content_hash": "e62306fb5e44a60fd21fcd05d5026bfb",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 46,
"avg_line_length": 26.8,
"alnum_prop": 0.5970149253731343,
"repo_name": "RevansChen/online-judge",
"id": "fc51ad5dfba7d0a6a0199cff4253ce86a8f0eb1b",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codewars/8kyu/find-the-slope/Python/solution1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import with_statement
import collections
import os
import stat
import types
import warnings
from twisted.internet import defer
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.internet.interfaces import IReactorCore
from twisted.internet.interfaces import IStreamClientEndpoint
from zope.interface import implementer
from txtorcon import TorProtocolFactory
from txtorcon.stream import Stream
from txtorcon.circuit import Circuit
from txtorcon.router import Router, hashFromHexId
from txtorcon.addrmap import AddrMap
from txtorcon.torcontrolprotocol import parse_keywords
from txtorcon.log import txtorlog
from txtorcon.torcontrolprotocol import TorProtocolError
from txtorcon.interface import ITorControlProtocol
from txtorcon.interface import IRouterContainer
from txtorcon.interface import ICircuitListener
from txtorcon.interface import ICircuitContainer
from txtorcon.interface import IStreamListener
from txtorcon.interface import IStreamAttacher
from .spaghetti import FSM, State, Transition
from .util import basestring
def _build_state(proto):
state = TorState(proto)
return state.post_bootstrap
def _wait_for_proto(proto):
return proto.post_bootstrap
def build_tor_connection(connection, build_state=True, wait_for_proto=True,
password_function=lambda: None):
"""
This is used to build a valid TorState (which has .protocol for
the TorControlProtocol). For example::
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
import txtorcon
def example(state):
print "Fully bootstrapped state:",state
print " with bootstrapped protocol:",state.protocol
d = txtorcon.build_tor_connection(TCP4ClientEndpoint(reactor,
"localhost",
9051))
d.addCallback(example)
reactor.run()
:param password_function:
See :class:`txtorcon.TorControlProtocol`
:param build_state:
If True (the default) a TorState object will be
built as well. If False, just a TorControlProtocol will be
returned via the Deferred.
:return:
a Deferred that fires with a TorControlProtocol or, if you
specified build_state=True, a TorState. In both cases, the
object has finished bootstrapping
(i.e. TorControlProtocol.post_bootstrap or
TorState.post_bootstrap has fired, as needed)
"""
if IStreamClientEndpoint.providedBy(connection):
endpoint = connection
elif isinstance(connection, tuple):
if len(connection) == 2:
reactor, socket = connection
if (os.path.exists(socket) and
os.stat(socket).st_mode & (stat.S_IRGRP | stat.S_IRUSR |
stat.S_IROTH)):
endpoint = UNIXClientEndpoint(reactor, socket)
else:
raise ValueError('Can\'t use "%s" as a socket' % (socket, ))
elif len(connection) == 3:
endpoint = TCP4ClientEndpoint(*connection)
else:
raise TypeError('Expected either a (reactor, socket)- or a '
'(reactor, host, port)-tuple for argument '
'"connection", got %s' % (connection, ))
else:
raise TypeError('Expected a (reactor, socket)- or a (reactor, host, '
'port)-tuple or an object implementing IStreamClient'
'Endpoint for argument "connection", got %s' %
(connection, ))
d = endpoint.connect(
TorProtocolFactory(
password_function=password_function
)
)
if build_state:
d.addCallback(build_state
if isinstance(build_state, collections.Callable)
else _build_state)
elif wait_for_proto:
d.addCallback(wait_for_proto
if isinstance(wait_for_proto, collections.Callable)
else _wait_for_proto)
return d
def build_local_tor_connection(reactor, host='127.0.0.1', port=9051,
socket='/var/run/tor/control', *args, **kwargs):
"""
This builds a connection to a local Tor, trying the control socket
(/var/run/tor/control by default) first and falling back to 127.0.0.1:9051. See
also :meth:`build_tor_connection
<txtorcon.torstate.build_tor_connection>` for other key-word
arguments that are accepted here also.
:param host:
An IP address to find Tor at. Corresponds to the
ControlListenAddress torrc option.
:param port:
The port to use with the address when trying to contact
Tor. This corresponds to the ControlPort option in torrc
(default is 9051).
"""
try:
return build_tor_connection((reactor, socket), *args, **kwargs)
except:
return build_tor_connection((reactor, host, port), *args, **kwargs)
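# A minimal usage sketch (assumes a local Tor with its control socket or
# port enabled; the callbacks shown are illustrative):
#
#     from twisted.internet import reactor
#     d = build_local_tor_connection(reactor)
#     d.addCallback(lambda state: print(state))
#     d.addErrback(print)
#     reactor.run()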
def flags_from_dict(kw):
"""
This turns a dict whose keys are flags (e.g. for CLOSECIRCUIT,
CLOSESTREAM) into a string containing only the flags whose values
are true, each preceded by a space.
"""
if len(kw) == 0:
return ''
flags = ''
for (k, v) in kw.items():
if v:
flags += ' ' + str(k)
# note that we want the leading space if there's at least one
# flag.
return flags
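# For example (illustrative): flags_from_dict({'IfUnused': True,
# 'Other': False}) returns ' IfUnused' (note the leading space), and an
# empty dict returns ''.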
@implementer(ICircuitListener)
@implementer(ICircuitContainer)
@implementer(IRouterContainer)
@implementer(IStreamListener)
class TorState(object):
"""
This tracks the current state of Tor using a TorControlProtocol.
On setup it first queries the initial state of streams and
circuits. It then asks for updates via the listeners. It requires
an ITorControlProtocol instance. The control protocol doesn't need
to be bootstrapped yet. The Deferred .post_bootstrap is triggered
when the TorState instance is fully ready to go. The easiest way
is to use the helper method
:func:`txtorcon.build_tor_connection`. For details, see the
implementation of that.
You may add an :class:`txtorcon.interface.IStreamAttacher` to
provide a custom mapping for Streams to Circuits (by default Tor
picks by itself).
This is also a good example of the various listeners, and acts as
an :class:`txtorcon.interface.ICircuitContainer` and
:class:`txtorcon.interface.IRouterContainer`.
:cvar DO_NOT_ATTACH:
Constant to return from an IAttacher indicating you don't want to
attach this stream at all.
"""
@classmethod
def from_protocol(cls, protocol, **kw):
'''
Create a new, boot-strapped TorState from a TorControlProtocol
instance.
:return: a Deferred that fires with a TorState instance
'''
state = TorState(protocol, bootstrap=True)
return state.post_bootstrap
def __init__(self, protocol, bootstrap=True):
self.protocol = ITorControlProtocol(protocol)
# fixme could use protocol.on_disconnect to re-connect; see issue #3
# could override these to get your own Circuit/Stream subclasses
# to track these things
self.circuit_factory = Circuit
self.stream_factory = Stream
self.attacher = None
"""If set, provides
:class:`txtorcon.interface.IStreamAttacher` to attach new
streams we hear about."""
self.tor_binary = 'tor'
self.circuit_listeners = []
self.stream_listeners = []
self.addrmap = AddrMap()
#: keys on id (integer)
self.circuits = {}
#: keys on id (integer)
self.streams = {}
#: list of unique routers
self.all_routers = set()
#: keys by hexid (string) and by unique names
self.routers = {}
#: keys on name, value always list (many duplicate "Unnamed"
#: routers, for example)
self.routers_by_name = {}
#: keys by hexid (string)
self.routers_by_hash = {}
#: potentially-usable as entry guards, I think? (any router
#: with 'Guard' flag)
self.guards = {}
#: from GETINFO entry-guards, our current entry guards
self.entry_guards = {}
#: list of entry guards we didn't parse out
self.unusable_entry_guards = []
#: keys by name
self.authorities = {}
#: see set_attacher
self.cleanup = None
class die(object):
__name__ = 'die' # FIXME? just to ease spaghetti.py:82's pain
def __init__(self, msg):
self.msg = msg
def __call__(self, *args):
raise RuntimeError(self.msg % tuple(args))
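# The state machine below parses 'ns/all' router-status entries line by
# line: an 'r ' line begins a router record (nickname, id hash, or hash,
# published time, IP, ORPort, DirPort), 's ' carries its flags, 'a '
# optional IPv6 addresses, 'w ' its bandwidth and 'p ' its exit policy;
# '.', 'OK' and blank lines are ignored.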
waiting_r = State("waiting_r")
waiting_w = State("waiting_w")
waiting_p = State("waiting_p")
waiting_s = State("waiting_s")
def ignorable_line(x):
x = x.strip()
return x in ['.', 'OK', ''] or x.startswith('ns/')
waiting_r.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_r.add_transition(Transition(waiting_s, lambda x: x.startswith('r '), self._router_begin))
# FIXME use better method/func than die!!
waiting_r.add_transition(Transition(waiting_r, lambda x: not x.startswith('r '), die('Expected "r " while parsing routers not "%s"')))
waiting_s.add_transition(Transition(waiting_w, lambda x: x.startswith('s '), self._router_flags))
waiting_s.add_transition(Transition(waiting_s, lambda x: x.startswith('a '), self._router_address))
waiting_s.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_s.add_transition(Transition(waiting_r, lambda x: not x.startswith('s ') and not x.startswith('a '), die('Expected "s " while parsing routers not "%s"')))
waiting_s.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', None))
waiting_w.add_transition(Transition(waiting_p, lambda x: x.startswith('w '), self._router_bandwidth))
waiting_w.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_w.add_transition(Transition(waiting_s, lambda x: x.startswith('r '), self._router_begin)) # "w" lines are optional
waiting_w.add_transition(Transition(waiting_r, lambda x: not x.startswith('w '), die('Expected "w " while parsing routers not "%s"')))
waiting_w.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', None))
waiting_p.add_transition(Transition(waiting_r, lambda x: x.startswith('p '), self._router_policy))
waiting_p.add_transition(Transition(waiting_r, ignorable_line, None))
waiting_p.add_transition(Transition(waiting_s, lambda x: x.startswith('r '), self._router_begin)) # "p" lines are optional
waiting_p.add_transition(Transition(waiting_r, lambda x: x[:2] != 'p ', die('Expected "p " while parsing routers not "%s"')))
waiting_p.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', None))
self._network_status_parser = FSM([waiting_r, waiting_s, waiting_w, waiting_p])
self.post_bootstrap = defer.Deferred()
if bootstrap:
self.protocol.post_bootstrap.addCallback(self._bootstrap)
self.protocol.post_bootstrap.addErrback(self.post_bootstrap.errback)
def _router_begin(self, data):
args = data.split()
self._router = Router(self.protocol)
self._router.from_consensus = True
self._router.update(
args[1], # nickname
args[2], # idhash
args[3], # orhash
args[4] + ' ' + args[5], # modified (like '%Y-%m-%d %H:%M:%S')
args[6], # ip address
args[7], # ORPort
args[8], # DirPort
)
if self._router.id_hex in self.routers:
# FIXME should I do an update() on this one??
self._router = self.routers[self._router.id_hex]
return
if self._router.name in self.routers_by_name:
self.routers_by_name[self._router.name].append(self._router)
else:
self.routers_by_name[self._router.name] = [self._router]
if self._router.name in self.routers:
self.routers[self._router.name] = None
else:
self.routers[self._router.name] = self._router
self.routers[self._router.id_hex] = self._router
self.routers_by_hash[self._router.id_hex] = self._router
self.all_routers.add(self._router)
def _router_flags(self, data):
args = data.split()
self._router.flags = args[1:]
if 'guard' in self._router.flags:
self.guards[self._router.id_hex] = self._router
if 'authority' in self._router.flags:
self.authorities[self._router.name] = self._router
def _router_address(self, data):
"""only for IPv6 addresses"""
self._router.ip_v6.append(data.split()[1].strip())
def _router_bandwidth(self, data):
args = data.split()
self._router.bandwidth = int(args[1].split('=')[1])
def _router_policy(self, data):
args = data.split()
self._router.policy = args[1:]
self._router = None
@defer.inlineCallbacks
def _bootstrap(self, arg=None):
"This takes an arg so we can use it as a callback (see __init__)."
# update list of routers (must be before we do the
# circuit-status) note that we're feeding each line
# incrementally to a state-machine called
# _network_status_parser, set up in constructor. "ns" should
# be the empty string, but we call _update_network_status for
# the de-duplication of named routers
ns = yield self.protocol.get_info_incremental(
'ns/all',
self._network_status_parser.process
)
self._update_network_status(ns)
# update list of existing circuits
cs = yield self.protocol.get_info_raw('circuit-status')
self._circuit_status(cs)
# update list of streams
ss = yield self.protocol.get_info_raw('stream-status')
self._stream_status(ss)
# update list of existing address-maps
key = 'address-mappings/all'
am = yield self.protocol.get_info_raw(key)
# strip address-mappings/all= and OK\n from raw data
am = am[len(key) + 1:]
for line in am.split('\n'):
if len(line.strip()) == 0:
continue # FIXME
self.addrmap.update(line)
self._add_events()
entries = yield self.protocol.get_info_raw("entry-guards")
for line in entries.split('\n')[1:]:
if len(line.strip()) == 0 or line.strip() == 'OK':
# XXX does this ever really happen?
continue
args = line.split()
(name, status) = args[:2]
name = name[:41]
# this is sometimes redundant, as a missing entry guard
# usually means it won't be in our list of routers right
# now, but just being on the safe side
if status.lower() != 'up':
self.unusable_entry_guards.append(line)
continue
try:
self.entry_guards[name] = self.router_from_id(name)
except KeyError:
self.unusable_entry_guards.append(line)
# in case process/pid doesn't exist and we don't know the PID
# because we own it, we just leave it as 0 (previously
# guessed using psutil, but that only works if there's
# exactly one tor running anyway)
try:
pid = yield self.protocol.get_info_raw("process/pid")
except TorProtocolError:
pid = None
self.tor_pid = 0
if pid:
try:
pid = parse_keywords(pid)['process/pid']
self.tor_pid = int(pid)
except:
self.tor_pid = 0
if not self.tor_pid and self.protocol.is_owned:
self.tor_pid = self.protocol.is_owned
self.post_bootstrap.callback(self)
self.post_bootstrap = None
def undo_attacher(self):
"""
Shouldn't Tor handle this by turning this back to 0 if the
controller that twiddled it disconnects?
"""
return self.protocol.set_conf("__LeaveStreamsUnattached", 0)
def set_attacher(self, attacher, myreactor):
"""
Provide an :class:`txtorcon.interface.IStreamAttacher` to
associate streams to circuits. This won't get turned on until
after bootstrapping is completed. ('__LeaveStreamsUnattached'
needs to be set to '1' and the existing circuits list needs to
be populated).
"""
react = IReactorCore(myreactor)
if attacher:
self.attacher = IStreamAttacher(attacher)
else:
self.attacher = None
if self.attacher is None:
d = self.undo_attacher()
if self.cleanup:
react.removeSystemEventTrigger(self.cleanup)
self.cleanup = None
else:
d = self.protocol.set_conf("__LeaveStreamsUnattached", "1")
self.cleanup = react.addSystemEventTrigger('before', 'shutdown',
self.undo_attacher)
return d
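# Typical use (illustrative; `my_attacher` stands for any IStreamAttacher
# provider): state.set_attacher(my_attacher, reactor) sets
# __LeaveStreamsUnattached to '1' and registers a shutdown trigger that
# undoes it, while state.set_attacher(None, reactor) reverts the option
# and removes the trigger.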
# noqa
stream_close_reasons = {
'REASON_MISC': 1, # (catch-all for unlisted reasons)
'REASON_RESOLVEFAILED': 2, # (couldn't look up hostname)
'REASON_CONNECTREFUSED': 3, # (remote host refused connection) [*]
'REASON_EXITPOLICY': 4, # (OR refuses to connect to host or port)
'REASON_DESTROY': 5, # (Circuit is being destroyed)
'REASON_DONE': 6, # (Anonymized TCP connection was closed)
'REASON_TIMEOUT': 7, # (Connection timed out, or OR timed out while connecting)
'REASON_NOROUTE': 8, # (Routing error while attempting to contact destination)
'REASON_HIBERNATING': 9, # (OR is temporarily hibernating)
'REASON_INTERNAL': 10, # (Internal error at the OR)
'REASON_RESOURCELIMIT': 11, # (OR has no resources to fulfill request)
'REASON_CONNRESET': 12, # (Connection was unexpectedly reset)
'REASON_TORPROTOCOL': 13, # (Sent when closing connection because of Tor protocol violations.)
'REASON_NOTDIRECTORY': 14} # (Client sent RELAY_BEGIN_DIR to a non-directory relay.)
def close_stream(self, stream, reason='REASON_MISC', **kwargs):
"""
This sends a CLOSESTREAM command, using the specified reason
(either an int or one of the 14 strings in section 6.3 of
tor-spec.txt if the argument is a string). Any kwargs are
passed through as flags if they evaluate to true
(e.g. "SomeFlag=True"). Currently there are none that Tor accepts.
"""
if type(stream) != int:
# assume it's a Stream instance
stream = stream.id
try:
reason = int(reason)
except ValueError:
try:
reason = TorState.stream_close_reasons[reason]
except KeyError:
raise ValueError(
'Unknown stream close reason "%s"' % str(reason)
)
flags = flags_from_dict(kwargs)
# stream is now an ID no matter what we passed in
cmd = 'CLOSESTREAM %d %d%s' % (stream, reason, flags)
return self.protocol.queue_command(cmd)
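# For example (illustrative): close_stream(12, 'REASON_TIMEOUT') queues
# "CLOSESTREAM 12 7"; a Stream instance can be passed instead of the
# integer id.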
def close_circuit(self, circid, **kwargs):
"""
This sends a CLOSECIRCUIT command, using any keyword arguments
passed as the Flags (currently, that is just 'IfUnused' which
means to only close the circuit when it is no longer used by
any streams).
:param circid:
Either a circuit-id (int) or a Circuit instance
:return:
a Deferred which callbacks with the result of queuing the
command to Tor (usually "OK"). If you want to instead know
when the circuit is actually-gone, see
:meth:`Circuit.close <txtorcon.circuit.Circuit.close>`
"""
if type(circid) != int:
# assume it's a Circuit instance
circid = circid.id
flags = flags_from_dict(kwargs)
return self.protocol.queue_command(
'CLOSECIRCUIT %s%s' % (circid, flags)
)
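# For example (illustrative): close_circuit(99, IfUnused=True) queues
# "CLOSECIRCUIT 99 IfUnused"; a Circuit instance can be passed instead of
# the integer id.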
def add_circuit_listener(self, icircuitlistener):
listen = ICircuitListener(icircuitlistener)
for circ in self.circuits.values():
circ.listen(listen)
self.circuit_listeners.append(listen)
def add_stream_listener(self, istreamlistener):
listen = IStreamListener(istreamlistener)
for stream in self.streams.values():
stream.listen(listen)
self.stream_listeners.append(listen)
def _find_circuit_after_extend(self, x):
ex, circ_id = x.split()
if ex != 'EXTENDED':
raise RuntimeError('Expected EXTENDED, got "%s"' % x)
circ_id = int(circ_id)
circ = self._maybe_create_circuit(circ_id)
circ.update([str(circ_id), 'EXTENDED'])
return circ
def build_circuit(self, routers=None, using_guards=True):
"""
Builds a circuit consisting of exactly the routers specified,
in order. This issues an EXTENDCIRCUIT call to Tor with all
the routers specified.
:param routers: a list of Router instances which is the path
desired. To allow Tor to choose the routers itself, pass
None (the default) for routers.
:param using_guards: A warning is issued if the first router
isn't in self.entry_guards.
:return:
A Deferred that will callback with a Circuit instance
(with the .id member being valid, and probably nothing
else).
"""
if routers is None or routers == []:
cmd = "EXTENDCIRCUIT 0"
else:
if using_guards and routers[0] not in self.entry_guards.values():
warnings.warn(
"Circuit doesn't start with a guard: %s" % routers,
RuntimeWarning
)
cmd = "EXTENDCIRCUIT 0 "
first = True
for router in routers:
if first:
first = False
else:
cmd += ','
if isinstance(router, basestring) and len(router) == 40 \
and hashFromHexId(router):
cmd += router
else:
cmd += router.id_hex[1:]
d = self.protocol.queue_command(cmd)
d.addCallback(self._find_circuit_after_extend)
return d
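# For example (illustrative): build_circuit() queues "EXTENDCIRCUIT 0",
# letting Tor choose the path, while build_circuit([r1, r2, r3]) queues
# "EXTENDCIRCUIT 0 <hex1>,<hex2>,<hex3>" using each router's id_hex
# without its leading '$'.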
DO_NOT_ATTACH = object()
def _maybe_attach(self, stream):
"""
If we've got a custom stream-attachment instance (see
set_attacher) this will ask it for the appropriate
circuit. Note that we ignore .exit URIs and let Tor deal with
those (by passing circuit ID 0).
The stream attacher is allowed to return a Deferred which will
callback with the desired circuit.
You may return the special object DO_NOT_ATTACH which will
cause the circuit attacher to simply ignore the stream
(neither attaching it, nor telling Tor to attach it).
"""
if self.attacher is None:
return None
if stream.target_host is not None \
and '.exit' in stream.target_host:
# we want to totally ignore .exit URIs as these are
# used to specify a particular exit node, and trying
# to do STREAMATTACH on them will fail with an error
# from Tor anyway.
txtorlog.msg("ignore attacher:", stream)
return
# handle async or sync .attach() the same
circ_d = defer.maybeDeferred(
self.attacher.attach_stream,
stream, self.circuits,
)
# actually do the attachment logic; .attach() can return 3 things:
# 1. None: let Tor do whatever it wants
# 2. DO_NOT_ATTACH: don't attach the stream at all
# 3. Circuit instance: attach to the provided circuit
def issue_stream_attach(circ):
txtorlog.msg("circuit:", circ)
if circ is None:
# tell Tor to do what it likes
return self.protocol.queue_command("ATTACHSTREAM %d 0" % stream.id)
elif circ is self.DO_NOT_ATTACH:
# do nothing; don't attach the stream
return
else:
# should get a Circuit instance; check it for suitability
if not isinstance(circ, Circuit):
raise RuntimeError(
"IStreamAttacher.attach() must return a Circuit instance "
"(or None or DO_NOT_ATTACH): %s"
)
if circ.id not in self.circuits:
raise RuntimeError(
"Attacher returned a circuit unknown to me."
)
if circ.state != 'BUILT':
raise RuntimeError(
"Can only attach to BUILT circuits; %d is in %s." %
(circ.id, circ.state)
)
# we've got a valid Circuit instance; issue the command
return self.protocol.queue_command(
"ATTACHSTREAM %d %d" % (stream.id, circ.id)
)
circ_d.addCallback(issue_stream_attach)
circ_d.addErrback(self._attacher_error)
return circ_d
def _attacher_error(self, fail):
"""
not ideal, but there's not really a good way to let the caller
handle errors :/ since we ultimately call this due to an
async request from Tor. Mostly these errors will be logic or
syntax errors in the caller's code anyway.
tests monkey-patch this to reduce spew
"""
print("Failure while attaching stream:", fail)
return fail
def _circuit_status(self, data):
"""Used internally as a callback for updating Circuit information"""
data = data[len('circuit-status='):].split('\n')
# sometimes there's a newline after circuit-status= and
# sometimes not, so we get rid of it
if len(data) and len(data[0].strip()) == 0:
data = data[1:]
for line in data:
self._circuit_update(line)
def _stream_status(self, data):
"Used internally as a callback for updating Stream information"
# there's a slight issue with a single-stream vs >= 2 streams,
# in that in the latter case we have a line by itself with
# "stream-status=" on it followed by the streams EXCEPT in the
# single-stream case which has "stream-status=123 blahblah"
# (i.e. the key + value on one line)
lines = data.split('\n')
if len(lines) == 1:
d = lines[0][len('stream-status='):]
# if there are actually 0 streams, then there's nothing
# left to parse
if len(d):
self._stream_update(d)
else:
[self._stream_update(line) for line in lines[1:]]
def _update_network_status(self, data):
"""
Used internally as a callback for updating Router information
from NS and NEWCONSENSUS events.
"""
self.all_routers = set()
for line in data.split('\n'):
self._network_status_parser.process(line)
txtorlog.msg(len(self.routers_by_name), "named routers found.")
# remove any names we added that turned out to have dups
for (k, v) in self.routers.items():
if v is None:
txtorlog.msg(len(self.routers_by_name[k]), "dups:", k)
del self.routers[k]
txtorlog.msg(len(self.guards), "GUARDs")
def _maybe_create_circuit(self, circ_id):
if circ_id not in self.circuits:
c = self.circuit_factory(self)
c.listen(self)
for listener in self.circuit_listeners:
c.listen(listener)
else:
c = self.circuits[circ_id]
return c
def _circuit_update(self, line):
"""
Used internally as a callback to update Circuit information
from CIRC events.
"""
# print("circuit_update", line)
args = line.split()
circ_id = int(args[0])
c = self._maybe_create_circuit(circ_id)
c.update(args)
def _stream_update(self, line):
"""
Used internally as a callback to update Stream information
from STREAM events.
"""
# print("stream_update", line)
if line.strip() == 'stream-status=':
# this happens if there are no active streams
return
args = line.split()
assert len(args) >= 3
stream_id = int(args[0])
wasnew = False
if stream_id not in self.streams:
stream = self.stream_factory(self, self.addrmap)
self.streams[stream_id] = stream
stream.listen(self)
for x in self.stream_listeners:
stream.listen(x)
wasnew = True
self.streams[stream_id].update(args)
# if the update closed the stream, it won't be in our list
# anymore. FIXME: how can we ever hit such a case as the
# first update being a CLOSE?
if wasnew and stream_id in self.streams:
self._maybe_attach(self.streams[stream_id])
def _addr_map(self, addr):
"Internal callback to update DNS cache. Listens to ADDRMAP."
txtorlog.msg(" --> addr_map", addr)
self.addrmap.update(addr)
event_map = {'STREAM': _stream_update,
'CIRC': _circuit_update,
'NS': _update_network_status,
'NEWCONSENSUS': _update_network_status,
'ADDRMAP': _addr_map}
"""event_map used by add_events to map event_name -> unbound method"""
@defer.inlineCallbacks
def _add_events(self):
"""
Add listeners for all the events the controller is interested in.
"""
for (event, func) in self.event_map.items():
# the map contains unbound methods, so we bind them
# to self so they call the right thing
yield self.protocol.add_event_listener(
event,
types.MethodType(func, self, TorState)
)
# ICircuitContainer
def find_circuit(self, circid):
"ICircuitContainer API"
return self.circuits[circid]
# IRouterContainer
def router_from_id(self, routerid):
"""IRouterContainer API"""
try:
return self.routers[routerid[:41]]
except KeyError:
if routerid[0] != '$':
raise # just re-raise the KeyError
router = Router(self.protocol)
idhash = routerid[1:41]
nick = ''
is_named = False
if len(routerid) > 41:
nick = routerid[42:]
is_named = routerid[41] == '='
router.update(nick, hashFromHexId(idhash), '0' * 27, 'unknown',
'unknown', '0', '0')
router.name_is_unique = is_named
self.routers[router.id_hex] = router
return router
# implement IStreamListener
def stream_new(self, stream):
"IStreamListener: a new stream has been created"
txtorlog.msg("stream_new", stream)
def stream_succeeded(self, stream):
"IStreamListener: stream has succeeded"
txtorlog.msg("stream_succeeded", stream)
def stream_attach(self, stream, circuit):
"""
IStreamListener: the stream has been attached to a circuit. It
seems you get an attach to None followed by an attach to real
circuit fairly frequently. Perhaps related to __LeaveStreamsUnattached?
"""
txtorlog.msg("stream_attach", stream.id,
stream.target_host, " -> ", circuit)
def stream_detach(self, stream, **kw):
"""
IStreamListener
"""
txtorlog.msg("stream_detach", stream.id)
def stream_closed(self, stream, **kw):
"""
IStreamListener: stream has been closed (won't be in
controller's list anymore)
"""
txtorlog.msg("stream_closed", stream.id)
del self.streams[stream.id]
def stream_failed(self, stream, **kw):
"""
IStreamListener: stream failed for some reason (won't be in
controller's list anymore)
"""
txtorlog.msg("stream_failed", stream.id)
del self.streams[stream.id]
# implement ICircuitListener
def circuit_launched(self, circuit):
"ICircuitListener API"
txtorlog.msg("circuit_launched", circuit)
self.circuits[circuit.id] = circuit
def circuit_extend(self, circuit, router):
"ICircuitListener API"
txtorlog.msg("circuit_extend:", circuit.id, router)
def circuit_built(self, circuit):
"ICircuitListener API"
txtorlog.msg(
"circuit_built:", circuit.id,
"->".join("%s.%s" % (x.name, x.location.countrycode) for x in circuit.path),
circuit.streams
)
def circuit_new(self, circuit):
"ICircuitListener API"
txtorlog.msg("circuit_new:", circuit.id)
self.circuits[circuit.id] = circuit
def circuit_destroy(self, circuit):
"Used by circuit_closed and circuit_failed (below)"
txtorlog.msg("circuit_destroy:", circuit.id)
for d in circuit._when_built:
d.errback(Exception("Destroying circuit; will never hit BUILT"))
del self.circuits[circuit.id]
def circuit_closed(self, circuit, **kw):
"ICircuitListener API"
txtorlog.msg("circuit_closed", circuit)
self.circuit_destroy(circuit)
def circuit_failed(self, circuit, **kw):
"ICircuitListener API"
txtorlog.msg("circuit_failed", circuit, str(kw))
self.circuit_destroy(circuit)
| {
"content_hash": "65c39dddb80cc195ed4159a384b7d6f7",
"timestamp": "",
"source": "github",
"line_count": 941,
"max_line_length": 169,
"avg_line_length": 37.05207226354941,
"alnum_prop": 0.5889978775884817,
"repo_name": "david415/txtorcon",
"id": "c18a5778e5681ec0fdfbf65a736b8e431c024ff7",
"size": "34891",
"binary": false,
"copies": "1",
"ref": "refs/heads/192.fix_build_timeout_circuit.0",
"path": "txtorcon/torstate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3610"
},
{
"name": "Python",
"bytes": "489291"
},
{
"name": "Shell",
"bytes": "1287"
}
],
"symlink_target": ""
} |
'''
@author: Josinaldo
# 1) Write a program that asks for two integers and prints their sum
'''
if __name__ == '__main__':
n1 = int(raw_input("Enter the first number: "))
n2 = int(raw_input("Enter the second number: "))
print "Their sum is: %d" % (n1 + n2)
| {
"content_hash": "acf2b316d93e6b572c5125d5c00a1fa2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 89,
"avg_line_length": 33,
"alnum_prop": 0.6195286195286195,
"repo_name": "GADS2014M/Exercicios",
"id": "ed00cce80f79e0ea5e0ba8c7aff6a38249e68488",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Extra/PYTHON/Exercicios/Nivel1/Josinaldo/1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "85282"
},
{
"name": "Java",
"bytes": "41333"
},
{
"name": "Python",
"bytes": "48625"
}
],
"symlink_target": ""
} |