function (stringlengths 11 to 56k) | repo_name (stringlengths 5 to 60) | features (sequence) |
---|---|---|
def do_exit(self, _):
"""Quit. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_EOF(self, _):
"""Quit. handles EOF"""
self.do_exit(_) | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_list(self, _):
"""List available Polar devices. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_connect(self, dev_no):
"""Connect Polar device. Run 'list' to see available devices. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_disconnect(self, _):
"""Disconnect Polar device.
"""
self.device.close()
self.device = None
self.prompt = LoopholeCli.__PROMPT.format('no device')
print '[+] Device disconnected.'
print | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_get(self, line):
"""Read file from device and store in under local_path. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_delete(self, line):
"""Delete file from device. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_dump(self, path):
"""Dump device memory. Path is local folder to store dump. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_info(self, _):
"""Print connected device info. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_fuzz(self, _):
import polar
num = _.strip()
if len(num) > 0:
num = int(num)
resp = self.device.send_raw([0x01, num] + [0x00] * 62)
print 'req: {} '.format(num),
if resp:
print 'err code: {}'.format(polar.PFTP_ERROR[resp[0]])
return
for i in xrange(256):
#raw_input('Sending [{}]...<press enter>'.format(i))
if (i & 0x03) == 2:
continue
if i in [3, 251, 252]:
continue
resp = self.device.send_raw([0x01, i] + [0x00] * 62)
print 'resp: {} '.format(i),
if resp:
print 'err code: {}'.format(polar.PFTP_ERROR[resp[0]])
else:
print | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_put_file(self, line):
path, filename = line.split()
self.device.put_file(path.strip(), filename.strip()) | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def do_walk(self, path):
"""Walk file system. Default device_path is device root folder. | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def main():
cli = LoopholeCli()
cli.cmdloop(__INTRO.format(__version__)) | rsc-dev/loophole | [
146,
16,
146,
4,
1453282427
] |
def run(self, edit):
self.view.window().show_input_panel(
"Working directory that contains pawncc.exe",
"C:\\Pawno\\",
self.onPawnPathDone,
None,
None
) | Southclaw/pawn-sublime-language | [
30,
12,
30,
1,
1392238875
] |
def fix_missing_gene_symbols(de_novos, build='grch37'):
""" adds gene symbols to variants lacking them. | jeremymcrae/denovoFilter | [
6,
2,
6,
1,
1436451739
] |
def open_url(url, headers):
""" open url with python libraries | jeremymcrae/denovoFilter | [
6,
2,
6,
1,
1436451739
] |
def rate_limit_requests(rate_limit=0.0667):
""" limit ensembl requests to one per 0.067 s
""" | jeremymcrae/denovoFilter | [
6,
2,
6,
1,
1436451739
] |
def get_gene_id(chrom, start_pos, end_pos, build="grch37", verbose=False, attempts=0):
"""find the hgnc symbol overlapping a variant position | jeremymcrae/denovoFilter | [
6,
2,
6,
1,
1436451739
] |
def current_additions():
"""
Proxy to the currently added requirements
"""
rv = _additional_ctx_stack.top
if rv is None:
return None
return rv[1] | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def check(self, other):
if not isinstance(other, Additional):
return NotImplemented
return f(self, other) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __init__(self, *requirements):
self._requirements = set(requirements) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def remove(self, requirement, *requirements):
self._requirements.difference_update((requirement,) + requirements) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __add__(self, other):
requirements = self._requirements | other._requirements
return Additional(*requirements) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __iadd__(self, other):
if len(other._requirements) > 0:
self._requirements.update(other._requirements)
return self | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __sub__(self, other):
requirements = self._requirements - other._requirements
return Additional(*requirements) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __isub__(self, other):
if len(other._requirements) > 0:
self.remove(*other._requirements)
return self | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __eq__(self, other):
return self._requirements == other._requirements | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __ne__(self, other):
return not self == other | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def is_added(self, requirement):
return requirement in self._requirements | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __len__(self):
return len(self._requirements) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def __repr__(self):
return "Additional({!r})".format(self._requirements) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def push(self, additional, use_parent=False):
"""
Binds an additional to the current context, optionally using the
current additionals in conjunction with this one.
If ``use_parent`` is true, a new additional is created from the
parent and child additionals rather than manipulating either
directly.
"""
current = self.current
if use_parent and current:
additional = current + additional
_additional_ctx_stack.push((self, additional)) | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def current(self):
"""
Returns the current additional context if set, otherwise None
"""
try:
return _additional_ctx_stack.top[1]
except TypeError:
return None | justanr/flask-allows | [
106,
16,
106,
14,
1440088981
] |
def row_to_concept(row):
concept = {'uri': row['c']['value'],
'pref': row['pref']['value'],
'ysapref': row['ysapref']['value'],
'allarspref': row['allarspref']['value']}
if 'alts' in row:
concept['alts'] = row['alts']['value']
return concept | osma/annif | [
14,
1,
14,
5,
1487323466
] |
def search_finna(params):
r = requests.get(FINNA_API_SEARCH, params=params, headers={'User-agent': 'annif 0.1'})
return r.json() | osma/annif | [
14,
1,
14,
5,
1487323466
] |
def generate_text(concept, lang):
# start with pref- and altlabels
labels = [concept['pref']]
if lang == 'fi':
# we can use the YSA label too
labels.append(concept['ysapref'])
if lang == 'sv':
# we can use the Allars label too
labels.append(concept['allarspref'])
if 'alts' in concept:
labels.append(concept['alts']) | osma/annif | [
14,
1,
14,
5,
1487323466
] |
def setUp(self):
"""Prepare to run test."""
super(TaskPrologEnd2End, self).setUp()
self.script = os.path.join(os.path.dirname(self.script), 'mytaskprolog.py') | hpcugent/vsc-mympirun | [
6,
9,
6,
38,
1358903732
] |
def testFIPS180_1_Appendix_A(self):
""" APPENDIX A. A SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abc'
message_digest = 0xA9993E36L, 0x4706816AL, 0xBA3E2571L, 0x7850C26CL, 0x9CD0D89DL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix A test Failed' | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def testFIPS180_1_Appendix_B(self):
""" APPENDIX B. A SECOND SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'
message_digest = 0x84983E44L, 0x1C3BD26EL, 0xBAAE4AA1L, 0xF95129E5L, 0xE54670F1L
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix B test Failed' | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def testFIPS180_1_Appendix_C(self):
""" APPENDIX C. A THIRD SAMPLE MESSAGE AND ITS MESSAGE DIGEST
Let the message be the binary-coded form of the ASCII string which consists
of 1,000,000 repetitions of "a". """
hashAlg = SHA1()
message = 1000000*'a'
message_digest = 0x34AA973CL, 0xD4C4DAA4L, 0xF61EEB2BL, 0xDBAD2731L, 0x6534016FL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix C test Failed' | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def _toBlock(binaryString):
""" Convert binary string to blocks of 5 words of uint32() """
return [uint32(word) for word in struct.unpack('!IIIII', binaryString)] | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def _toBString(block):
""" Convert block (5 words of 32 bits to binary string """
return ''.join([struct.pack('!I',word) for word in block]) | dknlght/dkodi | [
14,
22,
14,
9,
1437005334
] |
def __init__(self, parent):
self.parent=parent | johm/infoshopkeeper | [
8,
3,
8,
3,
1323614202
] |
def Borrow(self, id):
borrower = self.parent.membersList.get(id)
print borrower
for i in self.parent.orderbox.items:
# Check if this works on sqlobject 0.7... I had
# lots of problems on 0.6.1, and itemID __isn't__
# defined in emprunt, which is plain weirdness
e = Emprunt(borrower = id, itemID=i.database_id)
print i.database_id
self.parent.orderbox.setBorrowed()
self.parent.orderbox.void()
self.Close() | johm/infoshopkeeper | [
8,
3,
8,
3,
1323614202
] |
def OnCancel(self,event):
self.EndModal(1) | johm/infoshopkeeper | [
8,
3,
8,
3,
1323614202
] |
def __init__(self, file):
self.file = file
self.major = 0
self.minor = 0
self.revision = 0
self.build = 1
self.touch() | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def read(self):
try:
f = open(self.file, 'r')
lines = f.readlines()
f.close()
for line in lines:
self.readline(line)
except IOError as e:
print 'File not found: %s (%s)'%(self.file, e)
sys.exit(1) | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def readline(self, line):
line = line.strip('\r\n\t ')
if len(line) == 0:
return
try:
m = re.search('(.*)=(.*)$', line)
if not m:
print 'Failed to parse line: %s'%(line.strip('\n\t '))
return
self.set(m.group(1), m.group(2))
except IndexError as e:
print 'Failed to parse line: %s (%s)'%(line.strip('\n\t '),e) | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def touch(self):
today = date.today()
self.date = today.isoformat() | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def version(self):
return '%d.%d.%d.%d'%(self.major, self.minor, self.revision, self.build) | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def __str__(self):
return 'version: %s, date %s'%(self.version(), self.date) | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def increment(self, key):
if key == 'build':
self.build += 1
elif key == 'revision':
self.revision += 1
self.build = 0
elif key == 'minor':
self.minor += 1
self.revision = 0
self.build = 0
elif key == 'major':
self.major += 1
self.minor = 0
self.revision = 0
self.build = 0 | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def print_version(self):
print '%d.%d.%d.%d'%(self.major, self.minor, self.revision, self.build) | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def write_hpp(self, file):
d = os.path.dirname(file)
if not os.path.exists(d):
os.makedirs(d)
f = open(file, 'w')
(ignored, filename) = os.path.split(file)
name = filename.upper().replace('.', '_')
f.write('#ifndef %s\n'%name)
f.write('#define %s\n'%name) | mickem/nscp | [
217,
88,
217,
271,
1335593133
] |
def setup_action_groups (self):
self.actionGroup = Gtk.ActionGroup(name='RecipeEmailerActionGroup')
self.actionGroup.add_actions([
('EmailRecipes',None,_('Email recipes'),
None,_('Email all selected recipes (or all recipes if no recipes are selected'),self.email_selected),
])
self.action_groups.append(self.actionGroup) | thinkle/gourmet | [
341,
141,
341,
274,
1355706416
] |
def get_selected_recs (self):
recs = self.rg.get_selected_recs_from_rec_tree()
if not recs:
recs = self.rd.fetch_all(self.rd.recipe_table, deleted=False, sort_by=[('title',1)])
return recs | thinkle/gourmet | [
341,
141,
341,
274,
1355706416
] |
def base_app(request):
"""Flask application fixture."""
instance_path = tempfile.mkdtemp()
os.environ.update(
APP_INSTANCE_PATH=instance_path
)
app = create_app(
# CELERY_ALWAYS_EAGER=True,
# CELERY_CACHE_BACKEND="memory",
# CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
# CELERY_RESULT_BACKEND="cache",
SECRET_KEY="CHANGE_ME",
SECURITY_PASSWORD_SALT="CHANGE_ME",
MAIL_SUPPRESS_SEND=True,
TESTING=True,
)
with app.app_context():
yield app
# Teardown
shutil.rmtree(instance_path) | CERNDocumentServer/cds | [
16,
30,
16,
126,
1399296999
] |
def db(base_app):
"""Initialize database."""
# Init
if not database_exists(str(_db.engine.url)):
create_database(str(_db.engine.url))
_db.create_all()
yield _db
# Teardown
_db.session.remove()
_db.drop_all() | CERNDocumentServer/cds | [
16,
30,
16,
126,
1399296999
] |
def es(base_app):
"""Provide elasticsearch access."""
try:
list(current_search.create())
except RequestError:
list(current_search.delete())
list(current_search.create())
current_search_client.indices.refresh()
yield current_search_client
list(current_search.delete(ignore=[404])) | CERNDocumentServer/cds | [
16,
30,
16,
126,
1399296999
] |
def app(base_app, es, db):
"""Application with ES and DB."""
yield base_app | CERNDocumentServer/cds | [
16,
30,
16,
126,
1399296999
] |
def env_browser(request):
"""Fixture for a webdriver instance of the browser."""
if request.param is None:
request.param = "Firefox"
# Create instance of webdriver.`request.param`()
browser = getattr(webdriver, request.param)()
yield browser
# Quit the webdriver instance
browser.quit() | CERNDocumentServer/cds | [
16,
30,
16,
126,
1399296999
] |
def isStockObject(obj):
"""Is this a stock windows object."""
return (obj & 0x80000000) != 0 | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def __init__(self, style=pyemf.PS_SOLID, width=1, color=0,
styleentries=[]):
"""Create pen.
styleentries is a list of dash and space lengths."""
pyemf._EMR._EXTCREATEPEN.__init__(self)
self.style = style
self.penwidth = width
self.color = pyemf._normalizeColor(color)
self.brushstyle = 0x0 # solid
if style & pyemf.PS_STYLE_MASK != pyemf.PS_USERSTYLE:
styleentries = []
self.numstyleentries = len(styleentries)
if styleentries:
self.unhandleddata = struct.pack(
"i"*self.numstyleentries, *styleentries) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def __init__(self, width_in, height_in, dpi=75):
qt.QPaintEngine.__init__(
self,
qt.QPaintEngine.Antialiasing |
qt.QPaintEngine.PainterPaths |
qt.QPaintEngine.PrimitiveTransform |
qt.QPaintEngine.PaintOutsidePaintEvent |
qt.QPaintEngine.PatternBrush
)
self.width = width_in
self.height = height_in
self.dpi = dpi | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def drawLines(self, lines):
"""Draw lines to emf output."""
for line in lines:
self.emf.Polyline(
[ (int(line.x1()*scale), int(line.y1()*scale)),
(int(line.x2()*scale), int(line.y2()*scale)) ] ) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def drawEllipse(self, rect):
"""Draw an ellipse."""
# print "ellipse"
args = (
int(rect.left()*scale), int(rect.top()*scale),
int(rect.right()*scale), int(rect.bottom()*scale),
int(rect.left()*scale), int(rect.top()*scale),
int(rect.left()*scale), int(rect.top()*scale),
)
self.emf.Pie(*args)
self.emf.Arc(*args) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def drawPixmap(self, r, pixmap, sr):
"""Draw pixmap to display."""
# convert pixmap to BMP format
bytearr = qt.QByteArray()
buf = qt.QBuffer(bytearr)
buf.open(qt.QIODevice.WriteOnly)
pixmap.save(buf, "BMP")
# chop off bmp header to get DIB
bmp = bytes(buf.data())
dib = bmp[0xe:]
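# BMP layout: the 14-byte (0x0e) BITMAPFILEHEADER comes first; bytes
# 0x0a-0x0d hold the offset to the pixel data, 0x0e-0x11 hold the DIB
# header size, and 0x22-0x25 hold biSizeImage in the BITMAPINFOHEADER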
hdrsize, = struct.unpack('<i', bmp[0xe:0x12])
dataindex, = struct.unpack('<i', bmp[0xa:0xe])
datasize, = struct.unpack('<i', bmp[0x22:0x26])
epix = pyemf._EMR._STRETCHDIBITS()
epix.rclBounds_left = int(r.left()*scale)
epix.rclBounds_top = int(r.top()*scale)
epix.rclBounds_right = int(r.right()*scale)
epix.rclBounds_bottom = int(r.bottom()*scale)
epix.xDest = int(r.left()*scale)
epix.yDest = int(r.top()*scale)
epix.cxDest = int(r.width()*scale)
epix.cyDest = int(r.height()*scale)
epix.xSrc = int(sr.left())
epix.ySrc = int(sr.top())
epix.cxSrc = int(sr.width())
epix.cySrc = int(sr.height())
epix.dwRop = 0xcc0020 # SRCCOPY
offset = epix.format.minstructsize + 8
epix.offBmiSrc = offset
epix.cbBmiSrc = hdrsize
epix.offBitsSrc = offset + dataindex - 0xe
epix.cbBitsSrc = datasize
epix.iUsageSrc = 0x0 # DIB_RGB_COLORS
epix.unhandleddata = dib
self.emf._append(epix) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def drawPath(self, path):
"""Draw a path on the output."""
# print "path"
self._createPath(path)
self.emf.StrokeAndFillPath() | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def end(self):
return True | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def _updatePen(self, pen):
"""Update the pen to the currently selected one."""
# line style
style = {
qt.Qt.NoPen: pyemf.PS_NULL,
qt.Qt.SolidLine: pyemf.PS_SOLID,
qt.Qt.DashLine: pyemf.PS_DASH,
qt.Qt.DotLine: pyemf.PS_DOT,
qt.Qt.DashDotLine: pyemf.PS_DASHDOT,
qt.Qt.DashDotDotLine: pyemf.PS_DASHDOTDOT,
qt.Qt.CustomDashLine: pyemf.PS_USERSTYLE,
}[pen.style()]
if style != pyemf.PS_NULL:
# set cap style
style |= {
qt.Qt.FlatCap: pyemf.PS_ENDCAP_FLAT,
qt.Qt.SquareCap: pyemf.PS_ENDCAP_SQUARE,
qt.Qt.RoundCap: pyemf.PS_ENDCAP_ROUND,
}[pen.capStyle()]
# set join style
style |= {
qt.Qt.MiterJoin: pyemf.PS_JOIN_MITER,
qt.Qt.BevelJoin: pyemf.PS_JOIN_BEVEL,
qt.Qt.RoundJoin: pyemf.PS_JOIN_ROUND,
qt.Qt.SvgMiterJoin: pyemf.PS_JOIN_MITER,
}[pen.joinStyle()]
# use proper widths of lines
style |= pyemf.PS_GEOMETRIC
width = int(pen.widthF()*scale)
qc = pen.color()
color = (qc.red(), qc.green(), qc.blue())
self.pencolor = color
if pen.style() == qt.Qt.CustomDashLine:
# make an extended pen if we need a custom dash pattern
dash = [int(pen.widthF()*scale*f) for f in pen.dashPattern()]
newpen = self.emf._appendHandle( _EXTCREATEPEN(
style, width=width, color=color, styleentries=dash))
else:
# use a standard create pen
newpen = self.emf.CreatePen(style, width, color)
self.emf.SelectObject(newpen)
# delete old pen if it is not a stock object
if not isStockObject(self.pen):
self.emf.DeleteObject(self.pen)
self.pen = newpen | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def _updateClipPath(self, path, operation):
"""Update clipping path."""
# print "clip"
if operation != qt.Qt.NoClip:
self._createPath(path)
clipmode = {
qt.Qt.ReplaceClip: pyemf.RGN_COPY,
qt.Qt.IntersectClip: pyemf.RGN_AND,
}[operation]
else:
# is this the only way to get rid of clipping?
self.emf.BeginPath()
self.emf.MoveTo(0,0)
w = int(self.width*self.dpi*scale)
h = int(self.height*self.dpi*scale)
self.emf.LineTo(w, 0)
self.emf.LineTo(w, h)
self.emf.LineTo(0, h)
self.emf.CloseFigure()
self.emf.EndPath()
clipmode = pyemf.RGN_COPY
self.emf.SelectClipPath(mode=clipmode) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def updateState(self, state):
"""Examine what has changed in state and call apropriate function."""
ss = state.state()
if ss & qt.QPaintEngine.DirtyPen:
self._updatePen(state.pen())
if ss & qt.QPaintEngine.DirtyBrush:
self._updateBrush(state.brush())
if ss & qt.QPaintEngine.DirtyTransform:
self._updateTransform(state.transform())
if ss & qt.QPaintEngine.DirtyClipPath:
self._updateClipPath(state.clipPath(), state.clipOperation())
if ss & qt.QPaintEngine.DirtyClipRegion:
path = qt.QPainterPath()
path.addRegion(state.clipRegion())
self._updateClipPath(path, state.clipOperation()) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def __init__(self, width_in, height_in, dpi=75):
qt.QPaintDevice.__init__(self)
self.engine = EMFPaintEngine(width_in, height_in, dpi=dpi) | veusz/veusz | [
634,
96,
634,
303,
1296746008
] |
def __init__(self, parent, name, address, length_bytes, device_info=None):
"""
:param parent: Parent object who owns this TenGbe instance
:param name: Unique name of the instance
:param address:
:param length_bytes:
:param device_info: Information about this device
"""
Memory.__init__(self, name, 32, address, length_bytes)
Gbe.__init__(self, parent, name, address, length_bytes, device_info)
self.memmap_compliant = self._check_memmap_compliance() | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def mac(self):
return self.get_gbe_core_details()['mac'] | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def ip_address(self):
return self.get_gbe_core_details()['ip'] | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def port(self):
return self.get_gbe_core_details()['fabric_port'] | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def post_create_update(self, raw_device_info):
"""
Update the device with information not available at creation.
:param raw_device_info: info about this block that may be useful
"""
super(TenGbe, self).post_create_update(raw_device_info)
self.snaps = {'tx': None, 'rx': None}
for snapshot in self.parent.snapshots:
if snapshot.name.find(self.name + '_') == 0:
name = snapshot.name.replace(self.name + '_', '')
if name == 'txs_ss':
self.snaps['tx'] = snapshot.name
elif name == 'rxs_ss':
self.snaps['rx'] = snapshot.name
else:
errmsg = '%s: incorrect snap %s under tengbe ' \
'block' % (self.fullname, snapshot.name)
LOGGER.error(errmsg)
raise RuntimeError(errmsg) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def read_rxsnap(self):
"""
Read the RX snapshot embedded in this TenGBE yellow block
"""
return self.snaps['rx'].read(timeout=10)['data'] | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def dhcp_start(self):
"""
Configure this interface, then start a DHCP client on ALL interfaces.
"""
#if self.mac is None:
# TODO get MAC from EEPROM serial number and assign here
# self.mac = '0'
reply, _ = self.parent.transport.katcprequest(
name='tap-start', request_timeout=5,
require_ok=True,
request_args=(self.name, self.name, '0.0.0.0',
str(self.port), str(self.mac), ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure starting tap driver.' % self.name)
reply, _ = self.parent.transport.katcprequest(
name='tap-arp-config', request_timeout=1,
require_ok=True,
request_args=(self.name, 'mode', '0'))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure disabling ARP.' % self.name)
reply, _ = self.parent.transport.katcprequest(
name='tap-dhcp', request_timeout=30,
require_ok=True,
request_args=(self.name, ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure starting DHCP client.' % self.name)
reply, _ = self.parent.transport.katcprequest(
name='tap-arp-config', request_timeout=1,
require_ok=True,
request_args=(self.name, 'mode', '-1'))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure re-enabling ARP.' % self.name)
# it looks like the command completed without error, so
# update the basic core details
self.get_gbe_core_details() | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def tap_stop(self):
"""
Stop a TAP driver.
"""
if not self.tap_running():
return
LOGGER.info('%s: stopping tap driver.' % self.fullname)
reply, _ = self.parent.transport.katcprequest(
name='tap-stop', request_timeout=-1,
require_ok=True, request_args=(self.name, ))
if reply.arguments[0] != 'ok':
raise RuntimeError('%s: failure stopping tap '
'device.' % self.fullname) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def handle_inform(msg):
uninforms.append(msg) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def tap_running(self):
"""
Determine whether a tap instance is already running for this
ten GbE interface.
"""
tapinfo = self.tap_info()
if tapinfo['name'] == '':
return False
return True | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def multicast_receive(self, ip_str, group_size):
"""
Send a request to KATCP to have this tap instance send a multicast
group join request.
:param ip_str: A dotted decimal string representation of the base
mcast IP address.
:param group_size: An integer for how many mcast addresses from
base to respond to.
"""
# mask = 255*(2 ** 24) + 255*(2 ** 16) + 255*(2 ** 8) + (255-group_size)
# self.parent.write_int(self.name, str2ip(ip_str), offset=12)
# self.parent.write_int(self.name, mask, offset=13)
# mcast_group_string = ip_str + '+' + str(group_size)
mcast_group_string = ip_str
reply, _ = self.parent.transport.katcprequest(
'tap-multicast-add', -1, True, request_args=(self.name, 'recv',
mcast_group_string, ))
if reply.arguments[0] == 'ok':
if mcast_group_string not in self.multicast_subscriptions:
self.multicast_subscriptions.append(mcast_group_string)
return
else:
raise RuntimeError('%s: failed adding multicast receive %s to '
'tap device.' % (self.fullname,
mcast_group_string)) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def _fabric_enable_disable(self, target_val):
"""
:param target_val:
"""
if self.memmap_compliant:
word_bytes = list(
struct.unpack('>4B', self.parent.read(self.name, 4, OFFSET_FLAGS)))
if word_bytes[0] == target_val:
return
word_bytes[0] = target_val
word_packed = struct.pack('>4B', *word_bytes)
self.parent.write(self.name, word_packed, OFFSET_FLAGS)
else:
# 0x20 or (0x20 / 4)? What was the /4 for?
word_bytes = list(
struct.unpack('>4B', self.parent.read(self.name, 4, 0x20)))
if word_bytes[1] == target_val:
return
word_bytes[1] = target_val
word_packed = struct.pack('>4B', *word_bytes)
self.parent.write(self.name, word_packed, 0x20) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def fabric_disable(self):
"""
Disable the core fabric
"""
self._fabric_enable_disable(0) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def write_val(val):
word_bytes[2] = val
word_packed = struct.pack('>4B', *word_bytes)
if val == 0:
self.parent.write(self.name, word_packed, OFFSET_FLAGS)
else:
self.parent.blindwrite(self.name, word_packed, OFFSET_FLAGS) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def write_val(val):
word_bytes[0] = val
word_packed = struct.pack('>4B', *word_bytes)
if val == 0:
self.parent.write(self.name, word_packed, 0x20)
else:
self.parent.blindwrite(self.name, word_packed, 0x20) | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def get_gbe_core_details(self, read_arp=False, read_cpu=False):
"""
Get 10GbE core details.
assemble struct for header stuff...
.. code-block:: python
\"\"\"
0x00 - 0x07: MAC address
0x08 - 0x0b: Not used
0x0c - 0x0f: Gateway addr
0x10 - 0x13: IP addr
0x14 - 0x17: Not assigned
0x18 - 0x1b: Buffer sizes
0x1c - 0x1f: Not assigned
0x20 : Soft reset (bit 0)
0x21 : Fabric enable (bit 0)
0x22 - 0x23: Fabric port
0x24 - 0x27: XAUI status (bit 2,3,4,5 = lane sync, bit6 = chan_bond)
0x28 - 0x2b: PHY config
0x28 : RX_eq_mix
0x29 : RX_eq_pol
0x2a : TX_preemph
0x2b : TX_diff_ctrl
0x30 - 0x33: Multicast IP RX base address
0x34 - 0x37: Multicast IP mask
0x38 - 0x3b: Subnet mask
0x1000 : CPU TX buffer
0x2000 : CPU RX buffer
0x3000 : ARP tables start
word_width = 8
\"\"\"
self.add_field(Bitfield.Field('mac0', 0, word_width, 0, 0 * word_width))
self.add_field(Bitfield.Field('mac1', 0, word_width, 0, 1 * word_width))
self.add_field(Bitfield.Field('mac2', 0, word_width, 0, 2 * word_width))
self.add_field(Bitfield.Field('mac3', 0, word_width, 0, 3 * word_width))
self.add_field(Bitfield.Field('mac4', 0, word_width, 0, 4 * word_width))
self.add_field(Bitfield.Field('mac5', 0, word_width, 0, 5 * word_width))
self.add_field(Bitfield.Field('mac6', 0, word_width, 0, 6 * word_width))
self.add_field(Bitfield.Field('mac7', 0, word_width, 0, 7 * word_width))
self.add_field(Bitfield.Field('unused_1', 0, (0x0c - 0x08) * word_width, 0, 8 * word_width))
self.add_field(Bitfield.Field('gateway_ip0', 0, word_width, 0, 0x0c * word_width))
self.add_field(Bitfield.Field('gateway_ip1', 0, word_width, 0, 0x0d * word_width))
self.add_field(Bitfield.Field('gateway_ip2', 0, word_width, 0, 0x0e * word_width))
self.add_field(Bitfield.Field('gateway_ip3', 0, word_width, 0, 0x0f * word_width))
self.add_field(Bitfield.Field('ip0', 0, word_width, 0, 0x10 * word_width))
self.add_field(Bitfield.Field('ip1', 0, word_width, 0, 0x11 * word_width))
self.add_field(Bitfield.Field('ip2', 0, word_width, 0, 0x12 * word_width))
self.add_field(Bitfield.Field('ip3', 0, word_width, 0, 0x13 * word_width))
self.add_field(Bitfield.Field('unused_2', 0, (0x18 - 0x14) * word_width, 0, 0x14 * word_width))
self.add_field(Bitfield.Field('buf_sizes', 0, (0x1c - 0x18) * word_width, 0, 0x18 * word_width))
self.add_field(Bitfield.Field('unused_3', 0, (0x20 - 0x1c) * word_width, 0, 0x1c * word_width))
self.add_field(Bitfield.Field('soft_reset', 2, 1, 0, 0x20 * word_width))
self.add_field(Bitfield.Field('fabric_enable', 2, 1, 0, 0x21 * word_width))
self.add_field(Bitfield.Field('port', 0, (0x24 - 0x22) * word_width, 0, 0x22 * word_width))
self.add_field(Bitfield.Field('xaui_status', 0, (0x28 - 0x24) * word_width, 0, 0x24 * word_width))
self.add_field(Bitfield.Field('rx_eq_mix', 0, word_width, 0, 0x28 * word_width))
self.add_field(Bitfield.Field('rq_eq_pol', 0, word_width, 0, 0x29 * word_width))
self.add_field(Bitfield.Field('tx_preempth', 0, word_width, 0, 0x2a * word_width))
self.add_field(Bitfield.Field('tx_diff_ctrl', 0, word_width, 0, 0x2b * word_width))
#self.add_field(Bitfield.Field('buffer_tx', 0, 0x1000 * word_width, 0, 0x1000 * word_width))
#self.add_field(Bitfield.Field('buffer_rx', 0, 0x1000 * word_width, 0, 0x2000 * word_width))
#self.add_field(Bitfield.Field('arp_table', 0, 0x1000 * word_width, 0, 0x3000 * word_width))
"""
if self.memmap_compliant:
data = self.parent.read(self.name, 16384)
data = list(struct.unpack('>16384B', data))
returnval = {
'ip_prefix': '%i.%i.%i.' % (data[0x14], data[0x15], data[0x16]),
'ip': IpAddress('%i.%i.%i.%i' % (data[0x14], data[0x15],
data[0x16], data[0x17])),
'subnet_mask': IpAddress('%i.%i.%i.%i' % (
data[0x1c], data[0x1d], data[0x1e], data[0x1f])),
'mac': Mac('%i:%i:%i:%i:%i:%i' % (data[0x0e], data[0x0f],
data[0x10], data[0x11],
data[0x12], data[0x13])),
'gateway_ip': IpAddress('%i.%i.%i.%i' % (data[0x18], data[0x19],
data[0x1a], data[0x1b])),
'fabric_port': ((data[0x32] << 8) + (data[0x33])),
'fabric_en': bool(data[0x2f] & 1),
'multicast': {'base_ip': IpAddress('%i.%i.%i.%i' % (
data[0x20], data[0x21], data[0x22], data[0x23])),
'ip_mask': IpAddress('%i.%i.%i.%i' % (
data[0x24], data[0x25], data[0x26], data[0x27])),
'rx_ips': []}
}
else:
data = self.parent.read(self.name, 16384)
data = list(struct.unpack('>16384B', data))
returnval = {
'ip_prefix': '%i.%i.%i.' % (data[0x10], data[0x11], data[0x12]),
'ip': IpAddress('%i.%i.%i.%i' % (data[0x10], data[0x11],
data[0x12], data[0x13])),
'subnet_mask': IpAddress('%i.%i.%i.%i' % (
data[0x38], data[0x39], data[0x3a], data[0x3b])),
'mac': Mac('%i:%i:%i:%i:%i:%i' % (data[0x02], data[0x03],
data[0x04], data[0x05],
data[0x06], data[0x07])),
'gateway_ip': IpAddress('%i.%i.%i.%i' % (data[0x0c], data[0x0d],
data[0x0e], data[0x0f])),
'fabric_port': ((data[0x22] << 8) + (data[0x23])),
'fabric_en': bool(data[0x21] & 1),
'xaui_lane_sync': [bool(data[0x27] & 4), bool(data[0x27] & 8),
bool(data[0x27] & 16), bool(data[0x27] & 32)],
'xaui_status': [data[0x24], data[0x25], data[0x26], data[0x27]],
'xaui_chan_bond': bool(data[0x27] & 64),
'xaui_phy': {'rx_eq_mix': data[0x28], 'rx_eq_pol': data[0x29],
'tx_preemph': data[0x2a], 'tx_swing': data[0x2b]},
'multicast': {'base_ip': IpAddress('%i.%i.%i.%i' % (
data[0x30], data[0x31], data[0x32], data[0x33])),
'ip_mask': IpAddress('%i.%i.%i.%i' % (
data[0x34], data[0x35], data[0x36], data[0x37])),
'rx_ips': []}
}
possible_addresses = [int(returnval['multicast']['base_ip'])]
mask_int = int(returnval['multicast']['ip_mask'])
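# every zero bit in the multicast mask is a wildcard: fork each
# candidate address into its bit-cleared and bit-set variants to
# enumerate all addresses the core will accept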
for ctr in range(32):
mask_bit = (mask_int >> ctr) & 1
if not mask_bit:
new_ips = []
for ip in possible_addresses:
new_ips.append(ip & (~(1 << ctr)))
new_ips.append(new_ips[-1] | (1 << ctr))
possible_addresses.extend(new_ips)
tmp = list(set(possible_addresses))
for ip in tmp:
returnval['multicast']['rx_ips'].append(IpAddress(ip))
if read_arp:
returnval['arp'] = self.get_arp_details(data)
if read_cpu:
returnval.update(self.get_cpu_details(data))
self.core_details = returnval
return returnval | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def get_cpu_details(self, port_dump=None):
"""
Read details of the CPU buffers.
:param port_dump:
"""
#TODO Not memmap compliant
if port_dump is None:
port_dump = self.parent.read(self.name, 16384)
port_dump = list(struct.unpack('>16384B', port_dump))
returnval = {'cpu_tx': {}}
for ctr in range(4096 / 8):
tmp = []
for ctr2 in range(8):
tmp.append(port_dump[4096 + (8 * ctr) + ctr2])
returnval['cpu_tx'][ctr*8] = tmp
returnval['cpu_rx_buf_unack_data'] = port_dump[6 * 4 + 3]
returnval['cpu_rx'] = {}
for ctr in range(port_dump[6 * 4 + 3] + 8):
tmp = []
for ctr2 in range(8):
tmp.append(port_dump[8192 + (8 * ctr) + ctr2])
returnval['cpu_rx'][ctr * 8] = tmp
return returnval | ska-sa/casperfpga | [
6,
40,
6,
5,
1402594479
] |
def guess_payload(p):
LDPTypes = {
0x0001: LDPNotification,
0x0100: LDPHello,
0x0200: LDPInit,
0x0201: LDPKeepAlive,
0x0300: LDPAddress,
0x0301: LDPAddressWM,
0x0400: LDPLabelMM,
0x0401: LDPLabelReqM,
0x0404: LDPLabelARM,
0x0402: LDPLabelWM,
0x0403: LDPLabelRelM,
}
type = struct.unpack("!H",p[0:2])[0]
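# the top bit of the LDP message type is the U (unknown message) flag;
# mask it off before looking up the payload class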
type = type & 0x7fff
if type == 0x0001 and struct.unpack("!H",p[2:4])[0] > 20:
return LDP
if type in LDPTypes:
return LDPTypes[type]
else:
return conf.raw_layer | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def m2i(self, pkt, x):
nbr = struct.unpack("!H",x[2:4])[0]
used = 0
x=x[4:]
list=[]
while x:
#if x[0] == 1:
# list.append('Wildcard')
#else:
#mask=ord(x[8*i+3])
#add=inet_ntoa(x[8*i+4:8*i+8])
mask=ord(x[3])
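# the prefix is packed into only ceil(mask/8) octets; pad with zero
# bytes back to 4 before converting to dotted-quad notation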
nbroctets = mask / 8
if mask % 8:
nbroctets += 1
add=inet_ntoa(x[4:4+nbroctets]+"\x00"*(4-nbroctets))
list.append( (add, mask) )
used += 4 + nbroctets
x=x[4+nbroctets:]
return list | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def size(self, s):
"""Get the size of this field"""
l = 4 + struct.unpack("!H",s[2:4])[0]
return l | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def m2i(self, pkt, x):
return struct.unpack("!I",x[4:8])[0] | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def size(self, s):
"""Get the size of this field"""
l = 4 + struct.unpack("!H",s[2:4])[0]
return l | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def m2i(self, pkt, x):
nbr = struct.unpack("!H",x[2:4])[0] - 2
nbr /= 4
x=x[6:]
list=[]
for i in range(0,nbr):
add = x[4*i:4*i+4]
list.append(inet_ntoa(add))
return list | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def size(self, s):
"""Get the size of this field"""
l = 4 + struct.unpack("!H",s[2:4])[0]
return l | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def m2i(self, pkt, x):
l = []
statuscode = struct.unpack("!I",x[4:8])[0]
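# Status TLV value (RFC 5036): E bit (31), F bit (30), 30-bit status
# data, then the 4-byte Message ID and 2-byte Message Type it refers to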
l.append( (statuscode & 2**31) >> 31)
l.append( (statuscode & 2**30) >> 30)
l.append( statuscode & 0x3FFFFFFF )
l.append( struct.unpack("!I", x[8:12])[0] )
l.append( struct.unpack("!H", x[12:14])[0] )
return l | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def getfield(self, pkt, s):
l = 14
return s[l:],self.m2i(pkt, s[:l]) | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |
def m2i(self, pkt, x):
list = []
v = struct.unpack("!H",x[4:6])[0]
list.append(v)
flags = struct.unpack("B",x[6])[0]
v = ( flags & 0x80 ) >> 7
list.append(v)
v = ( flags & 0x40 ) >> 6
list.append(v)
return list | phaethon/scapy | [
847,
200,
847,
72,
1422527787
] |