code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars) |
---|---|---|
def rbac_policy_update(request, policy_id, **kwargs):
"""Update a RBAC Policy.
:param request: request context
:param policy_id: target policy id
:param target_tenant: target tenant of the policy
:return: RBACPolicy object
"""
body = {'rbac_policy': kwargs}
rbac_policy = neutronclient(request).update_rbac_policy(
policy_id, body=body).get('rbac_policy')
return RBACPolicy(rbac_policy) | Update a RBAC Policy.
:param request: request context
:param policy_id: target policy id
:param target_tenant: target tenant of the policy
:return: RBACPolicy object | Below is the instruction that describes the task:
### Input:
Update a RBAC Policy.
:param request: request context
:param policy_id: target policy id
:param target_tenant: target tenant of the policy
:return: RBACPolicy object
### Response:
def rbac_policy_update(request, policy_id, **kwargs):
"""Update a RBAC Policy.
:param request: request context
:param policy_id: target policy id
:param target_tenant: target tenant of the policy
:return: RBACPolicy object
"""
body = {'rbac_policy': kwargs}
rbac_policy = neutronclient(request).update_rbac_policy(
policy_id, body=body).get('rbac_policy')
return RBACPolicy(rbac_policy) |
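A hypothetical usage sketch for rbac_policy_update above; the Horizon request object, policy ID, and tenant ID are placeholders introduced here, not values from the source.
# Inside a Horizon view, `request` is the current Django request (assumed available).
updated = rbac_policy_update(request, 'POLICY-ID', target_tenant='TENANT-ID')
print(updated)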
def query_hek(time, time_window=1):
"""
requests hek responses for a given time
:param time: datetime object
:param time_window: how far in hours on either side of the input time to look for results
:return: hek response list
"""
hek_client = hek.HEKClient()
start_time = time - timedelta(hours=time_window)
end_time = time + timedelta(hours=time_window)
responses = hek_client.query(hek.attrs.Time(start_time, end_time))
return responses | requests hek responses for a given time
:param time: datetime object
:param time_window: how far in hours on either side of the input time to look for results
:return: hek response list | Below is the instruction that describes the task:
### Input:
requests hek responses for a given time
:param time: datetime object
:param time_window: how far in hours on either side of the input time to look for results
:return: hek response list
### Response:
def query_hek(time, time_window=1):
"""
requests hek responses for a given time
:param time: datetime object
:param time_window: how far in hours on either side of the input time to look for results
:return: hek response list
"""
hek_client = hek.HEKClient()
start_time = time - timedelta(hours=time_window)
end_time = time + timedelta(hours=time_window)
responses = hek_client.query(hek.attrs.Time(start_time, end_time))
return responses |
def style_from_dict(style_dict, include_defaults=True):
"""
Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc...)
"""
assert isinstance(style_dict, Mapping)
if include_defaults:
s2 = {}
s2.update(DEFAULT_STYLE_EXTENSIONS)
s2.update(style_dict)
style_dict = s2
# Expand token inheritance and turn style description into Attrs.
token_to_attrs = {}
# (Loop through the tokens in order. Sorting makes sure that
# we process the parent first.)
for ttype, styledef in sorted(style_dict.items()):
# Start from parent Attrs or default Attrs.
attrs = DEFAULT_ATTRS
if 'noinherit' not in styledef:
for i in range(1, len(ttype) + 1):
try:
attrs = token_to_attrs[ttype[:-i]]
except KeyError:
pass
else:
break
# Now update with the given attributes.
for part in styledef.split():
if part == 'noinherit':
pass
elif part == 'bold':
attrs = attrs._replace(bold=True)
elif part == 'nobold':
attrs = attrs._replace(bold=False)
elif part == 'italic':
attrs = attrs._replace(italic=True)
elif part == 'noitalic':
attrs = attrs._replace(italic=False)
elif part == 'underline':
attrs = attrs._replace(underline=True)
elif part == 'nounderline':
attrs = attrs._replace(underline=False)
# prompt_toolkit extensions. Not in Pygments.
elif part == 'blink':
attrs = attrs._replace(blink=True)
elif part == 'noblink':
attrs = attrs._replace(blink=False)
elif part == 'reverse':
attrs = attrs._replace(reverse=True)
elif part == 'noreverse':
attrs = attrs._replace(reverse=False)
# Pygments properties that we ignore.
elif part in ('roman', 'sans', 'mono'):
pass
elif part.startswith('border:'):
pass
# Colors.
elif part.startswith('bg:'):
attrs = attrs._replace(bgcolor=_colorformat(part[3:]))
else:
attrs = attrs._replace(color=_colorformat(part))
token_to_attrs[ttype] = attrs
return _StyleFromDict(token_to_attrs) | Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc...) | Below is the instruction that describes the task:
### Input:
Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc...)
### Response:
def style_from_dict(style_dict, include_defaults=True):
"""
Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc...)
"""
assert isinstance(style_dict, Mapping)
if include_defaults:
s2 = {}
s2.update(DEFAULT_STYLE_EXTENSIONS)
s2.update(style_dict)
style_dict = s2
# Expand token inheritance and turn style description into Attrs.
token_to_attrs = {}
# (Loop through the tokens in order. Sorting makes sure that
# we process the parent first.)
for ttype, styledef in sorted(style_dict.items()):
# Start from parent Attrs or default Attrs.
attrs = DEFAULT_ATTRS
if 'noinherit' not in styledef:
for i in range(1, len(ttype) + 1):
try:
attrs = token_to_attrs[ttype[:-i]]
except KeyError:
pass
else:
break
# Now update with the given attributes.
for part in styledef.split():
if part == 'noinherit':
pass
elif part == 'bold':
attrs = attrs._replace(bold=True)
elif part == 'nobold':
attrs = attrs._replace(bold=False)
elif part == 'italic':
attrs = attrs._replace(italic=True)
elif part == 'noitalic':
attrs = attrs._replace(italic=False)
elif part == 'underline':
attrs = attrs._replace(underline=True)
elif part == 'nounderline':
attrs = attrs._replace(underline=False)
# prompt_toolkit extensions. Not in Pygments.
elif part == 'blink':
attrs = attrs._replace(blink=True)
elif part == 'noblink':
attrs = attrs._replace(blink=False)
elif part == 'reverse':
attrs = attrs._replace(reverse=True)
elif part == 'noreverse':
attrs = attrs._replace(reverse=False)
# Pygments properties that we ignore.
elif part in ('roman', 'sans', 'mono'):
pass
elif part.startswith('border:'):
pass
# Colors.
elif part.startswith('bg:'):
attrs = attrs._replace(bgcolor=_colorformat(part[3:]))
else:
attrs = attrs._replace(color=_colorformat(part))
token_to_attrs[ttype] = attrs
return _StyleFromDict(token_to_attrs) |
def merge(self, target, source,
target_comment=None, source_comment=None):
"""
Merge the ticket(s) or ticket ID(s) in source into the target ticket.
:param target: ticket id or object to merge tickets into
:param source: ticket id, object or list of tickets or ids to merge into target
:param source_comment: optional comment for the source ticket(s)
:param target_comment: optional comment for the target ticket
:return: a JobStatus object
"""
return TicketMergeRequest(self).post(target, source,
target_comment=target_comment,
source_comment=source_comment) | Merge the ticket(s) or ticket ID(s) in source into the target ticket.
:param target: ticket id or object to merge tickets into
:param source: ticket id, object or list of tickets or ids to merge into target
:param source_comment: optional comment for the source ticket(s)
:param target_comment: optional comment for the target ticket
:return: a JobStatus object | Below is the instruction that describes the task:
### Input:
Merge the ticket(s) or ticket ID(s) in source into the target ticket.
:param target: ticket id or object to merge tickets into
:param source: ticket id, object or list of tickets or ids to merge into target
:param source_comment: optional comment for the source ticket(s)
:param target_comment: optional comment for the target ticket
:return: a JobStatus object
### Response:
def merge(self, target, source,
target_comment=None, source_comment=None):
"""
Merge the ticket(s) or ticket ID(s) in source into the target ticket.
:param target: ticket id or object to merge tickets into
:param source: ticket id, object or list of tickets or ids to merge into target
:param source_comment: optional comment for the source ticket(s)
:param target_comment: optional comment for the target ticket
:return: a JobStatus object
"""
return TicketMergeRequest(self).post(target, source,
target_comment=target_comment,
source_comment=source_comment) |
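A hypothetical usage sketch for merge() above; the client object name and ticket IDs are assumptions for illustration (any object exposing this method would do), not values from the source.
# Merge two duplicate tickets into ticket 1001 and inspect the returned JobStatus.
job_status = client.tickets.merge(
    target=1001,
    source=[1002, 1003],
    target_comment='Merged duplicate tickets into #1001',
    source_comment='Closed as duplicate of #1001')
print(job_status)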
def _recursive_round(self, value, precision):
"""
Round all numbers within an array or nested arrays
value: number or nested array of numbers
precision: integer value of the number of decimals to keep
"""
if hasattr(value, '__iter__'):
return tuple(self._recursive_round(v, precision) for v in value)
return round(value, precision) | Round all numbers within an array or nested arrays
value: number or nested array of numbers
precision: integer value of the number of decimals to keep | Below is the instruction that describes the task:
### Input:
Round all numbers within an array or nested arrays
value: number or nested array of numbers
precision: integer value of the number of decimals to keep
### Response:
def _recursive_round(self, value, precision):
"""
Round all numbers within an array or nested arrays
value: number or nested array of numbers
precision: integer value of the number of decimals to keep
"""
if hasattr(value, '__iter__'):
return tuple(self._recursive_round(v, precision) for v in value)
return round(value, precision) |
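A standalone sketch of the same rounding logic, renamed because the original is a private method on its class; the printed result is illustrative.
def recursive_round(value, precision):
    # Recurse into any iterable, returning tuples; round leaf numbers.
    if hasattr(value, '__iter__'):
        return tuple(recursive_round(v, precision) for v in value)
    return round(value, precision)

print(recursive_round([1.2345, (2.3456, 3.45678)], 2))  # -> (1.23, (2.35, 3.46))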
def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1):
"""
Returns the period of *data* according to the
`Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
precision : number
Distance between contiguous frequencies in search-space.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
period_jobs : int, optional
Number of simultaneous processes to use while searching. Only one
process will ever be used, but argument is included to conform to
*periodogram* standards of :func:`find_period` (default 1).
**Returns**
period : number
The period of *data*.
"""
time, mags, *err = data.T
scaled_mags = (mags-mags.mean())/mags.std()
minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period
freqs = np.arange(minf, maxf, precision)
pgram = lombscargle(time, scaled_mags, freqs)
return 2*np.pi/freqs[np.argmax(pgram)] | Returns the period of *data* according to the
`Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
precision : number
Distance between contiguous frequencies in search-space.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
period_jobs : int, optional
Number of simultaneous processes to use while searching. Only one
process will ever be used, but argument is included to conform to
*periodogram* standards of :func:`find_period` (default 1).
**Returns**
period : number
The period of *data*. | Below is the instruction that describes the task:
### Input:
Returns the period of *data* according to the
`Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
precision : number
Distance between contiguous frequencies in search-space.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
period_jobs : int, optional
Number of simultaneous processes to use while searching. Only one
process will ever be used, but argument is included to conform to
*periodogram* standards of :func:`find_period` (default 1).
**Returns**
period : number
The period of *data*.
### Response:
def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1):
"""
Returns the period of *data* according to the
`Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
precision : number
Distance between contiguous frequencies in search-space.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
period_jobs : int, optional
Number of simultaneous processes to use while searching. Only one
process will ever be used, but argument is included to conform to
*periodogram* standards of :func:`find_period` (default 1).
**Returns**
period : number
The period of *data*.
"""
time, mags, *err = data.T
scaled_mags = (mags-mags.mean())/mags.std()
minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period
freqs = np.arange(minf, maxf, precision)
pgram = lombscargle(time, scaled_mags, freqs)
return 2*np.pi/freqs[np.argmax(pgram)] |
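A hypothetical usage sketch for Lomb_Scargle above: recover the period of a noisy sinusoid. It assumes numpy (as np) and scipy.signal.lombscargle are importable, as the function itself requires; all data values are synthetic.
import numpy as np

t = np.sort(np.random.uniform(0, 20, 300))           # irregular sampling times
mags = np.sin(2 * np.pi * t / 2.5) + 0.1 * np.random.randn(t.size)
data = np.column_stack([t, mags])                     # columns: time, mag
print(Lomb_Scargle(data, precision=0.001, min_period=1.0, max_period=5.0))  # ~2.5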
def to_json(self):
"""
Returns the JSON Representation of the resource.
"""
result = super(FieldsResource, self).to_json()
result['fields'] = self.fields_with_locales()
return result | Returns the JSON Representation of the resource. | Below is the the instruction that describes the task:
### Input:
Returns the JSON Representation of the resource.
### Response:
def to_json(self):
"""
Returns the JSON Representation of the resource.
"""
result = super(FieldsResource, self).to_json()
result['fields'] = self.fields_with_locales()
return result |
def _approxaA(self,R,vR,vT,z,vz,phi,interp=True,cindx=None):
"""
NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
2015-11-12 - Added weighted sum of two nearest Jacobians to help with smoothness - Bovy (UofT)
"""
if isinstance(R,(int,float,numpy.float32,numpy.float64)): #Scalar input
R= numpy.array([R])
vR= numpy.array([vR])
vT= numpy.array([vT])
z= numpy.array([z])
vz= numpy.array([vz])
phi= numpy.array([phi])
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if cindx is None:
closestIndx= [self._find_closest_trackpoint(X[ii],Y[ii],Z[ii],
z[ii],vz[ii],phi[ii],
interp=interp,
xy=True,usev=False)
for ii in range(len(R))]
else:
closestIndx= cindx
out= numpy.empty((6,len(R)))
for ii in range(len(R)):
dxv= numpy.empty(6)
if interp:
dxv[0]= R[ii]-self._interpolatedObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._interpolatedObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._interpolatedObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._interpolatedObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._interpolatedObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._interpolatedObsTrack[closestIndx[ii],5]
jacIndx= self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=False,
xy=False)
else:
dxv[0]= R[ii]-self._ObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._ObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._ObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._ObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._ObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._ObsTrack[closestIndx[ii],5]
jacIndx= closestIndx[ii]
# Find 2nd closest Jacobian point for smoothing
dmJacIndx= (X[ii]-self._ObsTrackXY[jacIndx,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx,2])**2.
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
else:
dm1= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
dm2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
ampJacIndx= numpy.sqrt(dmJacIndx)/(numpy.sqrt(dmJacIndx)\
+numpy.sqrt(dmJacIndx2))
#Make sure phi hasn't wrapped around
if dxv[5] > numpy.pi:
dxv[5]-= 2.*numpy.pi
elif dxv[5] < -numpy.pi:
dxv[5]+= 2.*numpy.pi
#Apply closest jacobians
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._alljacsTrack[jacIndx,:,:]
+ampJacIndx*self._alljacsTrack[jacIndx2,:,:],
dxv)
if interp:
out[:,ii]+= self._interpolatedObsTrackAA[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrackAA[closestIndx[ii]]
return out | NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
2015-11-12 - Added weighted sum of two nearest Jacobians to help with smoothness - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
2015-11-12 - Added weighted sum of two nearest Jacobians to help with smoothness - Bovy (UofT)
### Response:
def _approxaA(self,R,vR,vT,z,vz,phi,interp=True,cindx=None):
"""
NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
2015-11-12 - Added weighted sum of two nearest Jacobians to help with smoothness - Bovy (UofT)
"""
if isinstance(R,(int,float,numpy.float32,numpy.float64)): #Scalar input
R= numpy.array([R])
vR= numpy.array([vR])
vT= numpy.array([vT])
z= numpy.array([z])
vz= numpy.array([vz])
phi= numpy.array([phi])
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if cindx is None:
closestIndx= [self._find_closest_trackpoint(X[ii],Y[ii],Z[ii],
z[ii],vz[ii],phi[ii],
interp=interp,
xy=True,usev=False)
for ii in range(len(R))]
else:
closestIndx= cindx
out= numpy.empty((6,len(R)))
for ii in range(len(R)):
dxv= numpy.empty(6)
if interp:
dxv[0]= R[ii]-self._interpolatedObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._interpolatedObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._interpolatedObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._interpolatedObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._interpolatedObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._interpolatedObsTrack[closestIndx[ii],5]
jacIndx= self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=False,
xy=False)
else:
dxv[0]= R[ii]-self._ObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._ObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._ObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._ObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._ObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._ObsTrack[closestIndx[ii],5]
jacIndx= closestIndx[ii]
# Find 2nd closest Jacobian point for smoothing
dmJacIndx= (X[ii]-self._ObsTrackXY[jacIndx,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx,2])**2.
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
else:
dm1= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
dm2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
ampJacIndx= numpy.sqrt(dmJacIndx)/(numpy.sqrt(dmJacIndx)\
+numpy.sqrt(dmJacIndx2))
#Make sure phi hasn't wrapped around
if dxv[5] > numpy.pi:
dxv[5]-= 2.*numpy.pi
elif dxv[5] < -numpy.pi:
dxv[5]+= 2.*numpy.pi
#Apply closest jacobians
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._alljacsTrack[jacIndx,:,:]
+ampJacIndx*self._alljacsTrack[jacIndx2,:,:],
dxv)
if interp:
out[:,ii]+= self._interpolatedObsTrackAA[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrackAA[closestIndx[ii]]
return out |
def validate_regex(ctx, param, value):
"""
Validate that a provided regex compiles.
"""
if not value:
return None
try:
re.compile(value)
except re.error:
raise click.BadParameter('Invalid regex "{0}" provided'.format(value))
return value | Validate that a provided regex compiles. | Below is the the instruction that describes the task:
### Input:
Validate that a provided regex compiles.
### Response:
def validate_regex(ctx, param, value):
"""
Validate that a provided regex compiles.
"""
if not value:
return None
try:
re.compile(value)
except re.error:
raise click.BadParameter('Invalid regex "{0}" provided'.format(value))
return value |
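A hypothetical usage sketch wiring validate_regex above into a click option callback; the command and option names are invented for illustration.
import click

@click.command()
@click.option('--pattern', callback=validate_regex, help='Regex to filter by.')
def search(pattern):
    click.echo('using pattern: {0}'.format(pattern))

if __name__ == '__main__':
    search()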
def runs(self, path="", filters={}, order="-created_at", per_page=None):
"""Return a set of runs from a project that match the filters provided.
You can filter by config.*, summary.*, state, username, createdAt, etc.
The filters use the same query language as MongoDB:
https://docs.mongodb.com/manual/reference/operator/query
Order can be created_at, heartbeat_at, config.*.value, or summary.*. By default
the order is descending, if you prepend order with a + order becomes ascending.
"""
username, project, run = self._parse_path(path)
if not self._runs.get(path):
self._runs[path + str(filters) + str(order)] = Runs(self.client, username, project,
filters=filters, order=order, per_page=per_page)
return self._runs[path + str(filters) + str(order)] | Return a set of runs from a project that match the filters provided.
You can filter by config.*, summary.*, state, username, createdAt, etc.
The filters use the same query language as MongoDB:
https://docs.mongodb.com/manual/reference/operator/query
Order can be created_at, heartbeat_at, config.*.value, or summary.*. By default
the order is descending, if you prepend order with a + order becomes ascending. | Below is the instruction that describes the task:
### Input:
Return a set of runs from a project that match the filters provided.
You can filter by config.*, summary.*, state, username, createdAt, etc.
The filters use the same query language as MongoDB:
https://docs.mongodb.com/manual/reference/operator/query
Order can be created_at, heartbeat_at, config.*.value, or summary.*. By default
the order is descending, if you prepend order with a + order becomes ascending.
### Response:
def runs(self, path="", filters={}, order="-created_at", per_page=None):
"""Return a set of runs from a project that match the filters provided.
You can filter by config.*, summary.*, state, username, createdAt, etc.
The filters use the same query language as MongoDB:
https://docs.mongodb.com/manual/reference/operator/query
Order can be created_at, heartbeat_at, config.*.value, or summary.*. By default
the order is descending, if you prepend order with a + order becomes ascending.
"""
username, project, run = self._parse_path(path)
if not self._runs.get(path):
self._runs[path + str(filters) + str(order)] = Runs(self.client, username, project,
filters=filters, order=order, per_page=per_page)
return self._runs[path + str(filters) + str(order)] |
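A hypothetical usage sketch for runs() above; the api object, entity/project path, and filter keys are placeholders, not values from the source.
# `api` is assumed to be an instance of the class that defines runs().
for run in api.runs('my-entity/my-project',
                    filters={'config.batch_size': 64},
                    order='+summary.loss'):
    print(run)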
def mergebam(args):
"""
%prog mergebam dir1 homo_outdir
or
%prog mergebam dir1 dir2/20.bam het_outdir
Merge sets of BAMs to make diploid. Two modes:
- Homozygous mode: pair-up the bams in the two folders and merge
- Heterozygous mode: pair the bams in first folder with a particular bam
"""
p = OptionParser(mergebam.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
if len(args) == 2:
idir1, outdir = args
dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam")
logging.debug("Homozygous mode")
dir2 = [""] * len(dir1)
elif len(args) == 3:
idir1, idir2, outdir = args
dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam")
dir2 = [idir2] if idir2.endswith(".bam") else iglob(idir2, "*.bam")
assert len(dir2) == 1, "Second pile must contain a single bam"
dir2 = [idir2] * len(dir1)
assert len(dir1) == len(dir2), "Two piles must contain same number of bams"
cmd = "samtools merge {} {} {} && samtools index {}"
cmds = []
mkdir(outdir)
for a, b in zip(dir1, dir2):
ia = op.basename(a).split(".")[0]
ib = op.basename(b).split(".")[0] if b else ia
outfile = op.join(outdir, "{}_{}.bam".format(ia, ib))
cmds.append(cmd.format(outfile, a, b, outfile))
p = Parallel(cmds, cpus=opts.cpus)
p.run() | %prog mergebam dir1 homo_outdir
or
%prog mergebam dir1 dir2/20.bam het_outdir
Merge sets of BAMs to make diploid. Two modes:
- Homozygous mode: pair-up the bams in the two folders and merge
- Heterozygous mode: pair the bams in first folder with a particular bam | Below is the instruction that describes the task:
### Input:
%prog mergebam dir1 homo_outdir
or
%prog mergebam dir1 dir2/20.bam het_outdir
Merge sets of BAMs to make diploid. Two modes:
- Homozygous mode: pair-up the bams in the two folders and merge
- Heterozygous mode: pair the bams in first folder with a particular bam
### Response:
def mergebam(args):
"""
%prog mergebam dir1 homo_outdir
or
%prog mergebam dir1 dir2/20.bam het_outdir
Merge sets of BAMs to make diploid. Two modes:
- Homozygous mode: pair-up the bams in the two folders and merge
- Heterozygous mode: pair the bams in first folder with a particular bam
"""
p = OptionParser(mergebam.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
if len(args) == 2:
idir1, outdir = args
dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam")
logging.debug("Homozygous mode")
dir2 = [""] * len(dir1)
elif len(args) == 3:
idir1, idir2, outdir = args
dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam")
dir2 = [idir2] if idir2.endswith(".bam") else iglob(idir2, "*.bam")
assert len(dir2) == 1, "Second pile must contain a single bam"
dir2 = [idir2] * len(dir1)
assert len(dir1) == len(dir2), "Two piles must contain same number of bams"
cmd = "samtools merge {} {} {} && samtools index {}"
cmds = []
mkdir(outdir)
for a, b in zip(dir1, dir2):
ia = op.basename(a).split(".")[0]
ib = op.basename(b).split(".")[0] if b else ia
outfile = op.join(outdir, "{}_{}.bam".format(ia, ib))
cmds.append(cmd.format(outfile, a, b, outfile))
p = Parallel(cmds, cpus=opts.cpus)
p.run() |
def detect(self, stream, threshold, threshold_type, trig_int, plotvar,
daylong=False, parallel_process=True, xcorr_func=None,
concurrency=None, cores=None, ignore_length=False,
group_size=None, overlap="calculate", debug=0,
full_peaks=False, save_progress=False,
process_cores=None, **kwargs):
"""
Detect using a Tribe of templates within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
performs additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try to check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type group_size: int
:param group_size:
Maximum number of templates to run at once, use to reduce memory
consumption, if unset will use all templates.
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calculating cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeak.find_peaks2_short`
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:type process_cores: int
:param process_cores:
Number of processes to use for pre-processing (if different to
`cores`).
:return:
:class:`eqcorrscan.core.match_filter.Party` of Families of
detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. warning::
Picks included in the output Party.get_catalog() will not be
corrected for pre-picks in the template.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after
trace 1 in the template then the continuous data will be shifted
by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous
data streams **may be missed**. The maximum time-period that
might be missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest
using some overlap (a few seconds, on the order of the maximum
offset in the templates) in the continuous data. You will then
need to post-process the detections (which should be done anyway
to remove duplicates). See below note for how `overlap` argument
affects data internally if `stream` is longer than the processing
length.
.. Note::
If `stream` is longer than processing length, this routine will
ensure that data overlap between loops, which will lead to no
missed detections at data start-stop points (see above note).
This will result in end-time not being strictly
honoured, so detections may occur after the end-time set. This is
because data must be run in the correct process-length.
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given
template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for
the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
"""
party = Party()
template_groups = []
for master in self.templates:
for group in template_groups:
if master in group:
break
else:
new_group = [master]
for slave in self.templates:
if master.same_processing(slave) and master != slave:
new_group.append(slave)
template_groups.append(new_group)
# template_groups will contain an empty first list
for group in template_groups:
if len(group) == 0:
template_groups.remove(group)
# now we can compute the detections for each group
for group in template_groups:
group_party = _group_detect(
templates=group, stream=stream.copy(), threshold=threshold,
threshold_type=threshold_type, trig_int=trig_int,
plotvar=plotvar, group_size=group_size, pre_processed=False,
daylong=daylong, parallel_process=parallel_process,
xcorr_func=xcorr_func, concurrency=concurrency, cores=cores,
ignore_length=ignore_length, overlap=overlap, debug=debug,
full_peaks=full_peaks, process_cores=process_cores, **kwargs)
party += group_party
if save_progress:
party.write("eqcorrscan_temporary_party")
if len(party) > 0:
for family in party:
if family is not None:
family.detections = family._uniq().detections
return party | Detect using a Tribe of templates within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
performs additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try to check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type group_size: int
:param group_size:
Maximum number of templates to run at once, use to reduce memory
consumption, if unset will use all templates.
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calculating cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeak.find_peaks2_short`
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:type process_cores: int
:param process_cores:
Number of processes to use for pre-processing (if different to
`cores`).
:return:
:class:`eqcorrscan.core.match_filter.Party` of Families of
detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. warning::
Picks included in the output Party.get_catalog() will not be
corrected for pre-picks in the template.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after
trace 1 in the template then the continuous data will be shifted
by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous
data streams **may be missed**. The maximum time-period that
might be missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest
using some overlap (a few seconds, on the order of the maximum
offset in the templates) in the continuous data. You will then
need to post-process the detections (which should be done anyway
to remove duplicates). See below note for how `overlap` argument
affects data internally if `stream` is longer than the processing
length.
.. Note::
If `stream` is longer than processing length, this routine will
ensure that data overlap between loops, which will lead to no
missed detections at data start-stop points (see above note).
This will result in end-time not being strictly
honoured, so detections may occur after the end-time set. This is
because data must be run in the correct process-length.
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given
template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for
the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template. | Below is the instruction that describes the task:
### Input:
Detect using a Tribe of templates within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
performs additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try to check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type group_size: int
:param group_size:
Maximum number of templates to run at once, use to reduce memory
consumption, if unset will use all templates.
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calculating cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeak.find_peaks2_short`
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:type process_cores: int
:param process_cores:
Number of processes to use for pre-processing (if different to
`cores`).
:return:
:class:`eqcorrscan.core.match_filter.Party` of Families of
detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. warning::
Picks included in the output Party.get_catalog() will not be
corrected for pre-picks in the template.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after
trace 1 in the template then the continuous data will be shifted
by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous
data streams **may be missed**. The maximum time-period that
might be missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest
using some overlap (a few seconds, on the order of the maximum
offset in the templates) in the continuous data. You will then
need to post-process the detections (which should be done anyway
to remove duplicates). See below note for how `overlap` argument
affects data internally if `stream` is longer than the processing
length.
.. Note::
If `stream` is longer than processing length, this routine will
ensure that data overlap between loops, which will lead to no
missed detections at data start-stop points (see above note).
This will result in end-time not being strictly
honoured, so detections may occur after the end-time set. This is
because data must be run in the correct process-length.
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given
template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for
the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
### Response:
def detect(self, stream, threshold, threshold_type, trig_int, plotvar,
daylong=False, parallel_process=True, xcorr_func=None,
concurrency=None, cores=None, ignore_length=False,
group_size=None, overlap="calculate", debug=0,
full_peaks=False, save_progress=False,
process_cores=None, **kwargs):
"""
Detect using a Tribe of templates within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
performs additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try to check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type group_size: int
:param group_size:
Maximum number of templates to run at once, use to reduce memory
consumption, if unset will use all templates.
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calculating cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeak.find_peaks2_short`
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:type process_cores: int
:param process_cores:
Number of processes to use for pre-processing (if different to
`cores`).
:return:
:class:`eqcorrscan.core.match_filter.Party` of Families of
detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. warning::
Picks included in the output Party.get_catalog() will not be
corrected for pre-picks in the template.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after
trace 1 in the template then the continuous data will be shifted
by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous
data streams **may be missed**. The maximum time-period that
might be missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest
using some overlap (a few seconds, on the order of the maximum
offset in the templates) in the continuous data. You will then
need to post-process the detections (which should be done anyway
to remove duplicates). See below note for how `overlap` argument
affects data internally if `stream` is longer than the processing
length.
.. Note::
If `stream` is longer than processing length, this routine will
ensure that data overlap between loops, which will lead to no
missed detections at data start-stop points (see above note).
This will result in end-time not being strictly
honoured, so detections may occur after the end-time set. This is
because data must be run in the correct process-length.
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given
template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for
the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
"""
party = Party()
template_groups = []
for master in self.templates:
for group in template_groups:
if master in group:
break
else:
new_group = [master]
for slave in self.templates:
if master.same_processing(slave) and master != slave:
new_group.append(slave)
template_groups.append(new_group)
# template_groups will contain an empty first list
for group in template_groups:
if len(group) == 0:
template_groups.remove(group)
# now we can compute the detections for each group
for group in template_groups:
group_party = _group_detect(
templates=group, stream=stream.copy(), threshold=threshold,
threshold_type=threshold_type, trig_int=trig_int,
plotvar=plotvar, group_size=group_size, pre_processed=False,
daylong=daylong, parallel_process=parallel_process,
xcorr_func=xcorr_func, concurrency=concurrency, cores=cores,
ignore_length=ignore_length, overlap=overlap, debug=debug,
full_peaks=full_peaks, process_cores=process_cores, **kwargs)
party += group_party
if save_progress:
party.write("eqcorrscan_temporary_party")
if len(party) > 0:
for family in party:
if family is not None:
family.detections = family._uniq().detections
return party |
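A hypothetical usage sketch for Tribe.detect above; it assumes a Tribe named tribe and a day-long obspy Stream named st have already been built, and the threshold values are illustrative.
party = tribe.detect(
    stream=st, threshold=8.0, threshold_type='MAD', trig_int=6.0,
    plotvar=False, daylong=True, cores=4)
print(len(party), 'families of detections')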
def find_executable(executable, path=None):
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
if sys.platform != 'win32':
return distutils.spawn.find_executable(executable, path)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable | As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe` | Below is the instruction that describes the task:
### Input:
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
### Response:
def find_executable(executable, path=None):
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
if sys.platform != 'win32':
return distutils.spawn.find_executable(executable, path)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable |
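A brief usage sketch for the PATHEXT-aware find_executable above; the executable name is arbitrary.
exe = find_executable('python')
print('python resolves to:', exe if exe else '<not found on PATH>')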
def print(root):
# type: (Union[Nonterminal,Terminal,Rule])-> str
"""
Transform the parsed tree to the string. Expects tree like structure.
You can see example output below.
(R)SplitRules26
|--(N)Iterate
| `--(R)SplitRules30
| `--(N)Symb
| `--(R)SplitRules4
| `--(T)e
`--(N)Concat
`--(R)SplitRules27
`--(N)Iterate
`--(R)SplitRules30
`--(N)Symb
`--(R)SplitRules5
`--(T)f
:param root: Root node of the parsed tree.
:return: String representing the parsed tree (ends with newline).
"""
# print the part before the element
def print_before(previous=0, defined=None, is_last=False):
defined = defined or {}
ret = ''
if previous != 0:
for i in range(previous - 1):
# if the column is still active write |
if i in defined:
ret += '| '
# otherwise just print space
else:
ret += ' '
# if is current element last child, don't print |-- but `-- instead
ret += '`--' if is_last else '|--'
return ret
# print the terminal
def terminal_traverse(term, callback, previous=0, defined=None, is_last=False):
before = print_before(previous, defined, is_last)
yield before + '(T)' + str(term.s) + '\n'
# print the nonterminal
def nonterminal_traverse(nonterm, callback, previous=0, defined=None, is_last=False):
before = print_before(previous, defined, is_last)
yield before + '(N)' + nonterm.__class__.__name__ + '\n'
yield callback(nonterm.to_rule, previous + 1, defined, True)
# print the rule
def rule_traverse(rule, callback, previous=0, defined=None, is_last=False):
# print the rule name
before = print_before(previous, defined, is_last)
yield before + '(R)' + rule.__class__.__name__ + '\n'
# register new column
defined = defined or set()
defined.add(previous)
# print all childs except the last one
for i in range(len(rule.to_symbols) - 1):
yield callback(rule.to_symbols[i], previous + 1, defined, False)
# unregister the column as last child print it automatically
defined.remove(previous)
yield callback(rule.to_symbols[-1], previous + 1, defined, True)
res = Traversing.traverse_separated(root, rule_traverse, nonterminal_traverse, terminal_traverse)
return str.join("", res) | Transform the parsed tree to the string. Expects tree like structure.
You can see example output below.
(R)SplitRules26
|--(N)Iterate
| `--(R)SplitRules30
| `--(N)Symb
| `--(R)SplitRules4
| `--(T)e
`--(N)Concat
`--(R)SplitRules27
`--(N)Iterate
`--(R)SplitRules30
`--(N)Symb
`--(R)SplitRules5
`--(T)f
:param root: Root node of the parsed tree.
:return: String representing the parsed tree (ends with newline). | Below is the instruction that describes the task:
### Input:
Transform the parsed tree to the string. Expects tree like structure.
You can see example output below.
(R)SplitRules26
|--(N)Iterate
| `--(R)SplitRules30
| `--(N)Symb
| `--(R)SplitRules4
| `--(T)e
`--(N)Concat
`--(R)SplitRules27
`--(N)Iterate
`--(R)SplitRules30
`--(N)Symb
`--(R)SplitRules5
`--(T)f
:param root: Root node of the parsed tree.
:return: String representing the parsed tree (ends with newline).
### Response:
def print(root):
# type: (Union[Nonterminal,Terminal,Rule])-> str
"""
Transform the parsed tree to the string. Expects tree like structure.
You can see example output below.
(R)SplitRules26
|--(N)Iterate
| `--(R)SplitRules30
| `--(N)Symb
| `--(R)SplitRules4
| `--(T)e
`--(N)Concat
`--(R)SplitRules27
`--(N)Iterate
`--(R)SplitRules30
`--(N)Symb
`--(R)SplitRules5
`--(T)f
:param root: Root node of the parsed tree.
:return: String representing the parsed tree (ends with newline).
"""
# print the part before the element
def print_before(previous=0, defined=None, is_last=False):
defined = defined or {}
ret = ''
if previous != 0:
for i in range(previous - 1):
# if the column is still active write |
if i in defined:
ret += '| '
# otherwise just print space
else:
ret += ' '
# if is current element last child, don't print |-- but `-- instead
ret += '`--' if is_last else '|--'
return ret
# print the terminal
def terminal_traverse(term, callback, previous=0, defined=None, is_last=False):
before = print_before(previous, defined, is_last)
yield before + '(T)' + str(term.s) + '\n'
# print the nonterminal
def nonterminal_traverse(nonterm, callback, previous=0, defined=None, is_last=False):
before = print_before(previous, defined, is_last)
yield before + '(N)' + nonterm.__class__.__name__ + '\n'
yield callback(nonterm.to_rule, previous + 1, defined, True)
# print the rule
def rule_traverse(rule, callback, previous=0, defined=None, is_last=False):
# print the rule name
before = print_before(previous, defined, is_last)
yield before + '(R)' + rule.__class__.__name__ + '\n'
# register new column
defined = defined or set()
defined.add(previous)
# print all childs except the last one
for i in range(len(rule.to_symbols) - 1):
yield callback(rule.to_symbols[i], previous + 1, defined, False)
# unregister the column as last child print it automatically
defined.remove(previous)
yield callback(rule.to_symbols[-1], previous + 1, defined, True)
res = Traversing.traverse_separated(root, rule_traverse, nonterminal_traverse, terminal_traverse)
return str.join("", res) |
def fullname(self):
""" includes the full path with parent names """
prefix = ""
if self.parent:
if self.parent.fullname:
prefix = self.parent.fullname + ":"
else:
# Only the root does not have a parent. In that case we also don't need a name.
return ""
return prefix + self.name | includes the full path with parent names | Below is the instruction that describes the task:
### Input:
includes the full path with parent names
### Response:
def fullname(self):
""" includes the full path with parent names """
prefix = ""
if self.parent:
if self.parent.fullname:
prefix = self.parent.fullname + ":"
else:
# Only the root does not have a parent. In that case we also don't need a name.
return ""
return prefix + self.name |
def to_bytes(s, encoding=None, errors=None):
'''Convert *s* into bytes'''
if not isinstance(s, bytes):
return ('%s' % s).encode(encoding or 'utf-8', errors or 'strict')
elif not encoding or encoding == 'utf-8':
return s
else:
d = s.decode('utf-8')
return d.encode(encoding, errors or 'strict') | Convert *s* into bytes | Below is the instruction that describes the task:
### Input:
Convert *s* into bytes
### Response:
def to_bytes(s, encoding=None, errors=None):
'''Convert *s* into bytes'''
if not isinstance(s, bytes):
return ('%s' % s).encode(encoding or 'utf-8', errors or 'strict')
elif not encoding or encoding == 'utf-8':
return s
else:
d = s.decode('utf-8')
return d.encode(encoding, errors or 'strict') |
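A few illustrative calls for to_bytes; the sample values were chosen only to exercise the three branches of the function.
assert to_bytes('abc') == b'abc'                          # str encoded as UTF-8
assert to_bytes(42) == b'42'                              # non-bytes values are stringified first
assert to_bytes(b'caf\xc3\xa9', 'latin-1') == b'caf\xe9'  # existing bytes re-encoded via a UTF-8 decode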
def igrf12syn(isv, date, itype, alt, lat, elong):
"""
This is a synthesis routine for the 12th generation IGRF as agreed
in December 2014 by IAGA Working Group V-MOD. It is valid 1900.0 to
2020.0 inclusive. Values for dates from 1945.0 to 2010.0 inclusive are
definitive, otherwise they are non-definitive.
INPUT
isv = 0 if main-field values are required
isv = 1 if secular variation values are required
date = year A.D. Must be greater than or equal to 1900.0 and
less than or equal to 2025.0. Warning message is given
for dates greater than 2020.0. Must be double precision.
itype = 1 if geodetic (spheroid)
itype = 2 if geocentric (sphere)
alt = height in km above sea level if itype = 1
= distance from centre of Earth in km if itype = 2 (>3485 km)
lat = latitude (-90~90)
elong = east-longitude (0-360)
alt, colat and elong must be double precision.
OUTPUT
x = north component (nT) if isv = 0, nT/year if isv = 1
y = east component (nT) if isv = 0, nT/year if isv = 1
z = vertical component (nT) if isv = 0, nT/year if isv = 1
f = total intensity (nT) if isv = 0, rubbish if isv = 1
To get the other geomagnetic elements (D, I, H and secular
variations dD, dH, dI and dF) use routines ptoc and ptocsv.
Adapted from 8th generation version to include new maximum degree for
main-field models for 2000.0 and onwards and use WGS84 spheroid instead
of International Astronomical Union 1966 spheroid as recommended by IAGA
in July 2003. Reference radius remains as 6371.2 km - it is NOT the mean
radius (= 6371.0 km) but 6371.2 km is what is used in determining the
coefficients. Adaptation by Susan Macmillan, August 2003 (for
9th generation), December 2004, December 2009 \ December 2014.
Coefficients at 1995.0 incorrectly rounded (rounded up instead of
to even) included as these are the coefficients published in Excel
spreadsheet July 2005.
"""
p, q, cl, sl = [0.] * 105, [0.] * 105, [0.] * 13, [0.] * 13
# set initial values
x, y, z = 0., 0., 0.
if date < 1900.0 or date > 2025.0:
f = 1.0
print('This subroutine will not work with a date of ' + str(date))
print('Date must be in the range 1900.0 <= date <= 2025.0')
print('On return f = 1.0, x = y = z = 0')
return x, y, z, f
elif date >= 2015.0:
if date > 2020.0:
# not adapt for the model but can calculate
print('This version of the IGRF is intended for use up to 2020.0.')
print('values for ' + str(date) + ' will be computed but may be of reduced accuracy')
t = date - 2015.0
tc = 1.0
if isv == 1:
t = 1.0
tc = 0.0
# pointer for last coefficient in pen-ultimate set of MF coefficients...
ll = 3060
nmx = 13
nc = nmx * (nmx + 2)
kmx = (nmx + 1) * (nmx + 2) / 2
else:
t = 0.2 * (date - 1900.0)
ll = int(t)
t = t - ll
# SH models before 1995.0 are only to degree 10
if date < 1995.0:
nmx = 10
nc = nmx * (nmx + 2)
ll = nc * ll
kmx = (nmx + 1) * (nmx + 2) / 2
else:
nmx = 13
nc = nmx * (nmx + 2)
ll = round(0.2 * (date - 1995.0))
# 19 is the number of SH models that extend to degree 10
ll = 120 * 19 + nc * ll
kmx = (nmx + 1) * (nmx + 2) / 2
tc = 1.0 - t
if isv == 1:
tc = -0.2
t = 0.2
colat = 90-lat
r = alt
one = colat / FACT
ct = np.cos(one)
st = np.sin(one)
one = elong / FACT
cl[0] = np.cos(one)
sl[0] = np.sin(one)
cd = 1.0
sd = 0.0
l = 1
m = 1
n = 0
if itype != 2:
gclat, gclon, r = geodetic2geocentric(np.arctan2(st, ct), alt)
ct, st = np.cos(gclat), np.sin(gclat)
cd, sd = np.cos(gclon), np.sin(gclon)
ratio = 6371.2 / r
rr = ratio * ratio
# computation of Schmidt quasi-normal coefficients p and x(=q)
p[0] = 1.0
p[2] = st
q[0] = 0.0
q[2] = ct
fn, gn = n, n-1
for k in range(2, int(kmx)+1):
if n < m:
m = 0
n = n + 1
rr = rr * ratio
fn = n
gn = n - 1
fm = m
if m != n:
gmm = m * m
one = np.sqrt(fn * fn - gmm)
two = np.sqrt(gn * gn - gmm) / one
three = (fn + gn) / one
i = k - n
j = i - n + 1
p[k - 1] = three * ct * p[i - 1] - two * p[j - 1]
q[k - 1] = three * (ct * q[i - 1] - st * p[i - 1]) - two * q[j - 1]
else:
if k != 3:
one = np.sqrt(1.0 - 0.5 / fm)
j = k - n - 1
p[k-1] = one * st * p[j-1]
q[k-1] = one * (st * q[j-1] + ct * p[j-1])
cl[m-1] = cl[m - 2] * cl[0] - sl[m - 2] * sl[0]
sl[m-1] = sl[m - 2] * cl[0] + cl[m - 2] * sl[0]
# synthesis of x, y and z in geocentric coordinates
lm = ll + l
# print('g', n, m, k, gh[int(lm-1)], gh[int(lm + nc-1)])
one = (tc * gh[int(lm-1)] + t * gh[int(lm + nc-1)]) * rr
if m == 0:
x = x + one * q[k - 1]
z = z - (fn + 1.0) * one * p[k - 1]
l = l + 1
else:
# print('h', n, m, k, gh[int(lm)], gh[int(lm + nc)])
two = (tc * gh[int(lm)] + t * gh[int(lm + nc)]) * rr
three = one * cl[m-1] + two * sl[m-1]
x = x + three * q[k-1]
z = z - (fn + 1.0) * three * p[k-1]
if st == 0.0:
y = y + (one * sl[m - 1] - two * cl[m - 1]) * q[k - 1] * ct
else:
y = y + (one * sl[m-1] - two * cl[m-1]) * fm * p[k-1] / st
l = l + 2
m = m+1
# conversion to coordinate system specified by itype
one = x
x = x * cd + z * sd
z = z * cd - one * sd
f = np.sqrt(x * x + y * y + z * z)
#
return x, y, z, f | This is a synthesis routine for the 12th generation IGRF as agreed
in December 2014 by IAGA Working Group V-MOD. It is valid 1900.0 to
2020.0 inclusive. Values for dates from 1945.0 to 2010.0 inclusive are
definitive, otherwise they are non-definitive.
INPUT
isv = 0 if main-field values are required
isv = 1 if secular variation values are required
date = year A.D. Must be greater than or equal to 1900.0 and
less than or equal to 2025.0. Warning message is given
for dates greater than 2020.0. Must be double precision.
itype = 1 if geodetic (spheroid)
itype = 2 if geocentric (sphere)
alt = height in km above sea level if itype = 1
= distance from centre of Earth in km if itype = 2 (>3485 km)
lat = latitude (-90~90)
elong = east-longitude (0-360)
alt, colat and elong must be double precision.
OUTPUT
x = north component (nT) if isv = 0, nT/year if isv = 1
y = east component (nT) if isv = 0, nT/year if isv = 1
z = vertical component (nT) if isv = 0, nT/year if isv = 1
f = total intensity (nT) if isv = 0, rubbish if isv = 1
To get the other geomagnetic elements (D, I, H and secular
variations dD, dH, dI and dF) use routines ptoc and ptocsv.
Adapted from 8th generation version to include new maximum degree for
main-field models for 2000.0 and onwards and use WGS84 spheroid instead
of International Astronomical Union 1966 spheroid as recommended by IAGA
in July 2003. Reference radius remains as 6371.2 km - it is NOT the mean
radius (= 6371.0 km) but 6371.2 km is what is used in determining the
coefficients. Adaptation by Susan Macmillan, August 2003 (for
9th generation), December 2004, December 2009 \ December 2014.
Coefficients at 1995.0 incorrectly rounded (rounded up instead of
to even) included as these are the coefficients published in Excel
spreadsheet July 2005. | Below is the instruction that describes the task:
### Input:
This is a synthesis routine for the 12th generation IGRF as agreed
in December 2014 by IAGA Working Group V-MOD. It is valid 1900.0 to
2020.0 inclusive. Values for dates from 1945.0 to 2010.0 inclusive are
definitive, otherwise they are non-definitive.
INPUT
isv = 0 if main-field values are required
isv = 1 if secular variation values are required
date = year A.D. Must be greater than or equal to 1900.0 and
less than or equal to 2025.0. Warning message is given
for dates greater than 2020.0. Must be double precision.
itype = 1 if geodetic (spheroid)
itype = 2 if geocentric (sphere)
alt = height in km above sea level if itype = 1
= distance from centre of Earth in km if itype = 2 (>3485 km)
lat = latitude (-90~90)
elong = east-longitude (0-360)
alt, colat and elong must be double precision.
OUTPUT
x = north component (nT) if isv = 0, nT/year if isv = 1
y = east component (nT) if isv = 0, nT/year if isv = 1
z = vertical component (nT) if isv = 0, nT/year if isv = 1
f = total intensity (nT) if isv = 0, rubbish if isv = 1
To get the other geomagnetic elements (D, I, H and secular
variations dD, dH, dI and dF) use routines ptoc and ptocsv.
Adapted from 8th generation version to include new maximum degree for
main-field models for 2000.0 and onwards and use WGS84 spheroid instead
of International Astronomical Union 1966 spheroid as recommended by IAGA
in July 2003. Reference radius remains as 6371.2 km - it is NOT the mean
radius (= 6371.0 km) but 6371.2 km is what is used in determining the
coefficients. Adaptation by Susan Macmillan, August 2003 (for
9th generation), December 2004, December 2009 \ December 2014.
Coefficients at 1995.0 incorrectly rounded (rounded up instead of
to even) included as these are the coefficients published in Excel
spreadsheet July 2005.
### Response:
def igrf12syn(isv, date, itype, alt, lat, elong):
"""
This is a synthesis routine for the 12th generation IGRF as agreed
in December 2014 by IAGA Working Group V-MOD. It is valid 1900.0 to
2020.0 inclusive. Values for dates from 1945.0 to 2010.0 inclusive are
definitive, otherwise they are non-definitive.
INPUT
isv = 0 if main-field values are required
isv = 1 if secular variation values are required
date = year A.D. Must be greater than or equal to 1900.0 and
less than or equal to 2025.0. Warning message is given
for dates greater than 2020.0. Must be double precision.
itype = 1 if geodetic (spheroid)
itype = 2 if geocentric (sphere)
alt = height in km above sea level if itype = 1
= distance from centre of Earth in km if itype = 2 (>3485 km)
lat = latitude (-90~90)
elong = east-longitude (0-360)
alt, colat and elong must be double precision.
OUTPUT
x = north component (nT) if isv = 0, nT/year if isv = 1
y = east component (nT) if isv = 0, nT/year if isv = 1
z = vertical component (nT) if isv = 0, nT/year if isv = 1
f = total intensity (nT) if isv = 0, rubbish if isv = 1
To get the other geomagnetic elements (D, I, H and secular
variations dD, dH, dI and dF) use routines ptoc and ptocsv.
Adapted from 8th generation version to include new maximum degree for
main-field models for 2000.0 and onwards and use WGS84 spheroid instead
of International Astronomical Union 1966 spheroid as recommended by IAGA
in July 2003. Reference radius remains as 6371.2 km - it is NOT the mean
radius (= 6371.0 km) but 6371.2 km is what is used in determining the
coefficients. Adaptation by Susan Macmillan, August 2003 (for
9th generation), December 2004, December 2009 \ December 2014.
Coefficients at 1995.0 incorrectly rounded (rounded up instead of
to even) included as these are the coefficients published in Excel
spreadsheet July 2005.
"""
p, q, cl, sl = [0.] * 105, [0.] * 105, [0.] * 13, [0.] * 13
# set initial values
x, y, z = 0., 0., 0.
if date < 1900.0 or date > 2025.0:
f = 1.0
print('This subroutine will not work with a date of ' + str(date))
print('Date must be in the range 1900.0 <= date <= 2025.0')
print('On return f = 1.0, x = y = z = 0')
return x, y, z, f
elif date >= 2015.0:
if date > 2020.0:
# not adapt for the model but can calculate
print('This version of the IGRF is intended for use up to 2020.0.')
print('values for ' + str(date) + ' will be computed but may be of reduced accuracy')
t = date - 2015.0
tc = 1.0
if isv == 1:
t = 1.0
tc = 0.0
# pointer for last coefficient in pen-ultimate set of MF coefficients...
ll = 3060
nmx = 13
nc = nmx * (nmx + 2)
kmx = (nmx + 1) * (nmx + 2) / 2
else:
t = 0.2 * (date - 1900.0)
ll = int(t)
t = t - ll
# SH models before 1995.0 are only to degree 10
if date < 1995.0:
nmx = 10
nc = nmx * (nmx + 2)
ll = nc * ll
kmx = (nmx + 1) * (nmx + 2) / 2
else:
nmx = 13
nc = nmx * (nmx + 2)
ll = round(0.2 * (date - 1995.0))
# 19 is the number of SH models that extend to degree 10
ll = 120 * 19 + nc * ll
kmx = (nmx + 1) * (nmx + 2) / 2
tc = 1.0 - t
if isv == 1:
tc = -0.2
t = 0.2
colat = 90-lat
r = alt
one = colat / FACT
ct = np.cos(one)
st = np.sin(one)
one = elong / FACT
cl[0] = np.cos(one)
sl[0] = np.sin(one)
cd = 1.0
sd = 0.0
l = 1
m = 1
n = 0
if itype != 2:
gclat, gclon, r = geodetic2geocentric(np.arctan2(st, ct), alt)
ct, st = np.cos(gclat), np.sin(gclat)
cd, sd = np.cos(gclon), np.sin(gclon)
ratio = 6371.2 / r
rr = ratio * ratio
# computation of Schmidt quasi-normal coefficients p and x(=q)
p[0] = 1.0
p[2] = st
q[0] = 0.0
q[2] = ct
fn, gn = n, n-1
for k in range(2, int(kmx)+1):
if n < m:
m = 0
n = n + 1
rr = rr * ratio
fn = n
gn = n - 1
fm = m
if m != n:
gmm = m * m
one = np.sqrt(fn * fn - gmm)
two = np.sqrt(gn * gn - gmm) / one
three = (fn + gn) / one
i = k - n
j = i - n + 1
p[k - 1] = three * ct * p[i - 1] - two * p[j - 1]
q[k - 1] = three * (ct * q[i - 1] - st * p[i - 1]) - two * q[j - 1]
else:
if k != 3:
one = np.sqrt(1.0 - 0.5 / fm)
j = k - n - 1
p[k-1] = one * st * p[j-1]
q[k-1] = one * (st * q[j-1] + ct * p[j-1])
cl[m-1] = cl[m - 2] * cl[0] - sl[m - 2] * sl[0]
sl[m-1] = sl[m - 2] * cl[0] + cl[m - 2] * sl[0]
# synthesis of x, y and z in geocentric coordinates
lm = ll + l
# print('g', n, m, k, gh[int(lm-1)], gh[int(lm + nc-1)])
one = (tc * gh[int(lm-1)] + t * gh[int(lm + nc-1)]) * rr
if m == 0:
x = x + one * q[k - 1]
z = z - (fn + 1.0) * one * p[k - 1]
l = l + 1
else:
# print('h', n, m, k, gh[int(lm)], gh[int(lm + nc)])
two = (tc * gh[int(lm)] + t * gh[int(lm + nc)]) * rr
three = one * cl[m-1] + two * sl[m-1]
x = x + three * q[k-1]
z = z - (fn + 1.0) * three * p[k-1]
if st == 0.0:
y = y + (one * sl[m - 1] - two * cl[m - 1]) * q[k - 1] * ct
else:
y = y + (one * sl[m-1] - two * cl[m-1]) * fm * p[k-1] / st
l = l + 2
m = m+1
# conversion to coordinate system specified by itype
one = x
x = x * cd + z * sd
z = z * cd - one * sd
f = np.sqrt(x * x + y * y + z * z)
#
return x, y, z, f |
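A hedged call sketch that maps the positional arguments of igrf12syn onto its docstring; it assumes the module-level coefficient array gh and the helpers used above (geodetic2geocentric, FACT) are loaded, and the location is arbitrary.
# Main-field values (isv=0) for 2015.0, geodetic input (itype=1),
# sea level, 45 deg latitude, 30 deg east longitude.
x, y, z, f = igrf12syn(0, 2015.0, 1, 0.0, 45.0, 30.0)
print('north=%.1f  east=%.1f  down=%.1f  total=%.1f nT' % (x, y, z, f))
# Secular variation (isv=1) at the same point, in nT/year.
dx, dy, dz, _ = igrf12syn(1, 2015.0, 1, 0.0, 45.0, 30.0)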
def format(self):
"""
Get the subtag code conventional format according to RFC 5646 section 2.1.1.
:return: string -- subtag code conventional format.
"""
subtag = self.data['subtag']
if self.data['type'] == 'region':
return subtag.upper()
if self.data['type'] == 'script':
return subtag.capitalize()
return subtag | Get the subtag code conventional format according to RFC 5646 section 2.1.1.
:return: string -- subtag code conventional format. | Below is the instruction that describes the task:
### Input:
Get the subtag code conventional format according to RFC 5646 section 2.1.1.
:return: string -- subtag code conventional format.
### Response:
def format(self):
"""
Get the subtag code conventional format according to RFC 5646 section 2.1.1.
:return: string -- subtag code conventional format.
"""
subtag = self.data['subtag']
if self.data['type'] == 'region':
return subtag.upper()
if self.data['type'] == 'script':
return subtag.capitalize()
return subtag |
def pull_byte(self, stack_pointer):
""" pulled a byte from stack """
addr = stack_pointer.value
byte = self.memory.read_byte(addr)
# log.info(
# log.error(
# "%x|\tpull $%x from %s stack at $%x\t|%s",
# self.last_op_address, byte, stack_pointer.name, addr,
# self.cfg.mem_info.get_shortest(self.last_op_address)
# )
# FIXME: self.system_stack_pointer += 1
stack_pointer.increment(1)
return byte | pulled a byte from stack | Below is the instruction that describes the task:
### Input:
pulled a byte from stack
### Response:
def pull_byte(self, stack_pointer):
""" pulled a byte from stack """
addr = stack_pointer.value
byte = self.memory.read_byte(addr)
# log.info(
# log.error(
# "%x|\tpull $%x from %s stack at $%x\t|%s",
# self.last_op_address, byte, stack_pointer.name, addr,
# self.cfg.mem_info.get_shortest(self.last_op_address)
# )
# FIXME: self.system_stack_pointer += 1
stack_pointer.increment(1)
return byte |
def wallet_frontiers(self, wallet):
"""
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_frontiers', payload)
return resp.get('frontiers') or {} | Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
} | Below is the instruction that describes the task:
### Input:
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
### Response:
def wallet_frontiers(self, wallet):
"""
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_frontiers', payload)
return resp.get('frontiers') or {} |
def places_within_radius(
self, place=None, latitude=None, longitude=None, radius=0, **kwargs
):
"""
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
"""
kwargs['withdist'] = True
kwargs['withcoord'] = True
kwargs['withhash'] = False
kwargs.setdefault('sort', 'ASC')
unit = kwargs.setdefault('unit', 'km')
# Make the query
if place is not None:
response = self.redis.georadiusbymember(
self.key, self._pickle(place), radius, **kwargs
)
elif (latitude is not None) and (longitude is not None):
response = self.redis.georadius(
self.key, longitude, latitude, radius, **kwargs
)
else:
raise ValueError(
'Must specify place, or both latitude and longitude'
)
# Assemble the result
ret = []
for item in response:
ret.append(
{
'place': self._unpickle(item[0]),
'distance': item[1],
'unit': unit,
'latitude': item[2][1],
'longitude': item[2][0],
}
)
return ret | Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``. | Below is the instruction that describes the task:
### Input:
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
### Response:
def places_within_radius(
self, place=None, latitude=None, longitude=None, radius=0, **kwargs
):
"""
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
"""
kwargs['withdist'] = True
kwargs['withcoord'] = True
kwargs['withhash'] = False
kwargs.setdefault('sort', 'ASC')
unit = kwargs.setdefault('unit', 'km')
# Make the query
if place is not None:
response = self.redis.georadiusbymember(
self.key, self._pickle(place), radius, **kwargs
)
elif (latitude is not None) and (longitude is not None):
response = self.redis.georadius(
self.key, longitude, latitude, radius, **kwargs
)
else:
raise ValueError(
'Must specify place, or both latitude and longitude'
)
# Assemble the result
ret = []
for item in response:
ret.append(
{
'place': self._unpickle(item[0]),
'distance': item[1],
'unit': unit,
'latitude': item[2][1],
'longitude': item[2][0],
}
)
return ret |
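A usage sketch for places_within_radius; here `collection` stands for an instance of the class this method belongs to (backed by a redis client with GEO support), and the coordinates and identifiers are made up.
# Everything within 5 km of a point, closest first.
nearby = collection.places_within_radius(latitude=37.7749, longitude=-122.4194, radius=5)
print([(hit['place'], round(hit['distance'], 2), hit['unit']) for hit in nearby])
# Or centre the search on a place already stored in the collection.
near_office = collection.places_within_radius(place='office', radius=2, unit='mi', count=10)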
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length) | Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length. | Below is the instruction that describes the task:
### Input:
Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
### Response:
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length) |
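A small sketch of how make_content_range is usually reached through werkzeug's Range datastructure; the byte offsets are arbitrary and the rendered header shown in the comment reflects how current werkzeug versions format it, so treat it as indicative rather than authoritative.
from werkzeug.datastructures import Range

byte_range = Range('bytes', [(0, 500)])           # client asked for the first 500 bytes
content_range = byte_range.make_content_range(1234)
print(content_range.to_header())                  # expected to be 'bytes 0-499/1234'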
def _increment_stage(self):
"""
Purpose: Increment stage pointer. Also check if Pipeline has completed.
"""
try:
if self._cur_stage < self._stage_count:
self._cur_stage += 1
else:
self._completed_flag.set()
except Exception as ex:
raise EnTKError(text=ex) | Purpose: Increment stage pointer. Also check if Pipeline has completed. | Below is the instruction that describes the task:
### Input:
Purpose: Increment stage pointer. Also check if Pipeline has completed.
### Response:
def _increment_stage(self):
"""
Purpose: Increment stage pointer. Also check if Pipeline has completed.
"""
try:
if self._cur_stage < self._stage_count:
self._cur_stage += 1
else:
self._completed_flag.set()
except Exception as ex:
raise EnTKError(text=ex) |
def lock_pidfile_or_die(pidfile):
"""
@pidfile:
must be a writable path
Exceptions are logged.
Returns the PID.
"""
pid = os.getpid()
try:
remove_if_stale_pidfile(pidfile)
pid_write_file = pidfile + '.' + str(pid)
fpid = open(pid_write_file, 'w')
try:
fpid.write("%s\n" % pid)
finally:
fpid.close()
if not take_file_lock(pid_write_file, pidfile, "%s\n" % pid):
sys.exit(1)
except SystemExit:
raise
except Exception:
log.exception("unable to take pidfile")
sys.exit(1)
return pid | @pidfile:
must be a writable path
Exceptions are logged.
Returns the PID. | Below is the instruction that describes the task:
### Input:
@pidfile:
must be a writable path
Exceptions are logged.
Returns the PID.
### Response:
def lock_pidfile_or_die(pidfile):
"""
@pidfile:
must be a writable path
Exceptions are logged.
Returns the PID.
"""
pid = os.getpid()
try:
remove_if_stale_pidfile(pidfile)
pid_write_file = pidfile + '.' + str(pid)
fpid = open(pid_write_file, 'w')
try:
fpid.write("%s\n" % pid)
finally:
fpid.close()
if not take_file_lock(pid_write_file, pidfile, "%s\n" % pid):
sys.exit(1)
except SystemExit:
raise
except Exception:
log.exception("unable to take pidfile")
sys.exit(1)
return pid |
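A usage sketch for lock_pidfile_or_die; the pidfile path is an example, and the stale-lock helpers referenced in the body come from the same module.
# Called once at daemon start-up; exits the process if another instance holds the lock.
pid = lock_pidfile_or_die('/var/run/mydaemon.pid')
log.info('daemon running with pid %s', pid)
# ... main loop ...  (a matching unlock/cleanup helper from the same module would run on shutdown)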
def debug_print_strip_msg(self, i, line):
"""
Debug print indicating that an empty line is being skipped
:param i: The line number of the line that is being currently parsed
:param line: the parsed line
:return: None
"""
if self.debug_level == 2:
print(" Stripping Line %d: '%s'" % (i + 1, line.rstrip(' \r\n\t\f')))
elif self.debug_level > 2:
print(" Stripping Line %d:" % (i + 1))
hexdump(line) | Debug print indicating that an empty line is being skipped
:param i: The line number of the line that is being currently parsed
:param line: the parsed line
:return: None | Below is the instruction that describes the task:
### Input:
Debug print indicating that an empty line is being skipped
:param i: The line number of the line that is being currently parsed
:param line: the parsed line
:return: None
### Response:
def debug_print_strip_msg(self, i, line):
"""
Debug print indicating that an empty line is being skipped
:param i: The line number of the line that is being currently parsed
:param line: the parsed line
:return: None
"""
if self.debug_level == 2:
print(" Stripping Line %d: '%s'" % (i + 1, line.rstrip(' \r\n\t\f')))
elif self.debug_level > 2:
print(" Stripping Line %d:" % (i + 1))
hexdump(line) |
def get_previous_price_list(self, currency, start_date, end_date):
"""
Get List of prices between two dates
"""
start = start_date.strftime('%Y-%m-%d')
end = end_date.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = self._decode_rates(response)
price_dict = data.get('bpi', {})
return price_dict
return {} | Get List of prices between two dates | Below is the instruction that describes the task:
### Input:
Get List of prices between two dates
### Response:
def get_previous_price_list(self, currency, start_date, end_date):
"""
Get List of prices between two dates
"""
start = start_date.strftime('%Y-%m-%d')
end = end_date.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = self._decode_rates(response)
price_dict = data.get('bpi', {})
return price_dict
return {} |
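A usage sketch for get_previous_price_list; `client` stands for an instance of the surrounding class, the dates are arbitrary, and the commented value is illustrative only.
from datetime import date

prices = client.get_previous_price_list('USD', date(2018, 1, 1), date(2018, 1, 7))
# prices maps ISO dates to closing prices, e.g. {'2018-01-01': 13412.44, ...}
print(sorted(prices.items()))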
def get_devices(self, condition=None, page_size=1000):
"""Iterates over each :class:`Device` for this device cloud account
Examples::
# get a list of all devices
all_devices = list(dc.devicecore.get_devices())
# build a mapping of devices by their vendor id using a
# dict comprehension
devices = dc.devicecore.get_devices() # generator object
devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}
# iterate over all devices in 'minnesota' group and
# print the device mac and location
for device in dc.get_devices(group_path == 'minnesota'):
print "%s at %s" % (device.get_mac(), device.get_location())
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the devicecore. If unspecified,
an iterator over all devices will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Iterator over each :class:`~Device` in this device cloud
account in the form of a generator object.
"""
condition = validate_type(condition, type(None), Expression, *six.string_types)
page_size = validate_type(page_size, *six.integer_types)
params = {"embed": "true"}
if condition is not None:
params["condition"] = condition.compile()
for device_json in self._conn.iter_json_pages("/ws/DeviceCore", page_size=page_size, **params):
yield Device(self._conn, self._sci, device_json) | Iterates over each :class:`Device` for this device cloud account
Examples::
# get a list of all devices
all_devices = list(dc.devicecore.get_devices())
# build a mapping of devices by their vendor id using a
# dict comprehension
devices = dc.devicecore.get_devices() # generator object
devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}
# iterate over all devices in 'minnesota' group and
# print the device mac and location
for device in dc.get_devices(group_path == 'minnesota'):
print "%s at %s" % (device.get_mac(), device.get_location())
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the devicecore. If unspecified,
an iterator over all devices will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Iterator over each :class:`~Device` in this device cloud
account in the form of a generator object. | Below is the instruction that describes the task:
### Input:
Iterates over each :class:`Device` for this device cloud account
Examples::
# get a list of all devices
all_devices = list(dc.devicecore.get_devices())
# build a mapping of devices by their vendor id using a
# dict comprehension
devices = dc.devicecore.get_devices() # generator object
devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}
# iterate over all devices in 'minnesota' group and
# print the device mac and location
for device in dc.get_devices(group_path == 'minnesota'):
print "%s at %s" % (device.get_mac(), device.get_location())
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the devicecore. If unspecified,
an iterator over all devices will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Iterator over each :class:`~Device` in this device cloud
account in the form of a generator object.
### Response:
def get_devices(self, condition=None, page_size=1000):
"""Iterates over each :class:`Device` for this device cloud account
Examples::
# get a list of all devices
all_devices = list(dc.devicecore.get_devices())
# build a mapping of devices by their vendor id using a
# dict comprehension
devices = dc.devicecore.get_devices() # generator object
devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}
# iterate over all devices in 'minnesota' group and
# print the device mac and location
for device in dc.get_devices(group_path == 'minnesota'):
print "%s at %s" % (device.get_mac(), device.get_location())
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the devicecore. If unspecified,
an iterator over all devices will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Iterator over each :class:`~Device` in this device cloud
account in the form of a generator object.
"""
condition = validate_type(condition, type(None), Expression, *six.string_types)
page_size = validate_type(page_size, *six.integer_types)
params = {"embed": "true"}
if condition is not None:
params["condition"] = condition.compile()
for device_json in self._conn.iter_json_pages("/ws/DeviceCore", page_size=page_size, **params):
yield Device(self._conn, self._sci, device_json) |
def download_sample_and_align(job, sample, inputs, ids):
"""
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
"""
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Read fastq samples from file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs)) # Create config as a copy of inputs since it has values we want
config.update(ids) # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
mkdir_p(inputs.ouput_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size) | Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs | Below is the instruction that describes the task:
### Input:
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
### Response:
def download_sample_and_align(job, sample, inputs, ids):
"""
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
"""
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Read fastq samples from file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs)) # Create config as a copy of inputs since it has values we want
config.update(ids) # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size) |
def features_properties_null_remove(obj):
"""
Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value
"""
features = obj['features']
for i in tqdm(range(len(features))):
if 'properties' in features[i]:
properties = features[i]['properties']
features[i]['properties'] = {p:properties[p] for p in properties if properties[p] is not None}
return obj | Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value | Below is the instruction that describes the task:
### Input:
Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value
### Response:
def features_properties_null_remove(obj):
"""
Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value
"""
features = obj['features']
for i in tqdm(range(len(features))):
if 'properties' in features[i]:
properties = features[i]['properties']
features[i]['properties'] = {p:properties[p] for p in properties if properties[p] is not None}
return obj |
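A self-contained sketch for features_properties_null_remove with a made-up feature collection.
fc = {'type': 'FeatureCollection',
      'features': [{'type': 'Feature',
                    'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
                    'properties': {'name': 'A', 'height': None, 'kind': 'tree'}}]}
cleaned = features_properties_null_remove(fc)
# the first feature's properties are now {'name': 'A', 'kind': 'tree'}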
def merge(self, keypath, value, op='set'):
"""
First gets the cell at BeliefState's keypath, or creates a new cell
from the first target that has that keypath (This could mess up if the
member it's copying from has a different Cell or domain for that keypath.)
Second, this merges that cell with the value
"""
negated = False
keypath = keypath[:] # copy it
if keypath[0] == 'target':
# only pull negated if it can potentially modify target
negated = self.get_environment_variable('negated', pop=False, default=False)
if negated:
keypath[0] = "distractor"
if keypath not in self:
first_referent = None
if keypath[0] in ['target', 'distractor']:
has_targets = False
for _, referent in self.iter_singleton_referents():
has_targets = True
if keypath[1:] in referent:
first_referent = referent
break
if first_referent is None:
# this happens when none of the available targets have the
# path that is attempted to being merged to
if has_targets:
raise CellConstructionFailure("Cannot merge; no target: %s" \
% (str(keypath)))
else:
# this will always happen when size is 0
raise CellConstructionFailure("Empty belief state")
# find the type and add it to the
cell = first_referent.get_value_from_path(keypath[1:]).stem()
self.add_cell(keypath, cell)
else:
# should we allow merging undefined components outside of target?
raise Exception("Could not find Keypath %s" % (str(keypath)))
# break down keypaths into
cell = self
if not isinstance(keypath, list):
keypath = [keypath]
for key in keypath:
cell = cell[key]
# perform operation (set, <=, >= etc)
try:
return getattr(cell, op)(value)
except Contradiction as ctrd:
# add more information to the contradiction
raise Contradiction("Could not merge %s with %s: %s " % (str(keypath), str(value), ctrd)) | First gets the cell at BeliefState's keypath, or creates a new cell
from the first target that has that keypath (This could mess up if the
member it's copying from has a different Cell or domain for that keypath.)
Second, this merges that cell with the value | Below is the instruction that describes the task:
### Input:
First gets the cell at BeliefState's keypath, or creates a new cell
from the first target that has that keypath (This could mess up if the
member it's copying from has a different Cell or domain for that keypath.)
Second, this merges that cell with the value
### Response:
def merge(self, keypath, value, op='set'):
"""
First gets the cell at BeliefState's keypath, or creates a new cell
from the first target that has that keypath (This could mess up if the
member it's copying from has a different Cell or domain for that keypath.)
Second, this merges that cell with the value
"""
negated = False
keypath = keypath[:] # copy it
if keypath[0] == 'target':
# only pull negated if it can potentially modify target
negated = self.get_environment_variable('negated', pop=False, default=False)
if negated:
keypath[0] = "distractor"
if keypath not in self:
first_referent = None
if keypath[0] in ['target', 'distractor']:
has_targets = False
for _, referent in self.iter_singleton_referents():
has_targets = True
if keypath[1:] in referent:
first_referent = referent
break
if first_referent is None:
# this happens when none of the available targets have the
# path that is attempted to being merged to
if has_targets:
raise CellConstructionFailure("Cannot merge; no target: %s" \
% (str(keypath)))
else:
# this will always happen when size is 0
raise CellConstructionFailure("Empty belief state")
# find the type and add it to the
cell = first_referent.get_value_from_path(keypath[1:]).stem()
self.add_cell(keypath, cell)
else:
# should we allow merging undefined components outside of target?
raise Exception("Could not find Keypath %s" % (str(keypath)))
# break down keypaths into
cell = self
if not isinstance(keypath, list):
keypath = [keypath]
for key in keypath:
cell = cell[key]
# perform operation (set, <=, >= etc)
try:
return getattr(cell, op)(value)
except Contradiction as ctrd:
# add more information to the contradiction
raise Contradiction("Could not merge %s with %s: %s " % (str(keypath), str(value), ctrd)) |
def excel_to_sql(excel_file_path, engine,
read_excel_kwargs=None,
to_generic_type_kwargs=None,
to_sql_kwargs=None):
"""Create a database from excel.
:param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method.
example: ``{"employee": {"skiprows": 10}, "department": {}}``
:param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql``
method.
limitation:
1. If an integer column has None value, data type in database will be float.
Because pandas thinks that it is ``np.nan``.
2. If a string column looks like integer, ``pandas.read_excel()`` method
doesn't have options to convert it to string.
"""
if read_excel_kwargs is None:
read_excel_kwargs = dict()
if to_sql_kwargs is None:
to_sql_kwargs = dict()
if to_generic_type_kwargs is None:
to_generic_type_kwargs = dict()
xl = pd.ExcelFile(excel_file_path)
for sheet_name in xl.sheet_names:
df = pd.read_excel(
excel_file_path, sheet_name,
**read_excel_kwargs.get(sheet_name, dict())
)
kwargs = to_generic_type_kwargs.get(sheet_name)
if kwargs:
data = to_dict_list_generic_type(df, **kwargs)
smart_insert(data, sheet_name, engine)
else:
df.to_sql(
sheet_name, engine, index=False,
**to_sql_kwargs.get(sheet_name, dict(if_exists="replace"))
) | Create a database from excel.
:param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method.
example: ``{"employee": {"skiprows": 10}, "department": {}}``
:param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql``
method.
limitation:
1. If an integer column has None value, data type in database will be float.
Because pandas thinks that it is ``np.nan``.
2. If a string column looks like integer, ``pandas.read_excel()`` method
doesn't have options to convert it to string. | Below is the instruction that describes the task:
### Input:
Create a database from excel.
:param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method.
example: ``{"employee": {"skiprows": 10}, "department": {}}``
:param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql``
method.
limitation:
1. If an integer column has None value, data type in database will be float.
Because pandas thinks that it is ``np.nan``.
2. If a string column looks like integer, ``pandas.read_excel()`` method
doesn't have options to convert it to string.
### Response:
def excel_to_sql(excel_file_path, engine,
read_excel_kwargs=None,
to_generic_type_kwargs=None,
to_sql_kwargs=None):
"""Create a database from excel.
:param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method.
example: ``{"employee": {"skiprows": 10}, "department": {}}``
:param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql``
method.
limitation:
1. If an integer column has None value, data type in database will be float.
Because pandas thinks that it is ``np.nan``.
2. If a string column looks like integer, ``pandas.read_excel()`` method
doesn't have options to convert it to string.
"""
if read_excel_kwargs is None:
read_excel_kwargs = dict()
if to_sql_kwargs is None:
to_sql_kwargs = dict()
if to_generic_type_kwargs is None:
to_generic_type_kwargs = dict()
xl = pd.ExcelFile(excel_file_path)
for sheet_name in xl.sheet_names:
df = pd.read_excel(
excel_file_path, sheet_name,
**read_excel_kwargs.get(sheet_name, dict())
)
kwargs = to_generic_type_kwargs.get(sheet_name)
if kwargs:
data = to_dict_list_generic_type(df, **kwargs)
smart_insert(data, sheet_name, engine)
else:
df.to_sql(
sheet_name, engine, index=False,
**to_sql_kwargs.get(sheet_name, dict(if_exists="replace"))
) |
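A usage sketch for excel_to_sql assuming SQLAlchemy is available; the file name, sheet names, and per-sheet options are illustrative.
from sqlalchemy import create_engine

engine = create_engine('sqlite:///company.db')         # example target database
excel_to_sql('company.xlsx', engine,                    # one table per sheet
             read_excel_kwargs={'employee': {'skiprows': 10}},
             to_sql_kwargs={'department': {'if_exists': 'append'}})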
def compute_alignments(self, prev_state, precomputed_values, mask=None):
"""
Compute the alignment weights based on the previous state.
"""
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
# For test time the UaH will be (time, output_dim)
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, 'tanh')
align_scores = T.dot(act, self.Va) # ~ (batch, time)
if mask:
mask = (1 - mask) * -99.00
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
return align_weights | Compute the alignment weights based on the previous state. | Below is the instruction that describes the task:
### Input:
Compute the alignment weights based on the previous state.
### Response:
def compute_alignments(self, prev_state, precomputed_values, mask=None):
"""
Compute the alignment weights based on the previous state.
"""
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
# For test time the UaH will be (time, output_dim)
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, 'tanh')
align_scores = T.dot(act, self.Va) # ~ (batch, time)
if mask:
mask = (1 - mask) * -99.00
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
return align_weights |
def solve(self, lam):
'''Solves the GFL for a fixed value of lambda.'''
s = weighted_graphtf(self.nnodes, self.y, self.weights, lam,
self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'),
self.maxsteps, self.converge,
self.beta, self.u)
self.steps.append(s)
return self.beta | Solves the GFL for a fixed value of lambda. | Below is the instruction that describes the task:
### Input:
Solves the GFL for a fixed value of lambda.
### Response:
def solve(self, lam):
'''Solves the GFL for a fixed value of lambda.'''
s = weighted_graphtf(self.nnodes, self.y, self.weights, lam,
self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'),
self.maxsteps, self.converge,
self.beta, self.u)
self.steps.append(s)
return self.beta |
def period(self):
"""Period of the orbit as a timedelta
"""
return timedelta(seconds=2 * np.pi * np.sqrt(self.kep.a ** 3 / self.mu)) | Period of the orbit as a timedelta | Below is the instruction that describes the task:
### Input:
Period of the orbit as a timedelta
### Response:
def period(self):
"""Period of the orbit as a timedelta
"""
return timedelta(seconds=2 * np.pi * np.sqrt(self.kep.a ** 3 / self.mu)) |
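A quick numeric check of the formula T = 2*pi*sqrt(a**3/mu) used above, with illustrative low-Earth-orbit values in km and km^3/s^2.
import numpy as np
from datetime import timedelta

a = 6771.0            # semi-major axis, km (~400 km altitude)
mu = 398600.4418      # Earth's gravitational parameter, km^3/s^2
T = timedelta(seconds=2 * np.pi * np.sqrt(a ** 3 / mu))
# T comes out near 92 minutes, the familiar LEO period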
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
"""
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
"""
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5)
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[[
'sid',
'date',
'split_ratio',
]].loc[raw_data.split_ratio != 1],
show_progress=show_progress
),
dividends=parse_dividends(
raw_data[[
'sid',
'date',
'ex_dividend',
]].loc[raw_data.ex_dividend != 0],
show_progress=show_progress
)
) | quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication | Below is the instruction that describes the task:
### Input:
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
### Response:
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
"""
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
"""
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5)
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[[
'sid',
'date',
'split_ratio',
]].loc[raw_data.split_ratio != 1],
show_progress=show_progress
),
dividends=parse_dividends(
raw_data[[
'sid',
'date',
'ex_dividend',
]].loc[raw_data.ex_dividend != 0],
show_progress=show_progress
)
) |
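A hedged sketch of how a bundle like this is typically registered with zipline; the bundle name is an example and the CLI line is the usual ingestion entry point.
# e.g. in ~/.zipline/extension.py
from zipline.data.bundles import register

register('quandl', quandl_bundle)
# then, with QUANDL_API_KEY set in the environment:
#   zipline ingest -b quandl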
def state(self):
"""Returns a new JIT state. You have to clean up by calling .destroy()
afterwards.
"""
return Emitter(weakref.proxy(self.lib), self.lib.jit_new_state()) | Returns a new JIT state. You have to clean up by calling .destroy()
afterwards. | Below is the instruction that describes the task:
### Input:
Returns a new JIT state. You have to clean up by calling .destroy()
afterwards.
### Response:
def state(self):
"""Returns a new JIT state. You have to clean up by calling .destroy()
afterwards.
"""
return Emitter(weakref.proxy(self.lib), self.lib.jit_new_state()) |
def get_share_url_with_dirname(uk, shareid, dirname):
'''得到共享目录的链接'''
return ''.join([
const.PAN_URL, 'wap/link',
'?shareid=', shareid,
'&uk=', uk,
'&dir=', encoder.encode_uri_component(dirname),
'&third=0',
    ]) | Get the link for a shared directory | Below is the the instruction that describes the task:
### Input:
Get the link for a shared directory
### Response:
def get_share_url_with_dirname(uk, shareid, dirname):
    '''Get the link for a shared directory'''
return ''.join([
const.PAN_URL, 'wap/link',
'?shareid=', shareid,
'&uk=', uk,
'&dir=', encoder.encode_uri_component(dirname),
'&third=0',
]) |
def getEAnnotation(self, source):
"""Return the annotation with a matching source attribute."""
for annotation in self.eAnnotations:
if annotation.source == source:
return annotation
return None | Return the annotation with a matching source attribute. | Below is the the instruction that describes the task:
### Input:
Return the annotation with a matching source attribute.
### Response:
def getEAnnotation(self, source):
"""Return the annotation with a matching source attribute."""
for annotation in self.eAnnotations:
if annotation.source == source:
return annotation
return None |
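A self-contained illustration of the lookup above using stand-in classes; in a real model the annotations and their sources would come from the element's metamodel, so the names here are purely illustrative.
class Annotation:
    def __init__(self, source):
        self.source = source
class Element:
    def __init__(self, annotations):
        self.eAnnotations = annotations
    getEAnnotation = getEAnnotation  # reuse the function defined above
elem = Element([Annotation("docs"), Annotation("codegen")])
print(elem.getEAnnotation("codegen").source)  # -> codegen
print(elem.getEAnnotation("missing"))         # -> None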
def _read_response(self, response):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
"""
self.name = response['key']
self.description = response['description']
self.layoutName = response['repoLayoutRef']
self.archiveBrowsingEnabled = response['archiveBrowsingEnabled'] | JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON | Below is the the instruction that describes the task:
### Input:
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
### Response:
def _read_response(self, response):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
"""
self.name = response['key']
self.description = response['description']
self.layoutName = response['repoLayoutRef']
self.archiveBrowsingEnabled = response['archiveBrowsingEnabled'] |
def _analyze_file(self, f):
"""Analyze the file."""
f.seek(0)
# Check for BOMs
if self.CHECK_BOM:
encoding = self.has_bom(f)
f.seek(0)
    else:
        encoding = None  # BOM check skipped; no encoding determined yet
        util.warn_deprecated(
            "'CHECK_BOM' attribute is deprecated. "
            "Please override 'has_bom' function to control or avoid BOM detection."
        )
    # Check the file header
if encoding is None:
encoding = self._utf_strip_bom(self.header_check(f.read(1024)))
f.seek(0)
if encoding is None:
encoding = self._utf_strip_bom(self.content_check(f))
f.seek(0)
return encoding | Analyze the file. | Below is the the instruction that describes the task:
### Input:
Analyze the file.
### Response:
def _analyze_file(self, f):
"""Analyze the file."""
f.seek(0)
# Check for BOMs
if self.CHECK_BOM:
encoding = self.has_bom(f)
f.seek(0)
    else:
        encoding = None  # BOM check skipped; no encoding determined yet
        util.warn_deprecated(
            "'CHECK_BOM' attribute is deprecated. "
            "Please override 'has_bom' function to control or avoid BOM detection."
        )
    # Check the file header
if encoding is None:
encoding = self._utf_strip_bom(self.header_check(f.read(1024)))
f.seek(0)
if encoding is None:
encoding = self._utf_strip_bom(self.content_check(f))
f.seek(0)
return encoding |
def origin_east_asia(origin):
"""\
Returns if the origin is located in East Asia
Holds true for the following countries:
* China
* Japan
* Mongolia
* South Korea
* Taiwan
`origin`
The origin to check.
"""
return origin_china(origin) or origin_japan(origin) \
or origin_mongolia(origin) or origin_south_korea(origin) \
or origin_taiwan(origin) | \
Returns if the origin is located in East Asia
Holds true for the following countries:
* China
* Japan
* Mongolia
* South Korea
* Taiwan
`origin`
The origin to check. | Below is the the instruction that describes the task:
### Input:
\
Returns if the origin is located in East Asia
Holds true for the following countries:
* China
* Japan
* Mongolia
* South Korea
* Taiwan
`origin`
The origin to check.
### Response:
def origin_east_asia(origin):
"""\
Returns if the origin is located in East Asia
Holds true for the following countries:
* China
* Japan
* Mongolia
* South Korea
* Taiwan
`origin`
The origin to check.
"""
return origin_china(origin) or origin_japan(origin) \
or origin_mongolia(origin) or origin_south_korea(origin) \
or origin_taiwan(origin) |
def normalize(self) -> 'State':
"""Normalize the state"""
tensor = self.tensor / bk.ccast(bk.sqrt(self.norm()))
return State(tensor, self.qubits, self._memory) | Normalize the state | Below is the the instruction that describes the task:
### Input:
Normalize the state
### Response:
def normalize(self) -> 'State':
"""Normalize the state"""
tensor = self.tensor / bk.ccast(bk.sqrt(self.norm()))
return State(tensor, self.qubits, self._memory) |
def _load_poses(self):
"""Load ground truth poses (T_w_cam0) from file."""
pose_file = os.path.join(self.pose_path, self.sequence + '.txt')
# Read and parse the poses
poses = []
try:
with open(pose_file, 'r') as f:
lines = f.readlines()
if self.frames is not None:
lines = [lines[i] for i in self.frames]
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses.append(T_w_cam0)
except FileNotFoundError:
print('Ground truth poses are not available for sequence ' +
self.sequence + '.')
self.poses = poses | Load ground truth poses (T_w_cam0) from file. | Below is the the instruction that describes the task:
### Input:
Load ground truth poses (T_w_cam0) from file.
### Response:
def _load_poses(self):
"""Load ground truth poses (T_w_cam0) from file."""
pose_file = os.path.join(self.pose_path, self.sequence + '.txt')
# Read and parse the poses
poses = []
try:
with open(pose_file, 'r') as f:
lines = f.readlines()
if self.frames is not None:
lines = [lines[i] for i in self.frames]
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses.append(T_w_cam0)
except FileNotFoundError:
print('Ground truth poses are not available for sequence ' +
self.sequence + '.')
self.poses = poses |
def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime:
"""
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
"""
    # An S3 object's creation date is stored in its LastModified field, which
    # holds the more recent of the object's creation and last-modified times.
return self.get_last_modified_date(bucket, key) | Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date | Below is the the instruction that describes the task:
### Input:
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
### Response:
def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime:
"""
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
"""
    # An S3 object's creation date is stored in its LastModified field, which
    # holds the more recent of the object's creation and last-modified times.
return self.get_last_modified_date(bucket, key) |
def _pop_comment_block(self, statements, header_re):
"""Look for a series of comments that start with one that matches the
regex. If the first comment is found, all subsequent comments are
popped from statements, concatenated and dedented and returned.
"""
res = []
comments = []
match = None
st_iter = iter(statements)
# Look for the header
for st in st_iter:
if isinstance(st, ast.Comment):
match = header_re.match(st.text)
if match:
                # Drop this comment and move on to consuming the block
break
else:
res.append(st)
else:
res.append(st)
# Consume consecutive comments
for st in st_iter:
if isinstance(st, ast.Comment):
comments.append(st)
else:
# The block is over, keep the rest of the statements
res.append(st)
break
# Keep the rest of the statements
res.extend(list(st_iter))
# Inside the comment block, drop the pound sign and any common indent
return match, dedent("".join(c.text[1:] + "\n" for c in comments)), res | Look for a series of comments that start with one that matches the
regex. If the first comment is found, all subsequent comments are
popped from statements, concatenated and dedented and returned. | Below is the the instruction that describes the task:
### Input:
Look for a series of comments that start with one that matches the
regex. If the first comment is found, all subsequent comments are
popped from statements, concatenated and dedented and returned.
### Response:
def _pop_comment_block(self, statements, header_re):
"""Look for a series of comments that start with one that matches the
regex. If the first comment is found, all subsequent comments are
popped from statements, concatenated and dedented and returned.
"""
res = []
comments = []
match = None
st_iter = iter(statements)
# Look for the header
for st in st_iter:
if isinstance(st, ast.Comment):
match = header_re.match(st.text)
if match:
                # Drop this comment and move on to consuming the block
break
else:
res.append(st)
else:
res.append(st)
# Consume consecutive comments
for st in st_iter:
if isinstance(st, ast.Comment):
comments.append(st)
else:
# The block is over, keep the rest of the statements
res.append(st)
break
# Keep the rest of the statements
res.extend(list(st_iter))
# Inside the comment block, drop the pound sign and any common indent
return match, dedent("".join(c.text[1:] + "\n" for c in comments)), res |
def covlen(args):
"""
%prog covlen covfile fastafile
Plot coverage vs length. `covfile` is two-column listing contig id and
depth of coverage.
"""
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile
p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
p.add_option("--maxcov", default=100, type="int", help="Max contig size")
p.add_option("--color", default='m', help="Color of the data points")
p.add_option("--kind", default="scatter",
choices=("scatter", "reg", "resid", "kde", "hex"),
help="Kind of plot to draw")
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 2:
sys.exit(not p.print_help())
covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
for ctg, size in s.iter_sizes():
c = cov.get(ctg, 0)
if size > maxsize:
continue
if c > maxcov:
continue
data.append((size, c))
x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))
df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
xlim=(0, maxsize), ylim=(0, maxcov),
stat_func=None, edgecolor="w", color=opts.color)
figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts) | %prog covlen covfile fastafile
Plot coverage vs length. `covfile` is two-column listing contig id and
depth of coverage. | Below is the the instruction that describes the task:
### Input:
%prog covlen covfile fastafile
Plot coverage vs length. `covfile` is two-column listing contig id and
depth of coverage.
### Response:
def covlen(args):
"""
%prog covlen covfile fastafile
Plot coverage vs length. `covfile` is two-column listing contig id and
depth of coverage.
"""
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile
p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
p.add_option("--maxcov", default=100, type="int", help="Max contig size")
p.add_option("--color", default='m', help="Color of the data points")
p.add_option("--kind", default="scatter",
choices=("scatter", "reg", "resid", "kde", "hex"),
help="Kind of plot to draw")
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 2:
sys.exit(not p.print_help())
covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
for ctg, size in s.iter_sizes():
c = cov.get(ctg, 0)
if size > maxsize:
continue
if c > maxcov:
continue
data.append((size, c))
x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))
df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
xlim=(0, maxsize), ylim=(0, maxcov),
stat_func=None, edgecolor="w", color=opts.color)
figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts) |
def get_contacts(self):
"""
Fetches list of all contacts
This will return chats with people from the address book only
Use get_all_chats for all chats
:return: List of contacts
:rtype: list[Contact]
"""
all_contacts = self.wapi_functions.getAllContacts()
return [Contact(contact, self) for contact in all_contacts] | Fetches list of all contacts
This will return chats with people from the address book only
Use get_all_chats for all chats
:return: List of contacts
:rtype: list[Contact] | Below is the the instruction that describes the task:
### Input:
Fetches list of all contacts
This will return chats with people from the address book only
Use get_all_chats for all chats
:return: List of contacts
:rtype: list[Contact]
### Response:
def get_contacts(self):
"""
Fetches list of all contacts
This will return chats with people from the address book only
Use get_all_chats for all chats
:return: List of contacts
:rtype: list[Contact]
"""
all_contacts = self.wapi_functions.getAllContacts()
return [Contact(contact, self) for contact in all_contacts] |
def interp(self, new_timestamps, interpolation_mode=0):
""" returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
"""
if not len(self.samples) or not len(new_timestamps):
return Signal(
self.samples.copy(),
self.timestamps.copy(),
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits.copy()
if self.invalidation_bits is not None
else None,
encoding=self.encoding,
)
else:
if len(self.samples.shape) > 1:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
kind = self.samples.dtype.kind
if kind == "f":
s = np.interp(new_timestamps, self.timestamps, self.samples)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
elif kind in "ui":
if interpolation_mode == 1:
s = np.interp(
new_timestamps, self.timestamps, self.samples
).astype(self.samples.dtype)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
return Signal(
s,
new_timestamps,
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
source=self.source,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
) | returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal* | Below is the the instruction that describes the task:
### Input:
returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
### Response:
def interp(self, new_timestamps, interpolation_mode=0):
""" returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
"""
if not len(self.samples) or not len(new_timestamps):
return Signal(
self.samples.copy(),
self.timestamps.copy(),
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits.copy()
if self.invalidation_bits is not None
else None,
encoding=self.encoding,
)
else:
if len(self.samples.shape) > 1:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
kind = self.samples.dtype.kind
if kind == "f":
s = np.interp(new_timestamps, self.timestamps, self.samples)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
elif kind in "ui":
if interpolation_mode == 1:
s = np.interp(
new_timestamps, self.timestamps, self.samples
).astype(self.samples.dtype)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
return Signal(
s,
new_timestamps,
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
source=self.source,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
) |
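A minimal resampling sketch, assuming the asammdf-style Signal constructor (samples, timestamps, unit, name) visible in the return statements above; the sample values are illustrative.
import numpy as np
sig = Signal(
    samples=np.array([0.0, 10.0, 20.0]),
    timestamps=np.array([0.0, 1.0, 2.0]),
    unit="V",
    name="demo",
)
resampled = sig.interp(np.array([0.5, 1.5]))
print(resampled.samples)     # [ 5. 15.] -- float samples use np.interp
print(resampled.timestamps)  # [0.5 1.5]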
def set_sequence_from_str(self, sequence):
"""
This is a convenience method to set the new QKeySequence of the
shortcut editor from a string.
"""
self._qsequences = [QKeySequence(s) for s in sequence.split(', ')]
self.update_warning() | This is a convenience method to set the new QKeySequence of the
shortcut editor from a string. | Below is the the instruction that describes the task:
### Input:
This is a convenience method to set the new QKeySequence of the
shortcut editor from a string.
### Response:
def set_sequence_from_str(self, sequence):
"""
This is a convenience method to set the new QKeySequence of the
shortcut editor from a string.
"""
self._qsequences = [QKeySequence(s) for s in sequence.split(', ')]
self.update_warning() |
def restore(self):
"""
Unloads all modules that weren't loaded when save_modules was called.
"""
sys = set(self._sys_modules.keys())
for mod_name in sys.difference(self._saved_modules):
del self._sys_modules[mod_name] | Unloads all modules that weren't loaded when save_modules was called. | Below is the the instruction that describes the task:
### Input:
Unloads all modules that weren't loaded when save_modules was called.
### Response:
def restore(self):
"""
Unloads all modules that weren't loaded when save_modules was called.
"""
sys = set(self._sys_modules.keys())
for mod_name in sys.difference(self._saved_modules):
del self._sys_modules[mod_name] |
def unload_extension(self, module_str):
"""Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
"""
if module_str in sys.modules:
mod = sys.modules[module_str]
self._call_unload_ipython_extension(mod) | Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``. | Below is the the instruction that describes the task:
### Input:
Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
### Response:
def unload_extension(self, module_str):
"""Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
"""
if module_str in sys.modules:
mod = sys.modules[module_str]
self._call_unload_ipython_extension(mod) |
def list_data_links(self, instance):
"""
Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
response = self.get_proto(path='/links/' + instance)
message = rest_pb2.ListLinkInfoResponse()
message.ParseFromString(response.content)
links = getattr(message, 'link')
return iter([Link(link) for link in links]) | Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link] | Below is the the instruction that describes the task:
### Input:
Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link]
### Response:
def list_data_links(self, instance):
"""
Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
response = self.get_proto(path='/links/' + instance)
message = rest_pb2.ListLinkInfoResponse()
message.ParseFromString(response.content)
links = getattr(message, 'link')
return iter([Link(link) for link in links]) |
def set_type_by_schema(self, schema_obj, schema_type):
"""
Set property type by schema object
Schema will create, if it doesn't exists in collection
:param dict schema_obj: raw schema object
:param str schema_type:
"""
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id | Set property type by schema object
The schema will be created if it doesn't exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type: | Below is the the instruction that describes the task:
### Input:
Set property type by schema object
The schema will be created if it doesn't exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type:
### Response:
def set_type_by_schema(self, schema_obj, schema_type):
"""
Set property type by schema object
Schema will create, if it doesn't exists in collection
:param dict schema_obj: raw schema object
:param str schema_type:
"""
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id |
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from symengine.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'>
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass("NewBase", None, {}) | Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from symengine.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'> | Below is the the instruction that describes the task:
### Input:
Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from symengine.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'>
### Response:
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from symengine.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'>
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass("NewBase", None, {}) |
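A small sketch showing with_metaclass together with a metaclass that registers the classes it creates; the Registry/Plugin names are illustrative and rely only on the function above.
class Registry(type):
    classes = []
    def __init__(cls, name, bases, namespace):
        super(Registry, cls).__init__(name, bases, namespace)
        Registry.classes.append(cls)
class Plugin(with_metaclass(Registry, object)):
    pass
class CsvPlugin(Plugin):
    pass
print([c.__name__ for c in Registry.classes])  # ['Plugin', 'CsvPlugin']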
def _get_text(self):
"""
Get the text corresponding to this page
"""
boxes = self.boxes
txt = []
for line in boxes:
txt_line = u""
for box in line.word_boxes:
txt_line += u" " + box.content
txt.append(txt_line)
return txt | Get the text corresponding to this page | Below is the the instruction that describes the task:
### Input:
Get the text corresponding to this page
### Response:
def _get_text(self):
"""
Get the text corresponding to this page
"""
boxes = self.boxes
txt = []
for line in boxes:
txt_line = u""
for box in line.word_boxes:
txt_line += u" " + box.content
txt.append(txt_line)
return txt |
def sense_ttb(self, target):
"""Activate the RF field and probe for a Type B Target.
The RC-S956 can discover Type B Targets (Type 4B Tag) at 106
kbps. For a Type 4B Tag the firmware automatically sends an
ATTRIB command that configures the use of DID and 64 byte
maximum frame size. The driver reverts this configuration with
a DESELECT and WUPB command to return the target prepared for
activation (which nfcpy does in the tag activation code).
"""
return super(Device, self).sense_ttb(target, did=b'\x01') | Activate the RF field and probe for a Type B Target.
The RC-S956 can discover Type B Targets (Type 4B Tag) at 106
kbps. For a Type 4B Tag the firmware automatically sends an
ATTRIB command that configures the use of DID and 64 byte
maximum frame size. The driver reverts this configuration with
a DESELECT and WUPB command to return the target prepared for
activation (which nfcpy does in the tag activation code). | Below is the the instruction that describes the task:
### Input:
Activate the RF field and probe for a Type B Target.
The RC-S956 can discover Type B Targets (Type 4B Tag) at 106
kbps. For a Type 4B Tag the firmware automatically sends an
ATTRIB command that configures the use of DID and 64 byte
maximum frame size. The driver reverts this configuration with
a DESELECT and WUPB command to return the target prepared for
activation (which nfcpy does in the tag activation code).
### Response:
def sense_ttb(self, target):
"""Activate the RF field and probe for a Type B Target.
The RC-S956 can discover Type B Targets (Type 4B Tag) at 106
kbps. For a Type 4B Tag the firmware automatically sends an
ATTRIB command that configures the use of DID and 64 byte
maximum frame size. The driver reverts this configuration with
a DESELECT and WUPB command to return the target prepared for
activation (which nfcpy does in the tag activation code).
"""
return super(Device, self).sense_ttb(target, did=b'\x01') |
def _process_messages(self, messages):
"""Send messages to the `processor` callback to be processed
In the case we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
"""
# Have we been told to shutdown?
if self._shuttingdown:
return
# Do we have any messages to process?
if not messages:
# No, we're done with this block. If we had another fetch result
# waiting, this callback will trigger the processing thereof.
if self._msg_block_d:
_msg_block_d, self._msg_block_d = self._msg_block_d, None
_msg_block_d.callback(True)
return
# Yes, we've got some messages to process.
# Default to processing the entire block...
proc_block_size = sys.maxsize
# Unless our auto commit_policy restricts us to process less
if self.auto_commit_every_n:
proc_block_size = self.auto_commit_every_n
# Divide messages into two lists: one to process now, and remainder
msgs_to_proc = messages[:proc_block_size]
msgs_remainder = messages[proc_block_size:]
# Call our processor callable and handle the possibility it returned
# a deferred...
last_offset = msgs_to_proc[-1].offset
self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
log.debug('self.processor return: %r, last_offset: %r', d, last_offset)
# Once the processor completes, clear our _processor_d
d.addBoth(self._clear_processor_deferred)
# Record the offset of the last processed message and check autocommit
d.addCallback(self._update_processed_offset, last_offset)
# If we were stopped, cancel the processor deferred. Note, we have to
# do this here, in addition to in stop() because the processor func
# itself could have called stop(), and then when it returned, we re-set
# self._processor_d to the return of maybeDeferred().
if self._stopping or self._start_d is None:
d.cancel()
else:
# Setup to process the rest of our messages
d.addCallback(lambda _: self._process_messages(msgs_remainder))
# Add an error handler
d.addErrback(self._handle_processor_error) | Send messages to the `processor` callback to be processed
In the case we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed. | Below is the the instruction that describes the task:
### Input:
Send messages to the `processor` callback to be processed
In the case we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
### Response:
def _process_messages(self, messages):
"""Send messages to the `processor` callback to be processed
In the case we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
"""
# Have we been told to shutdown?
if self._shuttingdown:
return
# Do we have any messages to process?
if not messages:
# No, we're done with this block. If we had another fetch result
# waiting, this callback will trigger the processing thereof.
if self._msg_block_d:
_msg_block_d, self._msg_block_d = self._msg_block_d, None
_msg_block_d.callback(True)
return
# Yes, we've got some messages to process.
# Default to processing the entire block...
proc_block_size = sys.maxsize
# Unless our auto commit_policy restricts us to process less
if self.auto_commit_every_n:
proc_block_size = self.auto_commit_every_n
# Divide messages into two lists: one to process now, and remainder
msgs_to_proc = messages[:proc_block_size]
msgs_remainder = messages[proc_block_size:]
# Call our processor callable and handle the possibility it returned
# a deferred...
last_offset = msgs_to_proc[-1].offset
self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
log.debug('self.processor return: %r, last_offset: %r', d, last_offset)
# Once the processor completes, clear our _processor_d
d.addBoth(self._clear_processor_deferred)
# Record the offset of the last processed message and check autocommit
d.addCallback(self._update_processed_offset, last_offset)
# If we were stopped, cancel the processor deferred. Note, we have to
# do this here, in addition to in stop() because the processor func
# itself could have called stop(), and then when it returned, we re-set
# self._processor_d to the return of maybeDeferred().
if self._stopping or self._start_d is None:
d.cancel()
else:
# Setup to process the rest of our messages
d.addCallback(lambda _: self._process_messages(msgs_remainder))
# Add an error handler
d.addErrback(self._handle_processor_error) |
def json(self, dict=False, **kwargs):
"""
Outputs NetJSON format
"""
try:
graph = self.graph
except AttributeError:
raise NotImplementedError()
return _netjson_networkgraph(self.protocol,
self.version,
self.revision,
self.metric,
graph.nodes(data=True),
graph.edges(data=True),
dict,
**kwargs) | Outputs NetJSON format | Below is the the instruction that describes the task:
### Input:
Outputs NetJSON format
### Response:
def json(self, dict=False, **kwargs):
"""
Outputs NetJSON format
"""
try:
graph = self.graph
except AttributeError:
raise NotImplementedError()
return _netjson_networkgraph(self.protocol,
self.version,
self.revision,
self.metric,
graph.nodes(data=True),
graph.edges(data=True),
dict,
**kwargs) |
def derive(self, modifier):
"""
Returns a new :class:`Event` instance that will fire
when this event fires. The value passed to the callbacks
to the new event is the return value of the given
`modifier` function which is passed the original value.
"""
def forward(value):
changed_value = modifier(value)
derived.fire(changed_value)
derived = Event()
self.add_callback(forward)
return derived | Returns a new :class:`Event` instance that will fire
when this event fires. The value passed to the callbacks
to the new event is the return value of the given
`modifier` function which is passed the original value. | Below is the the instruction that describes the task:
### Input:
Returns a new :class:`Event` instance that will fire
when this event fires. The value passed to the callbacks
to the new event is the return value of the given
`modifier` function which is passed the original value.
### Response:
def derive(self, modifier):
"""
Returns a new :class:`Event` instance that will fire
when this event fires. The value passed to the callbacks
to the new event is the return value of the given
`modifier` function which is passed the original value.
"""
def forward(value):
changed_value = modifier(value)
derived.fire(changed_value)
derived = Event()
self.add_callback(forward)
return derived |
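A usage sketch relying only on the Event methods visible above (add_callback and fire); the unit-conversion example is illustrative.
celsius = Event()
fahrenheit = celsius.derive(lambda c: c * 9 / 5 + 32)
fahrenheit.add_callback(lambda f: print("Fahrenheit:", f))
celsius.fire(20)  # prints: Fahrenheit: 68.0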
def verify_client_id(self):
"""
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
"""
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
# Catching also ValueError for the case when client_id doesn't contain an integer.
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided() | Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions. | Below is the the instruction that describes the task:
### Input:
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
### Response:
def verify_client_id(self):
"""
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
"""
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
# Catching also ValueError for the case when client_id doesn't contain an integer.
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided() |
def contains(self, key, counter_id):
"""
Return whether a counter_id is present for a given instance key.
If the key is not in the cache, raises a KeyError.
"""
with self._lock:
return counter_id in self._metadata[key] | Return whether a counter_id is present for a given instance key.
If the key is not in the cache, raises a KeyError. | Below is the the instruction that describes the task:
### Input:
Return whether a counter_id is present for a given instance key.
If the key is not in the cache, raises a KeyError.
### Response:
def contains(self, key, counter_id):
"""
Return whether a counter_id is present for a given instance key.
If the key is not in the cache, raises a KeyError.
"""
with self._lock:
return counter_id in self._metadata[key] |
def get_label(self,callb=None):
"""Convenience method to request the label from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
"""
if self.label is None:
mypartial=partial(self.resp_set_label)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
response = self.req_with_resp(GetLabel, StateLabel, callb=mycallb )
return self.label | Convenience method to request the label from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If not, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str | Below is the the instruction that describes the task:
### Input:
Convenience method to request the label from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If not, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
### Response:
def get_label(self,callb=None):
"""Convenience method to request the label from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If no, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
"""
if self.label is None:
mypartial=partial(self.resp_set_label)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
response = self.req_with_resp(GetLabel, StateLabel, callb=mycallb )
return self.label |
def pkcs7_unpad(data):
"""
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
"""
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]] | Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209 | Below is the the instruction that describes the task:
### Input:
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
### Response:
def pkcs7_unpad(data):
"""
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
"""
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]] |
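A quick illustration with hand-padded inputs; pkcs7_unpad simply strips the trailing pad bytes that were appended before encryption.
padded_bytes = b"secret message!\x01"   # one byte of padding
padded_text = "secret message\x02\x02"  # two characters of padding (str branch)
print(pkcs7_unpad(padded_bytes))  # b'secret message!'
print(pkcs7_unpad(padded_text))   # secret message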
def export(name,
target=None,
rev=None,
user=None,
username=None,
password=None,
force=False,
overwrite=False,
externals=True,
trust=False,
trust_failures=None):
'''
Export a file or directory from an SVN repository
name
Address and path to the file or directory to be exported.
target
Name of the target directory where the checkout will put the working
directory
rev : None
The name revision number to checkout. Enable "force" if the directory
already exists.
user : None
Name of the user performing repository management operations
username : None
The user to access the name repository with. The svn default is the
current user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
force : False
Continue if conflicts are encountered
overwrite : False
Overwrite existing target
externals : True
Change to False to not checkout or update externals
trust : False
Automatically trust the remote server. SVN's --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN's --trust-server-cert-failures
option as-is.
.. versionadded:: 2019.2.0
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not target:
return _fail(ret, 'Target option is required')
svn_cmd = 'svn.export'
cwd, basename = os.path.split(target)
opts = tuple()
if not overwrite and os.path.exists(target) and not os.path.isdir(target):
return _fail(ret,
'The path "{0}" exists and is not '
'a directory.'.format(target)
)
if __opts__['test']:
if not os.path.exists(target):
return _neutral_test(
ret,
('{0} doesn\'t exist and is set to be checked out.').format(target))
svn_cmd = 'svn.list'
rev = 'HEAD'
out = __salt__[svn_cmd](cwd, target, user, username, password, *opts)
return _neutral_test(
ret,
('{0}').format(out))
if not rev:
rev = 'HEAD'
if force:
opts += ('--force',)
if externals is False:
opts += ('--ignore-externals',)
if trust:
opts += ('--trust-server-cert',)
if trust_failures:
opts += ('--trust-server-cert-failures', trust_failures)
out = __salt__[svn_cmd](cwd, name, basename, user, username, password, rev, *opts)
ret['changes']['new'] = name
ret['changes']['comment'] = name + ' was Exported to ' + target
return ret | Export a file or directory from an SVN repository
name
Address and path to the file or directory to be exported.
target
Name of the target directory where the checkout will put the working
directory
rev : None
The name revision number to checkout. Enable "force" if the directory
already exists.
user : None
Name of the user performing repository management operations
username : None
The user to access the name repository with. The svn default is the
current user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
force : False
Continue if conflicts are encountered
overwrite : False
Overwrite existing target
externals : True
Change to False to not checkout or update externals
trust : False
Automatically trust the remote server. SVN's --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN's --trust-server-cert-failures
option as-is.
.. versionadded:: 2019.2.0 | Below is the the instruction that describes the task:
### Input:
Export a file or directory from an SVN repository
name
Address and path to the file or directory to be exported.
target
Name of the target directory where the checkout will put the working
directory
rev : None
The name revision number to checkout. Enable "force" if the directory
already exists.
user : None
Name of the user performing repository management operations
username : None
The user to access the name repository with. The svn default is the
current user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
force : False
Continue if conflicts are encountered
overwrite : False
Overwrite existing target
externals : True
Change to False to not checkout or update externals
trust : False
Automatically trust the remote server. SVN's --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN's --trust-server-cert-failures
option as-is.
.. versionadded:: 2019.2.0
### Response:
def export(name,
target=None,
rev=None,
user=None,
username=None,
password=None,
force=False,
overwrite=False,
externals=True,
trust=False,
trust_failures=None):
'''
Export a file or directory from an SVN repository
name
Address and path to the file or directory to be exported.
target
Name of the target directory where the checkout will put the working
directory
rev : None
The name revision number to checkout. Enable "force" if the directory
already exists.
user : None
Name of the user performing repository management operations
username : None
The user to access the name repository with. The svn default is the
current user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
force : False
Continue if conflicts are encountered
overwrite : False
Overwrite existing target
externals : True
Change to False to not checkout or update externals
trust : False
Automatically trust the remote server. SVN's --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN's --trust-server-cert-failures
option as-is.
.. versionadded:: 2019.2.0
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not target:
return _fail(ret, 'Target option is required')
svn_cmd = 'svn.export'
cwd, basename = os.path.split(target)
opts = tuple()
if not overwrite and os.path.exists(target) and not os.path.isdir(target):
return _fail(ret,
'The path "{0}" exists and is not '
'a directory.'.format(target)
)
if __opts__['test']:
if not os.path.exists(target):
return _neutral_test(
ret,
('{0} doesn\'t exist and is set to be checked out.').format(target))
svn_cmd = 'svn.list'
rev = 'HEAD'
out = __salt__[svn_cmd](cwd, target, user, username, password, *opts)
return _neutral_test(
ret,
('{0}').format(out))
if not rev:
rev = 'HEAD'
if force:
opts += ('--force',)
if externals is False:
opts += ('--ignore-externals',)
if trust:
opts += ('--trust-server-cert',)
if trust_failures:
opts += ('--trust-server-cert-failures', trust_failures)
out = __salt__[svn_cmd](cwd, name, basename, user, username, password, rev, *opts)
ret['changes']['new'] = name
ret['changes']['comment'] = name + ' was Exported to ' + target
return ret |
def escape(url):
'''
add escape character `|` to `url`
'''
if salt.utils.platform.is_windows():
return url
scheme = urlparse(url).scheme
if not scheme:
if url.startswith('|'):
return url
else:
return '|{0}'.format(url)
elif scheme == 'salt':
path, saltenv = parse(url)
if path.startswith('|'):
return create(path, saltenv)
else:
return create('|{0}'.format(path), saltenv)
else:
return url | add escape character `|` to `url` | Below is the the instruction that describes the task:
### Input:
add escape character `|` to `url`
### Response:
def escape(url):
'''
add escape character `|` to `url`
'''
if salt.utils.platform.is_windows():
return url
scheme = urlparse(url).scheme
if not scheme:
if url.startswith('|'):
return url
else:
return '|{0}'.format(url)
elif scheme == 'salt':
path, saltenv = parse(url)
if path.startswith('|'):
return create(path, saltenv)
else:
return create('|{0}'.format(path), saltenv)
else:
return url |
def get_memory_map_xml(self):
"""! @brief Generate GDB memory map XML.
"""
root = ElementTree.Element('memory-map')
for r in self._context.core.memory_map:
# Look up the region type name. Regions default to ram if gdb doesn't
# have a concept of the region type.
gdbType = GDB_TYPE_MAP.get(r.type, 'ram')
start = hex(r.start).rstrip("L")
length = hex(r.length).rstrip("L")
mem = ElementTree.SubElement(root, 'memory', type=gdbType, start=start, length=length)
if r.is_flash:
prop = ElementTree.SubElement(mem, 'property', name='blocksize')
prop.text = hex(r.blocksize).rstrip("L")
return MAP_XML_HEADER + ElementTree.tostring(root) | ! @brief Generate GDB memory map XML. | Below is the the instruction that describes the task:
### Input:
! @brief Generate GDB memory map XML.
### Response:
def get_memory_map_xml(self):
"""! @brief Generate GDB memory map XML.
"""
root = ElementTree.Element('memory-map')
for r in self._context.core.memory_map:
# Look up the region type name. Regions default to ram if gdb doesn't
# have a concept of the region type.
gdbType = GDB_TYPE_MAP.get(r.type, 'ram')
start = hex(r.start).rstrip("L")
length = hex(r.length).rstrip("L")
mem = ElementTree.SubElement(root, 'memory', type=gdbType, start=start, length=length)
if r.is_flash:
prop = ElementTree.SubElement(mem, 'property', name='blocksize')
prop.text = hex(r.blocksize).rstrip("L")
return MAP_XML_HEADER + ElementTree.tostring(root) |
def add_aggregated_lv_components(network, components):
"""
Aggregates LV load and generation at LV stations
Use this function if you aim for MV calculation only. The according
DataFrames of `components` are extended by load and generators representing
these aggregated respecting the technology type.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
components : dict of :pandas:`pandas.DataFrame<dataframe>`
PyPSA components in tabular format
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
The dictionary components passed to the function is returned altered.
"""
generators = {}
loads = {}
# collect aggregated generation capacity by type and subtype
# collect aggregated load grouped by sector
for lv_grid in network.mv_grid.lv_grids:
generators.setdefault(lv_grid, {})
for gen in lv_grid.generators:
generators[lv_grid].setdefault(gen.type, {})
generators[lv_grid][gen.type].setdefault(gen.subtype, {})
generators[lv_grid][gen.type][gen.subtype].setdefault(
'capacity', 0)
generators[lv_grid][gen.type][gen.subtype][
'capacity'] += gen.nominal_capacity
generators[lv_grid][gen.type][gen.subtype].setdefault(
'name',
'_'.join([gen.type,
gen.subtype,
'aggregated',
'LV_grid',
str(lv_grid.id)]))
loads.setdefault(lv_grid, {})
for lo in lv_grid.graph.nodes_by_attribute('load'):
for sector, val in lo.consumption.items():
loads[lv_grid].setdefault(sector, 0)
loads[lv_grid][sector] += val
# define dict for DataFrame creation of aggr. generation and load
generator = {'name': [],
'bus': [],
'control': [],
'p_nom': [],
'type': []}
load = {'name': [], 'bus': []}
# fill generators dictionary for DataFrame creation
for lv_grid_obj, lv_grid in generators.items():
for _, gen_type in lv_grid.items():
for _, gen_subtype in gen_type.items():
generator['name'].append(gen_subtype['name'])
generator['bus'].append(
'_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
generator['control'].append('PQ')
generator['p_nom'].append(gen_subtype['capacity'])
generator['type'].append("")
# fill loads dictionary for DataFrame creation
for lv_grid_obj, lv_grid in loads.items():
for sector, val in lv_grid.items():
load['name'].append('_'.join(['Load', sector, repr(lv_grid_obj)]))
load['bus'].append(
'_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
components['Generator'] = pd.concat(
[components['Generator'], pd.DataFrame(generator).set_index('name')])
components['Load'] = pd.concat(
[components['Load'], pd.DataFrame(load).set_index('name')])
return components | Aggregates LV load and generation at LV stations
Use this function if you aim for MV calculation only. The corresponding
DataFrames in `components` are extended by loads and generators representing
these aggregates, respecting the technology type.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
components : dict of :pandas:`pandas.DataFrame<dataframe>`
PyPSA components in tabular format
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
The dictionary components passed to the function is returned altered. | Below is the the instruction that describes the task:
### Input:
Aggregates LV load and generation at LV stations
Use this function if you aim for MV calculation only. The corresponding
DataFrames in `components` are extended by loads and generators representing
these aggregates, respecting the technology type.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
components : dict of :pandas:`pandas.DataFrame<dataframe>`
PyPSA components in tabular format
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
The dictionary components passed to the function is returned altered.
### Response:
def add_aggregated_lv_components(network, components):
"""
Aggregates LV load and generation at LV stations
    Use this function if you aim for an MV calculation only. The corresponding
    DataFrames in `components` are extended by loads and generators representing
    these aggregates, respecting the technology type.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
components : dict of :pandas:`pandas.DataFrame<dataframe>`
PyPSA components in tabular format
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
The dictionary components passed to the function is returned altered.
"""
generators = {}
loads = {}
# collect aggregated generation capacity by type and subtype
# collect aggregated load grouped by sector
for lv_grid in network.mv_grid.lv_grids:
generators.setdefault(lv_grid, {})
for gen in lv_grid.generators:
generators[lv_grid].setdefault(gen.type, {})
generators[lv_grid][gen.type].setdefault(gen.subtype, {})
generators[lv_grid][gen.type][gen.subtype].setdefault(
'capacity', 0)
generators[lv_grid][gen.type][gen.subtype][
'capacity'] += gen.nominal_capacity
generators[lv_grid][gen.type][gen.subtype].setdefault(
'name',
'_'.join([gen.type,
gen.subtype,
'aggregated',
'LV_grid',
str(lv_grid.id)]))
loads.setdefault(lv_grid, {})
for lo in lv_grid.graph.nodes_by_attribute('load'):
for sector, val in lo.consumption.items():
loads[lv_grid].setdefault(sector, 0)
loads[lv_grid][sector] += val
# define dict for DataFrame creation of aggr. generation and load
generator = {'name': [],
'bus': [],
'control': [],
'p_nom': [],
'type': []}
load = {'name': [], 'bus': []}
# fill generators dictionary for DataFrame creation
for lv_grid_obj, lv_grid in generators.items():
for _, gen_type in lv_grid.items():
for _, gen_subtype in gen_type.items():
generator['name'].append(gen_subtype['name'])
generator['bus'].append(
'_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
generator['control'].append('PQ')
generator['p_nom'].append(gen_subtype['capacity'])
generator['type'].append("")
# fill loads dictionary for DataFrame creation
for lv_grid_obj, lv_grid in loads.items():
for sector, val in lv_grid.items():
load['name'].append('_'.join(['Load', sector, repr(lv_grid_obj)]))
load['bus'].append(
'_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
components['Generator'] = pd.concat(
[components['Generator'], pd.DataFrame(generator).set_index('name')])
components['Load'] = pd.concat(
[components['Load'], pd.DataFrame(load).set_index('name')])
return components |
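The aggregation above boils down to a nested ``setdefault`` accumulation keyed by type and subtype. A minimal standalone sketch of that pattern, with invented (type, subtype, nominal capacity) tuples standing in for the eDisGo generator objects:
# Accumulate nominal capacity per (type, subtype), mirroring the nested
# setdefault pattern above. The sample tuples below are made up.
gens = [("solar", "roof", 30.0), ("solar", "roof", 12.5), ("wind", "onshore", 900.0)]

agg = {}
for gen_type, subtype, p_nom in gens:
    agg.setdefault(gen_type, {})
    agg[gen_type].setdefault(subtype, {"capacity": 0.0})
    agg[gen_type][subtype]["capacity"] += p_nom

print(agg["solar"]["roof"]["capacity"])    # 42.5
print(agg["wind"]["onshore"]["capacity"])  # 900.0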
def assign_taxonomy(
data, min_confidence=0.80, output_fp=None, training_data_fp=None,
fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()):
"""Assign taxonomy to each sequence in data with the RDP classifier
data: open fasta file object or list of fasta lines
    min_confidence: minimum support threshold to assign taxonomy to a sequence
output_fp: path to write output; if not provided, result will be
returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
"""
# Going to iterate through this twice in succession, best to force
# evaluation now
data = list(data)
# RDP classifier doesn't preserve identifiers with spaces
# Use lookup table
seq_id_lookup = {}
for seq_id, seq in parse_fasta(data):
seq_id_lookup[seq_id.split()[0]] = seq_id
app_kwargs = {}
if tmp_dir is not None:
app_kwargs['TmpDir'] = tmp_dir
app = RdpClassifier(**app_kwargs)
if max_memory is not None:
app.Parameters['-Xmx'].on(max_memory)
temp_output_file = tempfile.NamedTemporaryFile(
prefix='RdpAssignments_', suffix='.txt', dir=tmp_dir)
app.Parameters['-o'].on(temp_output_file.name)
if training_data_fp is not None:
app.Parameters['-t'].on(training_data_fp)
if fixrank:
app.Parameters['-f'].on('fixrank')
else:
app.Parameters['-f'].on('allrank')
app_result = app(data)
assignments = {}
# ShortSequenceException messages are written to stdout
    # Tag these IDs as unassignable
for line in app_result['StdOut']:
excep = parse_rdp_exception(line)
if excep is not None:
_, rdp_id = excep
orig_id = seq_id_lookup[rdp_id]
assignments[orig_id] = ('Unassignable', 1.0)
for line in app_result['Assignments']:
rdp_id, direction, taxa = parse_rdp_assignment(line)
if taxa[0][0] == "Root":
taxa = taxa[1:]
orig_id = seq_id_lookup[rdp_id]
lineage, confidence = get_rdp_lineage(taxa, min_confidence)
if lineage:
assignments[orig_id] = (';'.join(lineage), confidence)
else:
assignments[orig_id] = ('Unclassified', 1.0)
if output_fp:
try:
output_file = open(output_fp, 'w')
except OSError:
raise OSError("Can't open output file for writing: %s" % output_fp)
for seq_id, assignment in assignments.items():
lineage, confidence = assignment
output_file.write(
'%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
output_file.close()
return None
else:
return assignments | Assign taxonomy to each sequence in data with the RDP classifier
data: open fasta file object or list of fasta lines
min_confidence: minimum support threshold to assign taxonomy to a sequence
output_fp: path to write output; if not provided, result will be
returned in a dict of {seq_id:(taxonomy_assignment,confidence)} | Below is the the instruction that describes the task:
### Input:
Assign taxonomy to each sequence in data with the RDP classifier
data: open fasta file object or list of fasta lines
min_confidence: minimum support threshold to assign taxonomy to a sequence
output_fp: path to write output; if not provided, result will be
returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
### Response:
def assign_taxonomy(
data, min_confidence=0.80, output_fp=None, training_data_fp=None,
fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()):
"""Assign taxonomy to each sequence in data with the RDP classifier
data: open fasta file object or list of fasta lines
    min_confidence: minimum support threshold to assign taxonomy to a sequence
output_fp: path to write output; if not provided, result will be
returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
"""
# Going to iterate through this twice in succession, best to force
# evaluation now
data = list(data)
# RDP classifier doesn't preserve identifiers with spaces
# Use lookup table
seq_id_lookup = {}
for seq_id, seq in parse_fasta(data):
seq_id_lookup[seq_id.split()[0]] = seq_id
app_kwargs = {}
if tmp_dir is not None:
app_kwargs['TmpDir'] = tmp_dir
app = RdpClassifier(**app_kwargs)
if max_memory is not None:
app.Parameters['-Xmx'].on(max_memory)
temp_output_file = tempfile.NamedTemporaryFile(
prefix='RdpAssignments_', suffix='.txt', dir=tmp_dir)
app.Parameters['-o'].on(temp_output_file.name)
if training_data_fp is not None:
app.Parameters['-t'].on(training_data_fp)
if fixrank:
app.Parameters['-f'].on('fixrank')
else:
app.Parameters['-f'].on('allrank')
app_result = app(data)
assignments = {}
# ShortSequenceException messages are written to stdout
    # Tag these IDs as unassignable
for line in app_result['StdOut']:
excep = parse_rdp_exception(line)
if excep is not None:
_, rdp_id = excep
orig_id = seq_id_lookup[rdp_id]
assignments[orig_id] = ('Unassignable', 1.0)
for line in app_result['Assignments']:
rdp_id, direction, taxa = parse_rdp_assignment(line)
if taxa[0][0] == "Root":
taxa = taxa[1:]
orig_id = seq_id_lookup[rdp_id]
lineage, confidence = get_rdp_lineage(taxa, min_confidence)
if lineage:
assignments[orig_id] = (';'.join(lineage), confidence)
else:
assignments[orig_id] = ('Unclassified', 1.0)
if output_fp:
try:
output_file = open(output_fp, 'w')
except OSError:
raise OSError("Can't open output file for writing: %s" % output_fp)
for seq_id, assignment in assignments.items():
lineage, confidence = assignment
output_file.write(
'%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
output_file.close()
return None
else:
return assignments |
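One detail worth illustrating is the identifier round-trip: the RDP classifier reports only the first whitespace-delimited token of each FASTA header, so the function keeps a lookup from that token back to the full header. A tiny sketch with invented headers:
# Map the whitespace-truncated id (what RDP echoes back) to the full header.
fasta_headers = ["seq1 soil sample A", "seq2 gut sample B"]
seq_id_lookup = {header.split()[0]: header for header in fasta_headers}

rdp_id = "seq2"               # identifier as it appears in the RDP output
print(seq_id_lookup[rdp_id])  # seq2 gut sample B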
def setup(self):
"""
*setup the workspace in the requested location*
**Return:**
- ``None``
"""
self.log.info('starting the ``setup`` method')
# RECURSIVELY CREATE MISSING DIRECTORIES
if not os.path.exists(self.pathToWorkspace):
os.makedirs(self.pathToWorkspace)
if not os.path.exists(self.pathToWorkspace + "/qubits_output"):
os.makedirs(self.pathToWorkspace + "/qubits_output")
# FIND RESOURCES
spectralDB = os.path.dirname(
__file__) + "/resources/qubits_spectral_database"
qubitsSettings = os.path.dirname(
__file__) + "/resources/qubits_settings.yaml"
dstSettings = self.pathToWorkspace + "/qubits_settings.yaml"
    # CHECK FOR PRE-EXISTENCE
if os.path.exists(self.pathToWorkspace + "/qubits_spectral_database") or os.path.exists(dstSettings):
self.log.warning(
"A qubits workspace seems to already exist in this location")
sys.exit(0)
# COPY ASSETS TO REQUESTED LOCATION
shutil.copytree(spectralDB, self.pathToWorkspace +
"/qubits_spectral_database")
shutil.copyfile(qubitsSettings, dstSettings)
# ADD USER'S HOME FOLDER INTO SETTINGS
pathToReadFile = dstSettings
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
readFile.close()
thisData = thisData.replace("/Users/XXXX", self.home)
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
writeFile = codecs.open(pathToReadFile, encoding='utf-8', mode='w')
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
writeFile.write(thisData)
writeFile.close()
self.log.info('completed the ``setup`` method')
return None | *setup the workspace in the requested location*
**Return:**
- ``None`` | Below is the the instruction that describes the task:
### Input:
*setup the workspace in the requested location*
**Return:**
- ``None``
### Response:
def setup(self):
"""
*setup the workspace in the requested location*
**Return:**
- ``None``
"""
self.log.info('starting the ``setup`` method')
# RECURSIVELY CREATE MISSING DIRECTORIES
if not os.path.exists(self.pathToWorkspace):
os.makedirs(self.pathToWorkspace)
if not os.path.exists(self.pathToWorkspace + "/qubits_output"):
os.makedirs(self.pathToWorkspace + "/qubits_output")
# FIND RESOURCES
spectralDB = os.path.dirname(
__file__) + "/resources/qubits_spectral_database"
qubitsSettings = os.path.dirname(
__file__) + "/resources/qubits_settings.yaml"
dstSettings = self.pathToWorkspace + "/qubits_settings.yaml"
    # CHECK FOR PRE-EXISTENCE
if os.path.exists(self.pathToWorkspace + "/qubits_spectral_database") or os.path.exists(dstSettings):
self.log.warning(
"A qubits workspace seems to already exist in this location")
sys.exit(0)
# COPY ASSETS TO REQUESTED LOCATION
shutil.copytree(spectralDB, self.pathToWorkspace +
"/qubits_spectral_database")
shutil.copyfile(qubitsSettings, dstSettings)
# ADD USER'S HOME FOLDER INTO SETTINGS
pathToReadFile = dstSettings
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
readFile.close()
thisData = thisData.replace("/Users/XXXX", self.home)
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
writeFile = codecs.open(pathToReadFile, encoding='utf-8', mode='w')
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
writeFile.write(thisData)
writeFile.close()
self.log.info('completed the ``setup`` method')
return None |
def delete(self, id):
"""DELETE /layertemplates/id: Delete an existing item."""
# url('LayerTemplates', id=ID)
lt = meta.Session.query(LayerTemplate).get(id)
# use following query for getting a layertemplate owned by current user
#lt = self._get_lt_from_user_by_id(c.user, id)
if lt is None:
abort(404)
meta.Session.delete(lt)
meta.Session.commit() | DELETE /layertemplates/id: Delete an existing item. | Below is the the instruction that describes the task:
### Input:
DELETE /layertemplates/id: Delete an existing item.
### Response:
def delete(self, id):
"""DELETE /layertemplates/id: Delete an existing item."""
# url('LayerTemplates', id=ID)
lt = meta.Session.query(LayerTemplate).get(id)
# use following query for getting a layertemplate owned by current user
#lt = self._get_lt_from_user_by_id(c.user, id)
if lt is None:
abort(404)
meta.Session.delete(lt)
meta.Session.commit() |
def update(did):
"""Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
"""
required_attributes = ['@context', 'created', 'id', 'publicKey', 'authentication', 'proof',
'service']
required_metadata_base_attributes = ['name', 'dateCreated', 'author', 'license',
'price', 'encryptedFiles', 'type', 'checksum']
required_metadata_curation_attributes = ['rating', 'numVotes']
assert isinstance(request.json, dict), 'invalid payload format.'
data = request.json
if not data:
logger.error(f'request body seems empty, expecting {required_attributes}')
return 400
msg, status = check_required_attributes(required_attributes, data, 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_base_attributes,
_get_base_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_curation_attributes,
_get_curation_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_no_urls_in_files(_get_base_metadata(data['service']), 'register')
if msg:
return msg, status
msg, status = validate_date_format(data['created'])
if msg:
return msg, status
_record = dict()
_record = copy.deepcopy(data)
_record['created'] = datetime.strptime(data['created'], '%Y-%m-%dT%H:%M:%SZ')
try:
if dao.get(did) is None:
register()
return _sanitize_record(_record), 201
else:
for service in _record['service']:
service_id = int(service['serviceDefinitionId'])
if service['type'] == 'Metadata':
_record['service'][service_id]['metadata']['base']['datePublished'] = _get_date(
dao.get(did)['service'])
dao.update(_record, did)
return Response(_sanitize_record(_record), 200, content_type='application/json')
except Exception as err:
return f'Some error: {str(err)}', 500 | Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error | Below is the the instruction that describes the task:
### Input:
Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
### Response:
def update(did):
"""Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
"""
required_attributes = ['@context', 'created', 'id', 'publicKey', 'authentication', 'proof',
'service']
required_metadata_base_attributes = ['name', 'dateCreated', 'author', 'license',
'price', 'encryptedFiles', 'type', 'checksum']
required_metadata_curation_attributes = ['rating', 'numVotes']
assert isinstance(request.json, dict), 'invalid payload format.'
data = request.json
if not data:
logger.error(f'request body seems empty, expecting {required_attributes}')
return 400
msg, status = check_required_attributes(required_attributes, data, 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_base_attributes,
_get_base_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_curation_attributes,
_get_curation_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_no_urls_in_files(_get_base_metadata(data['service']), 'register')
if msg:
return msg, status
msg, status = validate_date_format(data['created'])
if msg:
return msg, status
_record = dict()
_record = copy.deepcopy(data)
_record['created'] = datetime.strptime(data['created'], '%Y-%m-%dT%H:%M:%SZ')
try:
if dao.get(did) is None:
register()
return _sanitize_record(_record), 201
else:
for service in _record['service']:
service_id = int(service['serviceDefinitionId'])
if service['type'] == 'Metadata':
_record['service'][service_id]['metadata']['base']['datePublished'] = _get_date(
dao.get(did)['service'])
dao.update(_record, did)
return Response(_sanitize_record(_record), 200, content_type='application/json')
except Exception as err:
return f'Some error: {str(err)}', 500 |
def flatten_list(multiply_list):
"""
    Flatten a nested list::
>>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
>>> flatten_list(a)
[1, 2, 3, 4, 5, 6, 7, 8]
    :param multiply_list: a nested, multi-level list
    :return: a flat, single-level list
"""
if isinstance(multiply_list, list):
return [rv for l in multiply_list for rv in flatten_list(l)]
else:
        return [multiply_list] | Flatten a nested list::
>>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
>>> flatten_list(a)
[1, 2, 3, 4, 5, 6, 7, 8]
    :param multiply_list: a nested, multi-level list
    :return: a flat, single-level list
### Input:
Flatten a nested list::
>>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
>>> flatten_list(a)
[1, 2, 3, 4, 5, 6, 7, 8]
:param multiply_list: a nested, multi-level list
:return: a flat, single-level list
### Response:
def flatten_list(multiply_list):
"""
    Flatten a nested list::
>>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
>>> flatten_list(a)
[1, 2, 3, 4, 5, 6, 7, 8]
    :param multiply_list: a nested, multi-level list
    :return: a flat, single-level list
"""
if isinstance(multiply_list, list):
return [rv for l in multiply_list for rv in flatten_list(l)]
else:
return [multiply_list] |
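Usage, assuming the definition above is in scope: nesting depth is arbitrary, and any non-list value (including a string or a tuple) is treated as a single leaf.
print(flatten_list([1, [2, [3, [4]]], 5]))  # [1, 2, 3, 4, 5]
print(flatten_list("abc"))                  # ['abc']  (a non-list value is one leaf)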
def get_go2sectiontxt(self):
"""Return a dict with actual header and user GO IDs as keys and their sections as values."""
go2txt = {}
_get_secs = self.hdrobj.get_sections
hdrgo2sectxt = {h:" ".join(_get_secs(h)) for h in self.get_hdrgos()}
usrgo2hdrgo = self.get_usrgo2hdrgo()
for goid, ntgo in self.go2nt.items():
hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
go2txt[goid] = hdrgo2sectxt[hdrgo]
return go2txt | Return a dict with actual header and user GO IDs as keys and their sections as values. | Below is the the instruction that describes the task:
### Input:
Return a dict with actual header and user GO IDs as keys and their sections as values.
### Response:
def get_go2sectiontxt(self):
"""Return a dict with actual header and user GO IDs as keys and their sections as values."""
go2txt = {}
_get_secs = self.hdrobj.get_sections
hdrgo2sectxt = {h:" ".join(_get_secs(h)) for h in self.get_hdrgos()}
usrgo2hdrgo = self.get_usrgo2hdrgo()
for goid, ntgo in self.go2nt.items():
hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
go2txt[goid] = hdrgo2sectxt[hdrgo]
return go2txt |
def get_fields(self, field_verbose=True, value_verbose=True, fields=[], extra_fields=[], remove_fields = []):
'''
        Return a list of field names and their corresponding values.
        If field_verbose is True, use each field's verbose_name as defined on the model; if False, use its name.
        If value_verbose is True, return the display value (converted to the choice label); if False, return the raw stored value.
        fields restricts the output to the listed fields.
        extra_fields lists non-field attributes that need special handling, e.g. methods.
        remove_fields lists fields to exclude from the output.
'''
field_list = []
for field in self.__class__._meta.fields:
if field.name in remove_fields:
                # field is excluded from the output, skip it
continue
if fields and field.name not in fields:
                # fields is a non-empty whitelist and field.name is not in it, skip it
continue
if field.verbose_name and field_verbose:
value_tuple = (field.verbose_name, self.get_field_value(field, value_verbose))
else:
value_tuple = (field.name, self.get_field_value(field, value_verbose))
field_list.append(value_tuple)
for name in extra_fields:
            # handle the listed methods
method = getattr(self, name)
result = method()
value_tuple = (name, result)
field_list.append(value_tuple)
        return field_list | Return a list of field names and their corresponding values.
If field_verbose is True, use each field's verbose_name as defined on the model; if False, use its name.
If value_verbose is True, return the display value (converted to the choice label); if False, return the raw stored value.
fields restricts the output to the listed fields.
extra_fields lists non-field attributes that need special handling, e.g. methods.
remove_fields lists fields to exclude from the output. | Below is the the instruction that describes the task:
### Input:
Return a list of field names and their corresponding values.
If field_verbose is True, use each field's verbose_name as defined on the model; if False, use its name.
If value_verbose is True, return the display value (converted to the choice label); if False, return the raw stored value.
fields restricts the output to the listed fields.
extra_fields lists non-field attributes that need special handling, e.g. methods.
remove_fields lists fields to exclude from the output.
### Response:
def get_fields(self, field_verbose=True, value_verbose=True, fields=[], extra_fields=[], remove_fields = []):
'''
        Return a list of field names and their corresponding values.
        If field_verbose is True, use each field's verbose_name as defined on the model; if False, use its name.
        If value_verbose is True, return the display value (converted to the choice label); if False, return the raw stored value.
        fields restricts the output to the listed fields.
        extra_fields lists non-field attributes that need special handling, e.g. methods.
        remove_fields lists fields to exclude from the output.
'''
field_list = []
for field in self.__class__._meta.fields:
if field.name in remove_fields:
                # field is excluded from the output, skip it
continue
if fields and field.name not in fields:
                # fields is a non-empty whitelist and field.name is not in it, skip it
continue
if field.verbose_name and field_verbose:
value_tuple = (field.verbose_name, self.get_field_value(field, value_verbose))
else:
value_tuple = (field.name, self.get_field_value(field, value_verbose))
field_list.append(value_tuple)
for name in extra_fields:
            # handle the listed methods
method = getattr(self, name)
result = method()
value_tuple = (name, result)
field_list.append(value_tuple)
return field_list |
def rowCount(self, index=QModelIndex()):
"""Array row number"""
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded | Array row number | Below is the the instruction that describes the task:
### Input:
Array row number
### Response:
def rowCount(self, index=QModelIndex()):
"""Array row number"""
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded |
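The method reports only as many rows as have been loaded so far, which is the incremental-loading pattern used with Qt item models. A Qt-free sketch of the same idea (class and attribute names below are illustrative):
class LazyRows:
    """Report rows in chunks so a view never asks for data that is not loaded yet."""
    ROWS_PER_FETCH = 500

    def __init__(self, total_rows):
        self.total_rows = total_rows
        self.rows_loaded = 0

    def row_count(self):
        # Same branch as rowCount above.
        return self.total_rows if self.total_rows <= self.rows_loaded else self.rows_loaded

    def fetch_more(self):
        self.rows_loaded = min(self.rows_loaded + self.ROWS_PER_FETCH, self.total_rows)

model = LazyRows(1200)
model.fetch_more()
print(model.row_count())  # 500
model.fetch_more()
model.fetch_more()
print(model.row_count())  # 1200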
def _archive_entry_year(self, category):
" Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category "
year = getattr(settings, 'ARCHIVE_ENTRY_YEAR', None)
if not year:
n = now()
try:
year = Listing.objects.filter(
category__site__id=settings.SITE_ID,
category__tree_path__startswith=category.tree_path,
publish_from__lte=n
).values('publish_from')[0]['publish_from'].year
except:
year = n.year
return year | Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category | Below is the the instruction that describes the task:
### Input:
Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category
### Response:
def _archive_entry_year(self, category):
" Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category "
year = getattr(settings, 'ARCHIVE_ENTRY_YEAR', None)
if not year:
n = now()
try:
year = Listing.objects.filter(
category__site__id=settings.SITE_ID,
category__tree_path__startswith=category.tree_path,
publish_from__lte=n
).values('publish_from')[0]['publish_from'].year
except:
year = n.year
return year |
def list_bookmarks(self, start_date=None, end_date=None, limit=None):
"""List the aggregation's bookmarks."""
query = Search(
using=self.client,
index=self.aggregation_alias,
doc_type=self.bookmark_doc_type
).sort({'date': {'order': 'desc'}})
range_args = {}
if start_date:
range_args['gte'] = self._format_range_dt(
start_date.replace(microsecond=0))
if end_date:
range_args['lte'] = self._format_range_dt(
end_date.replace(microsecond=0))
if range_args:
query = query.filter('range', date=range_args)
return query[0:limit].execute() if limit else query.scan() | List the aggregation's bookmarks. | Below is the the instruction that describes the task:
### Input:
List the aggregation's bookmarks.
### Response:
def list_bookmarks(self, start_date=None, end_date=None, limit=None):
"""List the aggregation's bookmarks."""
query = Search(
using=self.client,
index=self.aggregation_alias,
doc_type=self.bookmark_doc_type
).sort({'date': {'order': 'desc'}})
range_args = {}
if start_date:
range_args['gte'] = self._format_range_dt(
start_date.replace(microsecond=0))
if end_date:
range_args['lte'] = self._format_range_dt(
end_date.replace(microsecond=0))
if range_args:
query = query.filter('range', date=range_args)
return query[0:limit].execute() if limit else query.scan() |
def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
"""Updates the hunt object by applying the update function."""
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
key = k[:-len(delta_suffix)]
current_value = getattr(hunt_obj, key)
setattr(hunt_obj, key, current_value + v)
else:
setattr(hunt_obj, k, v)
if start_time is not None:
hunt_obj.init_start_time = hunt_obj.init_start_time or start_time
hunt_obj.last_start_time = start_time
hunt_obj.last_update_time = rdfvalue.RDFDatetime.Now()
self.hunts[hunt_obj.hunt_id] = hunt_obj | Updates the hunt object by applying the update function. | Below is the the instruction that describes the task:
### Input:
Updates the hunt object by applying the update function.
### Response:
def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
"""Updates the hunt object by applying the update function."""
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
key = k[:-len(delta_suffix)]
current_value = getattr(hunt_obj, key)
setattr(hunt_obj, key, current_value + v)
else:
setattr(hunt_obj, k, v)
if start_time is not None:
hunt_obj.init_start_time = hunt_obj.init_start_time or start_time
hunt_obj.last_start_time = start_time
hunt_obj.last_update_time = rdfvalue.RDFDatetime.Now()
self.hunts[hunt_obj.hunt_id] = hunt_obj |
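The ``*_delta`` keyword convention above selects increment-in-place instead of overwrite. A standalone sketch of that convention; the ``Hunt`` class here is a toy stand-in, not the GRR hunt object:
class Hunt:
    def __init__(self):
        self.num_clients = 0
        self.description = ""

def update(obj, **kwargs):
    delta_suffix = "_delta"
    for k, v in kwargs.items():
        if v is None:
            continue
        if k.endswith(delta_suffix):
            key = k[:-len(delta_suffix)]
            setattr(obj, key, getattr(obj, key) + v)  # increment existing value
        else:
            setattr(obj, k, v)                        # plain overwrite

h = Hunt()
update(h, num_clients_delta=3, description="test hunt")
update(h, num_clients_delta=2)
print(h.num_clients, h.description)  # 5 test hunt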
def action_delete(self, courseid, taskid, path):
""" Delete a file or a directory """
# normalize
path = path.strip()
if not path.startswith("/"):
path = "/" + path
wanted_path = self.verify_path(courseid, taskid, path)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Internal error"))
# special case: cannot delete current directory of the task
if "/" == wanted_path:
return self.show_tab_file(courseid, taskid, _("Internal error"))
try:
self.task_factory.get_task_fs(courseid, taskid).delete(wanted_path)
return self.show_tab_file(courseid, taskid)
except:
return self.show_tab_file(courseid, taskid, _("An error occurred while deleting the files")) | Delete a file or a directory | Below is the the instruction that describes the task:
### Input:
Delete a file or a directory
### Response:
def action_delete(self, courseid, taskid, path):
""" Delete a file or a directory """
# normalize
path = path.strip()
if not path.startswith("/"):
path = "/" + path
wanted_path = self.verify_path(courseid, taskid, path)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Internal error"))
# special case: cannot delete current directory of the task
if "/" == wanted_path:
return self.show_tab_file(courseid, taskid, _("Internal error"))
try:
self.task_factory.get_task_fs(courseid, taskid).delete(wanted_path)
return self.show_tab_file(courseid, taskid)
except:
return self.show_tab_file(courseid, taskid, _("An error occurred while deleting the files")) |
def _try_to_get_extension(obj):
"""
Try to get file extension from given path or file object.
:param obj: a file, file-like object or something
:return: File extension or None
>>> _try_to_get_extension("a.py")
'py'
"""
if is_path(obj):
path = obj
elif is_path_obj(obj):
return obj.suffix[1:]
elif is_file_stream(obj):
try:
path = get_path_from_stream(obj)
except ValueError:
return None
elif is_ioinfo(obj):
path = obj.path
else:
return None
if path:
return get_file_extension(path)
return None | Try to get file extension from given path or file object.
:param obj: a file, file-like object or something
:return: File extension or None
>>> _try_to_get_extension("a.py")
'py' | Below is the the instruction that describes the task:
### Input:
Try to get file extension from given path or file object.
:param obj: a file, file-like object or something
:return: File extension or None
>>> _try_to_get_extension("a.py")
'py'
### Response:
def _try_to_get_extension(obj):
"""
Try to get file extension from given path or file object.
:param obj: a file, file-like object or something
:return: File extension or None
>>> _try_to_get_extension("a.py")
'py'
"""
if is_path(obj):
path = obj
elif is_path_obj(obj):
return obj.suffix[1:]
elif is_file_stream(obj):
try:
path = get_path_from_stream(obj)
except ValueError:
return None
elif is_ioinfo(obj):
path = obj.path
else:
return None
if path:
return get_file_extension(path)
return None |
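For the ``pathlib`` branch, the whole lookup reduces to ``Path.suffix`` without the leading dot; the paths below are made up:
from pathlib import Path

print(Path("archive.tar.gz").suffix[1:])  # gz  (only the last suffix)
print(Path("a.py").suffix[1:])            # py
print(Path("README").suffix[1:])          # ''  (no extension)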
def vlm_add_input(self, psz_name, psz_input):
'''Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input)) | Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error. | Below is the the instruction that describes the task:
### Input:
Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
### Response:
def vlm_add_input(self, psz_name, psz_input):
'''Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input)) |
def IsPropertyInMetaIgnoreCase(classId, key):
""" Methods returns the property meta of the provided key for the given classId. Given key is case insensitive. """
if classId in _ManagedObjectMeta:
for prop in _ManagedObjectMeta[classId]:
if (prop.lower() == key.lower()):
return _ManagedObjectMeta[classId][prop]
if classId in _MethodFactoryMeta:
for prop in _MethodFactoryMeta[classId]:
if (prop.lower() == key.lower()):
return _MethodFactoryMeta[classId][prop]
    return None | Returns the property meta of the provided key for the given classId. The given key is case-insensitive. | Below is the the instruction that describes the task:
### Input:
Returns the property meta of the provided key for the given classId. The given key is case-insensitive.
### Response:
def IsPropertyInMetaIgnoreCase(classId, key):
""" Methods returns the property meta of the provided key for the given classId. Given key is case insensitive. """
if classId in _ManagedObjectMeta:
for prop in _ManagedObjectMeta[classId]:
if (prop.lower() == key.lower()):
return _ManagedObjectMeta[classId][prop]
if classId in _MethodFactoryMeta:
for prop in _MethodFactoryMeta[classId]:
if (prop.lower() == key.lower()):
return _MethodFactoryMeta[classId][prop]
return None |
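The lookup is a linear, case-insensitive scan over a per-class property table. A self-contained sketch of the same idea; the table contents below are toy stand-ins, not real meta data:
meta_by_class = {"LsServer": {"Dn": "dn-meta", "Descr": "descr-meta"}}

def prop_meta_ignore_case(class_id, key):
    # Compare keys case-insensitively, return the first match or None.
    for prop, meta in meta_by_class.get(class_id, {}).items():
        if prop.lower() == key.lower():
            return meta
    return None

print(prop_meta_ignore_case("LsServer", "DESCR"))    # descr-meta
print(prop_meta_ignore_case("LsServer", "missing"))  # None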
def get_hmac(self, key):
"""Returns the keyed HMAC for authentication of this state data.
:param key: the key for the keyed hash function
"""
h = HMAC.new(key, None, SHA256)
h.update(self.iv)
h.update(str(self.chunks).encode())
h.update(self.f_key)
h.update(self.alpha_key)
h.update(str(self.encrypted).encode())
return h.digest() | Returns the keyed HMAC for authentication of this state data.
:param key: the key for the keyed hash function | Below is the the instruction that describes the task:
### Input:
Returns the keyed HMAC for authentication of this state data.
:param key: the key for the keyed hash function
### Response:
def get_hmac(self, key):
"""Returns the keyed HMAC for authentication of this state data.
:param key: the key for the keyed hash function
"""
h = HMAC.new(key, None, SHA256)
h.update(self.iv)
h.update(str(self.chunks).encode())
h.update(self.f_key)
h.update(self.alpha_key)
h.update(str(self.encrypted).encode())
return h.digest() |
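The tag is a keyed hash over the state fields fed in a fixed order, so any change or reordering of a field changes the digest. A standard-library sketch of the same chaining (the original uses PyCrypto's ``HMAC``/``SHA256``; the field values below are invented):
import hashlib
import hmac

def state_hmac(key, iv, chunks, f_key, alpha_key, encrypted):
    h = hmac.new(key, digestmod=hashlib.sha256)
    h.update(iv)                         # feed every state field, always in this order
    h.update(str(chunks).encode())
    h.update(f_key)
    h.update(alpha_key)
    h.update(str(encrypted).encode())
    return h.digest()

tag = state_hmac(b"secret-key", b"\x00" * 16, 8, b"fkey", b"akey", True)
print(tag.hex())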
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o | Process a paragraph, which includes all content under it | Below is the the instruction that describes the task:
### Input:
Process a paragraph, which includes all content under it
### Response:
def paragraph(node):
"""
Process a paragraph, which includes all content under it
"""
text = ''
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph('', ' '.join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o |
def AddMethod(obj, function, name=None):
"""
Adds either a bound method to an instance or the function itself (or an unbound method in Python 2) to a class.
If name is ommited the name of the specified function
is used by default.
Example::
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print(a.z)
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print(a.listIndex(5))
"""
if name is None:
name = function.__name__
else:
function = RenameFunction(function, name)
# Note the Python version checks - WLB
# Python 3.3 dropped the 3rd parameter from types.MethodType
if hasattr(obj, '__class__') and obj.__class__ is not type:
# "obj" is an instance, so it gets a bound method.
if sys.version_info[:2] > (3, 2):
method = MethodType(function, obj)
else:
method = MethodType(function, obj, obj.__class__)
else:
# Handle classes
method = function
setattr(obj, name, method) | Adds either a bound method to an instance or the function itself (or an unbound method in Python 2) to a class.
If name is omitted, the name of the specified function
is used by default.
Example::
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print(a.z)
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print(a.listIndex(5)) | Below is the the instruction that describes the task:
### Input:
Adds either a bound method to an instance or the function itself (or an unbound method in Python 2) to a class.
If name is ommited the name of the specified function
is used by default.
Example::
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print(a.z)
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print(a.listIndex(5))
### Response:
def AddMethod(obj, function, name=None):
"""
Adds either a bound method to an instance or the function itself (or an unbound method in Python 2) to a class.
    If name is omitted, the name of the specified function
is used by default.
Example::
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print(a.z)
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print(a.listIndex(5))
"""
if name is None:
name = function.__name__
else:
function = RenameFunction(function, name)
# Note the Python version checks - WLB
# Python 3.3 dropped the 3rd parameter from types.MethodType
if hasattr(obj, '__class__') and obj.__class__ is not type:
# "obj" is an instance, so it gets a bound method.
if sys.version_info[:2] > (3, 2):
method = MethodType(function, obj)
else:
method = MethodType(function, obj, obj.__class__)
else:
# Handle classes
method = function
setattr(obj, name, method) |
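A compact illustration of the two branches above under Python 3 semantics: an instance needs an explicitly bound method, while a class can take the plain function and bind it on attribute access.
from types import MethodType

class A:
    pass

def add(self, x, y):
    self.z = x + y

a = A()
a.add = MethodType(add, a)  # instance: store a bound method
a.add(2, 4)

A.add = add                 # class: plain function, bound on access
b = A()
b.add(1, 2)
print(a.z, b.z)             # 6 3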
def NHot(n, *xs, simplify=True):
"""
Return an expression that means
"exactly N input functions are true".
If *simplify* is ``True``, return a simplified expression.
"""
if not isinstance(n, int):
raise TypeError("expected n to be an int")
if not 0 <= n <= len(xs):
fstr = "expected 0 <= n <= {}, got {}"
raise ValueError(fstr.format(len(xs), n))
xs = [Expression.box(x).node for x in xs]
num = len(xs)
terms = list()
for hot_idxs in itertools.combinations(range(num), n):
hot_idxs = set(hot_idxs)
_xs = [xs[i] if i in hot_idxs else exprnode.not_(xs[i])
for i in range(num)]
terms.append(exprnode.and_(*_xs))
y = exprnode.or_(*terms)
if simplify:
y = y.simplify()
return _expr(y) | Return an expression that means
"exactly N input functions are true".
If *simplify* is ``True``, return a simplified expression. | Below is the the instruction that describes the task:
### Input:
Return an expression that means
"exactly N input functions are true".
If *simplify* is ``True``, return a simplified expression.
### Response:
def NHot(n, *xs, simplify=True):
"""
Return an expression that means
"exactly N input functions are true".
If *simplify* is ``True``, return a simplified expression.
"""
if not isinstance(n, int):
raise TypeError("expected n to be an int")
if not 0 <= n <= len(xs):
fstr = "expected 0 <= n <= {}, got {}"
raise ValueError(fstr.format(len(xs), n))
xs = [Expression.box(x).node for x in xs]
num = len(xs)
terms = list()
for hot_idxs in itertools.combinations(range(num), n):
hot_idxs = set(hot_idxs)
_xs = [xs[i] if i in hot_idxs else exprnode.not_(xs[i])
for i in range(num)]
terms.append(exprnode.and_(*_xs))
y = exprnode.or_(*terms)
if simplify:
y = y.simplify()
return _expr(y) |
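The expression enumerates every way of choosing which n inputs are true and ORs the corresponding AND terms. The same combinatorial structure over plain booleans, independent of pyeda:
from itertools import combinations

def n_hot(n, values):
    """True iff exactly n of the boolean inputs are true (sum-of-products form)."""
    values = list(values)
    for hot in combinations(range(len(values)), n):
        hot = set(hot)
        if all(values[i] if i in hot else not values[i] for i in range(len(values))):
            return True
    return False

print(n_hot(2, [True, False, True]))  # True
print(n_hot(1, [True, False, True]))  # False
Semantically this reduces to ``sum(values) == n``; the expanded sum-of-products form mirrors how ``NHot`` builds the expression term by term.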
def run(self, key, value, num_alts):
"""Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
        :param int num_alts: number of alternative alleles
"""
field_info = self.header.get_info_field_info(key)
if not isinstance(value, list):
return
TABLE = {
".": len(value),
"A": num_alts,
"R": num_alts + 1,
"G": binomial(num_alts + 1, 2), # diploid only at the moment
}
expected = TABLE.get(field_info.number, field_info.number)
if len(value) != expected:
tpl = "Number of elements for INFO field {} is {} instead of {}"
warnings.warn(
tpl.format(key, len(value), field_info.number), exceptions.IncorrectListLength
) | Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
:param int num_alts: number of alternative alleles | Below is the the instruction that describes the task:
### Input:
Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
:param int num_alts: number of alternative alleles
### Response:
def run(self, key, value, num_alts):
"""Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
        :param int num_alts: number of alternative alleles
"""
field_info = self.header.get_info_field_info(key)
if not isinstance(value, list):
return
TABLE = {
".": len(value),
"A": num_alts,
"R": num_alts + 1,
"G": binomial(num_alts + 1, 2), # diploid only at the moment
}
expected = TABLE.get(field_info.number, field_info.number)
if len(value) != expected:
tpl = "Number of elements for INFO field {} is {} instead of {}"
warnings.warn(
tpl.format(key, len(value), field_info.number), exceptions.IncorrectListLength
) |
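A standalone sketch of the expected-count dispatch, mirroring the table above (``G`` keeps the diploid-only shortcut noted in the code; values below are made up):
from math import comb

def expected_count(number_code, num_alts, actual_len):
    table = {
        ".": actual_len,             # unspecified: accept whatever length is present
        "A": num_alts,               # one value per alternate allele
        "R": num_alts + 1,           # one value per allele, reference included
        "G": comb(num_alts + 1, 2),  # diploid-only genotype count, as above
    }
    return table.get(number_code, number_code)  # a fixed integer Number passes through

print(expected_count("A", num_alts=2, actual_len=5))  # 2
print(expected_count("R", num_alts=2, actual_len=5))  # 3
print(expected_count(4, num_alts=2, actual_len=5))    # 4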
def resolve_upload_path(self, filename=None):
"""Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
"""
if filename is None:
return constants.UPLOAD_VOLUME
return os.path.join(constants.UPLOAD_VOLUME, filename) | Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor | Below is the the instruction that describes the task:
### Input:
Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
### Response:
def resolve_upload_path(self, filename=None):
"""Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
"""
if filename is None:
return constants.UPLOAD_VOLUME
return os.path.join(constants.UPLOAD_VOLUME, filename) |
def _compute_e2_factor(self, imt, vs30):
"""
Compute and return e2 factor, equation 19, page 80.
"""
e2 = np.zeros_like(vs30)
if imt.name == "PGV":
period = 1
elif imt.name == "PGA":
period = 0
else:
period = imt.period
if period < 0.35:
return e2
else:
idx = vs30 <= 1000
if period >= 0.35 and period <= 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(period / 0.35))
elif period > 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(2.0 / 0.35))
return e2 | Compute and return e2 factor, equation 19, page 80. | Below is the the instruction that describes the task:
### Input:
Compute and return e2 factor, equation 19, page 80.
### Response:
def _compute_e2_factor(self, imt, vs30):
"""
Compute and return e2 factor, equation 19, page 80.
"""
e2 = np.zeros_like(vs30)
if imt.name == "PGV":
period = 1
elif imt.name == "PGA":
period = 0
else:
period = imt.period
if period < 0.35:
return e2
else:
idx = vs30 <= 1000
if period >= 0.35 and period <= 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(period / 0.35))
elif period > 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(2.0 / 0.35))
return e2 |
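From the branches above, the factor is zero for PGA (period 0) and for stiff sites with vs30 above 1000 m/s, and the period term saturates at 2.0 s. An equivalent compact check (input values are illustrative, not GMPE table values):
import numpy as np

def e2_factor(period, vs30):
    e2 = np.zeros_like(vs30, dtype=float)
    if period < 0.35:
        return e2
    idx = vs30 <= 1000
    capped = min(period, 2.0)  # the period term saturates at 2.0 s
    e2[idx] = -0.25 * np.log(vs30[idx] / 1000) * np.log(capped / 0.35)
    return e2

vs30 = np.array([300.0, 760.0, 1500.0])
print(e2_factor(1.0, vs30))  # non-zero for the two soft sites, 0.0 for vs30 > 1000
print(e2_factor(0.1, vs30))  # all zeros: period below 0.35 s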
def OnStartup(self):
"""A handler that is called on client startup."""
# We read the transaction log and fail any requests that are in it. If there
    # is anything in the transaction log we assume it's there because we crashed
# last time and let the server know.
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status
self.SendReply(
status,
request_id=last_request.request_id,
response_id=1,
session_id=last_request.session_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
self.transaction_log.Clear()
# Inform the server that we started.
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1) | A handler that is called on client startup. | Below is the the instruction that describes the task:
### Input:
A handler that is called on client startup.
### Response:
def OnStartup(self):
"""A handler that is called on client startup."""
# We read the transaction log and fail any requests that are in it. If there
# is anything in the transaction log we assume its there because we crashed
# last time and let the server know.
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status
self.SendReply(
status,
request_id=last_request.request_id,
response_id=1,
session_id=last_request.session_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
self.transaction_log.Clear()
# Inform the server that we started.
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1) |
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers | Aggregated set of significant figures to bump | Below is the the instruction that describes the task:
### Input:
Aggregated set of significant figures to bump
### Response:
def get_all_triggers(bump, file_triggers):
"""Aggregated set of significant figures to bump"""
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers |
def apply_correlation(self, sites, imt, residuals, stddev_intra=0):
"""
Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:param stddev_intra:
Intra-event standard deviation array. Note that different sites do
not necessarily have the same intra-event standard deviation.
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals.
"""
# intra-event residual for a single realization is a product
# of lower-triangle decomposed correlation matrix and vector
# of N random numbers (where N is equal to number of sites).
# we need to do that multiplication once per realization
# with the same matrix and different vectors.
try:
corma = self.cache[imt]
except KeyError:
corma = self.get_lower_triangle_correlation_matrix(
sites.complete, imt)
self.cache[imt] = corma
if len(sites.complete) == len(sites):
return numpy.dot(corma, residuals)
# it is important to allocate little memory, this is why I am
# accumulating below; if S is the length of the complete sites
# the correlation matrix has shape (S, S) and the residuals (N, s),
# where s is the number of samples
return numpy.sum(corma[sites.sids, sid] * res
for sid, res in zip(sites.sids, residuals)) | Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:param stddev_intra:
Intra-event standard deviation array. Note that different sites do
not necessarily have the same intra-event standard deviation.
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals. | Below is the instruction that describes the task:
### Input:
Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:param stddev_intra:
Intra-event standard deviation array. Note that different sites do
not necessarily have the same intra-event standard deviation.
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals.
### Response:
def apply_correlation(self, sites, imt, residuals, stddev_intra=0):
"""
Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:param stddev_intra:
Intra-event standard deviation array. Note that different sites do
not necessarily have the same intra-event standard deviation.
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals.
"""
# intra-event residual for a single realization is a product
# of lower-triangle decomposed correlation matrix and vector
# of N random numbers (where N is equal to number of sites).
# we need to do that multiplication once per realization
# with the same matrix and different vectors.
try:
corma = self.cache[imt]
except KeyError:
corma = self.get_lower_triangle_correlation_matrix(
sites.complete, imt)
self.cache[imt] = corma
if len(sites.complete) == len(sites):
return numpy.dot(corma, residuals)
# it is important to allocate little memory, this is why I am
# accumulating below; if S is the length of the complete sites
# the correlation matrix has shape (S, S) and the residuals (N, s),
# where s is the number of samples
return numpy.sum(corma[sites.sids, sid] * res
for sid, res in zip(sites.sids, residuals)) |
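The heart of the method is a matrix product between a lower-triangular decomposition of the site-to-site correlation matrix and uncorrelated residual samples. The sketch below reproduces that step with a toy exponential correlation model and a 10 km correlation range; both are illustrative assumptions, not the model of any particular correlation class.

import numpy as np

distances = np.array([[0.0, 5.0, 20.0],
                      [5.0, 0.0, 15.0],
                      [20.0, 15.0, 0.0]])      # km between 3 sites
corr = np.exp(-distances / 10.0)               # toy correlation model
corma = np.linalg.cholesky(corr)               # lower-triangular factor
residuals = np.random.normal(size=(3, 1000))   # 3 sites x 1000 realizations
correlated = corma.dot(residuals)
print(np.corrcoef(correlated)[0, 1])           # close to corr[0, 1], about 0.61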
def formatTime(self, record, datefmt=None): # noqa
"""
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
"""
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s | Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds. | Below is the instruction that describes the task:
### Input:
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
### Response:
def formatTime(self, record, datefmt=None): # noqa
"""
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
"""
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s |
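A self-contained variant of the same idea using only the standard library: overriding Formatter.formatTime makes %f (microseconds) usable in datefmt. Unlike the method above it formats record.created rather than calling datetime.now(), so the printed time is tied to when the record was emitted; treat it as a sketch, not the original class.

import datetime
import logging

class MicrosecondFormatter(logging.Formatter):
    def formatTime(self, record, datefmt=None):
        created = datetime.datetime.fromtimestamp(record.created)
        if datefmt:
            return created.strftime(datefmt)
        return created.strftime("%Y-%m-%d %H:%M:%S.%f")

handler = logging.StreamHandler()
handler.setFormatter(MicrosecondFormatter("%(asctime)s %(message)s",
                                           datefmt="%H:%M:%S.%f"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("timestamp now carries microseconds")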
def _enrich_link(self, glossary):
"""
Enrich the dict glossary['link'] with an identifier onto the model
"""
try:
Model = apps.get_model(*glossary['link']['model'].split('.'))
obj = Model.objects.get(pk=glossary['link']['pk'])
glossary['link'].update(identifier=str(obj))
except (KeyError, ObjectDoesNotExist):
pass | Enrich the dict glossary['link'] with an identifier onto the model | Below is the instruction that describes the task:
### Input:
Enrich the dict glossary['link'] with an identifier onto the model
### Response:
def _enrich_link(self, glossary):
"""
Enrich the dict glossary['link'] with an identifier onto the model
"""
try:
Model = apps.get_model(*glossary['link']['model'].split('.'))
obj = Model.objects.get(pk=glossary['link']['pk'])
glossary['link'].update(identifier=str(obj))
except (KeyError, ObjectDoesNotExist):
pass |
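The same fail-soft enrichment pattern can be shown without Django: look the object up, attach a display string, and silently skip links that are malformed or point at a missing object. The REGISTRY dict below is a hypothetical stand-in for the ORM lookup.

REGISTRY = {('auth', 'User', 1): 'admin'}  # stand-in for Model.objects.get(pk=...)

def enrich_link(glossary):
    try:
        app_label, model_name = glossary['link']['model'].split('.')
        identifier = REGISTRY[(app_label, model_name, glossary['link']['pk'])]
        glossary['link'].update(identifier=identifier)
    except KeyError:
        pass

link = {'link': {'model': 'auth.User', 'pk': 1}}
enrich_link(link)
print(link)  # {'link': {'model': 'auth.User', 'pk': 1, 'identifier': 'admin'}}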
def datapoint_indices_for_tensor(self, tensor_index):
""" Returns the indices for all datapoints in the given tensor. """
if tensor_index >= self._num_tensors:
raise ValueError('Tensor index %d is greater than the number of tensors (%d)' %(tensor_index, self._num_tensors))
return self._file_num_to_indices[tensor_index] | Returns the indices for all datapoints in the given tensor. | Below is the instruction that describes the task:
### Input:
Returns the indices for all datapoints in the given tensor.
### Response:
def datapoint_indices_for_tensor(self, tensor_index):
""" Returns the indices for all datapoints in the given tensor. """
if tensor_index >= self._num_tensors:
raise ValueError('Tensor index %d is greater than the number of tensors (%d)' %(tensor_index, self._num_tensors))
return self._file_num_to_indices[tensor_index] |
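A minimal sketch of the lookup, assuming the dataset precomputed a tensor-index to datapoint-indices mapping when the tensor files were written; the ranges below are made up.

file_num_to_indices = {0: range(0, 100), 1: range(100, 200)}
num_tensors = len(file_num_to_indices)

def datapoint_indices_for_tensor(tensor_index):
    if tensor_index >= num_tensors:
        raise ValueError('Tensor index %d is greater than the number of tensors (%d)'
                         % (tensor_index, num_tensors))
    return file_num_to_indices[tensor_index]

print(list(datapoint_indices_for_tensor(1))[:3])  # [100, 101, 102]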
def _python_type(self, key, value):
"""Returns proper type from the schema"""
try:
field_type = self._sp_cols[key]['type']
if field_type in ['Number', 'Currency']:
return float(value)
elif field_type == 'DateTime':
# Need to remove the '123;#' from created dates, but we will do it for all dates
# self.date_format = re.compile('\d+-\d+-\d+ \d+:\d+:\d+')
value = self.date_format.search(value).group(0)
# NOTE: this used to be rounded to just the date (7/28/2018)
return datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
elif field_type == 'Boolean':
if value == '1':
return 'Yes'
elif value == '0':
return 'No'
else:
return ''
elif field_type in ('User', 'UserMulti'):
# Sometimes the User no longer exists or
# has a different ID number so we just remove the "123;#"
# from the beginning of their name
if value in self.users['sp']:
return self.users['sp'][value]
elif '#' in value:
return value.split('#')[1]
else:
return value
else:
return value
except AttributeError:
return value | Returns proper type from the schema | Below is the instruction that describes the task:
### Input:
Returns proper type from the schema
### Response:
def _python_type(self, key, value):
"""Returns proper type from the schema"""
try:
field_type = self._sp_cols[key]['type']
if field_type in ['Number', 'Currency']:
return float(value)
elif field_type == 'DateTime':
# Need to remove the '123;#' from created dates, but we will do it for all dates
# self.date_format = re.compile('\d+-\d+-\d+ \d+:\d+:\d+')
value = self.date_format.search(value).group(0)
# NOTE: this used to be rounded to just the date (7/28/2018)
return datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
elif field_type == 'Boolean':
if value == '1':
return 'Yes'
elif value == '0':
return 'No'
else:
return ''
elif field_type in ('User', 'UserMulti'):
# Sometimes the User no longer exists or
# has a different ID number so we just remove the "123;#"
# from the beginning of their name
if value in self.users['sp']:
return self.users['sp'][value]
elif '#' in value:
return value.split('#')[1]
else:
return value
else:
return value
except AttributeError:
return value |
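A standalone sketch of the same schema-driven conversion, with a hypothetical column schema, user map, and the "123;#value" prefix handling; the real class pulls the schema and the user list from SharePoint.

import re
from datetime import datetime

SCHEMA = {'Amount': 'Currency', 'Created': 'DateTime', 'Done': 'Boolean', 'Owner': 'User'}
USERS = {'7;#Jane Doe': 'Jane Doe'}
DATE_RE = re.compile(r'\d+-\d+-\d+ \d+:\d+:\d+')

def python_type(key, value):
    field_type = SCHEMA.get(key)
    if field_type in ('Number', 'Currency'):
        return float(value)
    if field_type == 'DateTime':
        return datetime.strptime(DATE_RE.search(value).group(0), '%Y-%m-%d %H:%M:%S')
    if field_type == 'Boolean':
        return {'1': 'Yes', '0': 'No'}.get(value, '')
    if field_type in ('User', 'UserMulti'):
        return USERS.get(value, value.split('#')[1] if '#' in value else value)
    return value

print(python_type('Amount', '19.5'), python_type('Created', '1;#2018-07-28 09:30:00'))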
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: ' + str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape) == 0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell() + 1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append(PIL.Image.fromarray(im))
# Done
return images | readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images. | Below is the instruction that describes the task:
### Input:
readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
### Response:
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: ' + str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape) == 0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell() + 1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append(PIL.Image.fromarray(im))
# Done
return images |
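Hypothetical usage, assuming Pillow and numpy are installed and an animated GIF exists at 'animated.gif'; the frame count and shape in the comment are illustrative.

frames = readGif('animated.gif', asNumpy=True)
print(len(frames), frames[0].shape)   # e.g. 24 (120, 160, 3)
pil_frames = readGif('animated.gif', asNumpy=False)
pil_frames[0].save('first_frame.png')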
def lookup_thread_id(self):
"""Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
"""
query_string = 'in:title "%s" repo:%s/%s' % (
self.topic, self.owner, self.realm)
cache_key = (self.owner, self.realm, self.topic)
result = self.lookup_cache_key(cache_key)
if result is not None:
my_req = self.raw_pull(result)
if my_req.status_code != 200:
result = None # Cached item was no good
elif my_req.json()['title'] != self.topic:
logging.debug('Title must have changed; ignore cache')
result = None
else:
logging.debug('Using cached thread id %s for %s', str(result),
str(cache_key))
return result
data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
if data['total_count'] == 1: # unique match
if data['items'][0]['title'] == self.topic:
result = data['items'][0]['number']
else:
result = None
elif data['total_count'] > 1: # multiple matches since github doesn't
searched_data = [ # have unique search we must filter
item for item in data['items'] if item['title'] == self.topic]
if not searched_data: # no matches
return None
elif len(searched_data) > 1:
raise yap_exceptions.UnableToFindUniqueTopic(
self.topic, data['total_count'], '')
else:
assert len(searched_data) == 1, (
'Confused searching for topic "%s"' % str(self.topic))
result = searched_data[0]['number']
else:
result = None
self.update_cache_key(cache_key, result)
return result | Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init. | Below is the instruction that describes the task:
### Input:
Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
### Response:
def lookup_thread_id(self):
"""Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
"""
query_string = 'in:title "%s" repo:%s/%s' % (
self.topic, self.owner, self.realm)
cache_key = (self.owner, self.realm, self.topic)
result = self.lookup_cache_key(cache_key)
if result is not None:
my_req = self.raw_pull(result)
if my_req.status_code != 200:
result = None # Cached item was no good
elif my_req.json()['title'] != self.topic:
logging.debug('Title must have changed; ignore cache')
result = None
else:
logging.debug('Using cached thread id %s for %s', str(result),
str(cache_key))
return result
data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
if data['total_count'] == 1: # unique match
if data['items'][0]['title'] == self.topic:
result = data['items'][0]['number']
else:
result = None
elif data['total_count'] > 1: # multiple matches since github doesn't
searched_data = [ # have unique search we must filter
item for item in data['items'] if item['title'] == self.topic]
if not searched_data: # no matches
return None
elif len(searched_data) > 1:
raise yap_exceptions.UnableToFindUniqueTopic(
self.topic, data['total_count'], '')
else:
assert len(searched_data) == 1, (
'Confused searching for topic "%s"' % str(self.topic))
result = searched_data[0]['number']
else:
result = None
self.update_cache_key(cache_key, result)
return result |
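The disambiguation step is worth isolating: GitHub's issue search matches loosely, so results still have to be filtered on an exact title before a thread id is trusted. The payload below is fabricated to mimic the shape of a search response.

topic = "Weekly status"
data = {'total_count': 2,
        'items': [{'title': 'Weekly status', 'number': 12},
                  {'title': 'Weekly status report', 'number': 31}]}
matches = [item for item in data['items'] if item['title'] == topic]
thread_id = matches[0]['number'] if len(matches) == 1 else None
print(thread_id)  # 12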
def _concrete_instance(self, instance_doc):
"""Concretize an instance document.
:param dict instance_doc: A document describing an instance. Should come from the API.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None.
:rtype: :py:class:`bases.BaseInstance`
"""
if not isinstance(instance_doc, dict):
return None
# Attempt to instantiate the appropriate class for the given instance document.
try:
service = instance_doc['service']
cls = self._service_class_map[service]
return cls(instance_document=instance_doc, instances=self)
# If construction fails, log the exception and return None.
except Exception as ex:
logger.exception(ex)
logger.error(
'Instance construction failed. You probably need to upgrade to a more '
'recent version of the client. Instance document which generated this '
'warning: {}'.format(instance_doc)
)
return None | Concretize an instance document.
:param dict instance_doc: A document describing an instance. Should come from the API.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None.
:rtype: :py:class:`bases.BaseInstance` | Below is the instruction that describes the task:
### Input:
Concretize an instance document.
:param dict instance_doc: A document describing an instance. Should come from the API.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None.
:rtype: :py:class:`bases.BaseInstance`
### Response:
def _concrete_instance(self, instance_doc):
"""Concretize an instance document.
:param dict instance_doc: A document describing an instance. Should come from the API.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None.
:rtype: :py:class:`bases.BaseInstance`
"""
if not isinstance(instance_doc, dict):
return None
# Attempt to instantiate the appropriate class for the given instance document.
try:
service = instance_doc['service']
cls = self._service_class_map[service]
return cls(instance_document=instance_doc, instances=self)
# If construction fails, log the exception and return None.
except Exception as ex:
logger.exception(ex)
logger.error(
'Instance construction failed. You probably need to upgrade to a more '
'recent version of the client. Instance document which generated this '
'warning: {}'.format(instance_doc)
)
return None |
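A minimal sketch of the service-name to class dispatch, with a hypothetical MongodbInstance standing in for the client library's real instance types.

class MongodbInstance:
    def __init__(self, instance_document, instances=None):
        self.name = instance_document['name']

SERVICE_CLASS_MAP = {'mongodb': MongodbInstance}

def concrete_instance(doc):
    if not isinstance(doc, dict):
        return None
    try:
        return SERVICE_CLASS_MAP[doc['service']](instance_document=doc)
    except Exception:
        return None  # unknown service or malformed document

print(concrete_instance({'service': 'mongodb', 'name': 'db1'}).name)  # db1
print(concrete_instance({'service': 'unknown'}))                      # None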
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:] | Write string s to the stream if a whole new block
is ready to be written. | Below is the instruction that describes the task:
### Input:
Write string s to the stream if a whole new block
is ready to be written.
### Response:
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:] |
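A standalone sketch of the block-buffered write: data accumulates in a byte buffer and only whole blocks of bufsize bytes reach the underlying file object, while the leftover tail stays buffered until more data arrives.

import io

class BlockWriter:
    def __init__(self, fileobj, bufsize=16):
        self.fileobj, self.bufsize, self.buf = fileobj, bufsize, b""
    def write(self, s):
        self.buf += s
        while len(self.buf) > self.bufsize:
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]

out = io.BytesIO()
w = BlockWriter(out, bufsize=16)
w.write(b"x" * 40)
print(len(out.getvalue()), len(w.buf))  # 32 8 -- only whole 16-byte blocks were flushed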