| function (string, 11 to 56k chars) | repo_name (string, 5 to 60 chars) | features (sequence) |
---|---|---|
def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure)) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def __init__(self, field):
self.field = field | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ? | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value)) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def __delete__(self, instance):
del instance.__dict__[self.field.name]
setattr(instance, self.field.attname, json.dumps(None)) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def get_attname(self):
return "%s_json" % self.name | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
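Together with the `JSONDescriptor` methods above, `contribute_to_class` wires up a descriptor-backed JSON field: the live Python value sits in `instance.__dict__` while the serialized string is kept on a shadow `<name>_json` attribute. A minimal self-contained sketch of the same pattern, without Django (class and attribute names are illustrative):

```python
import json

class JSONDescriptor:
    """Keep a Python value and its JSON string in sync on the instance."""

    def __init__(self, name):
        self.name = name
        self.attname = "%s_json" % name  # shadow attribute, mirroring get_attname()

    def __get__(self, instance, owner):
        if instance is None:
            raise AttributeError(self.name)
        if self.name not in instance.__dict__:
            # lazily decode the stored JSON string on first access
            raw = getattr(instance, self.attname, "null")
            instance.__dict__[self.name] = json.loads(raw)
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        instance.__dict__[self.name] = value
        setattr(instance, self.attname, json.dumps(value))

class Page:
    meta = JSONDescriptor("meta")

p = Page()
p.meta = {"tags": ["a", "b"]}
print(p.meta_json)  # {"tags": ["a", "b"]}
print(p.meta)       # {'tags': ['a', 'b']}
```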
def fix_init_kwarg(self, sender, args, kwargs, **signal_kwargs):
# Anything passed in as self.name is assumed to come from a serializer and
# will be treated as a json string.
if self.name in kwargs:
value = kwargs.pop(self.name) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def formfield(self, *args, **kwargs):
kwargs["form_class"] = JSONFormField
return super(JSONField, self).formfield(*args, **kwargs) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def get_internal_type(self):
return "TextField" | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def to_python(self, value):
if not value:
return [] | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def get_prep_value(self, value):
return ','.join(value) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def formfield(self, **kwargs):
# This is necessary because Django hard-codes TypedChoiceField for fields with choices.
defaults = {
'widget': forms.CheckboxSelectMultiple,
'choices': self.get_choices(include_blank=False),
'label': capfirst(self.verbose_name),
'required': not self.blank,
'help_text': self.help_text
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default() | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def validate(self, value, model_instance):
invalid_values = []
for val in value:
try:
validate_slug(val)
except ValidationError:
invalid_values.append(val) | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def _get_choices(self):
if isinstance(self._choices, RegistryIterator):
return self._choices.copy()
elif hasattr(self._choices, 'next'):
choices, self._choices = itertools.tee(self._choices)
return choices
else:
return self._choices | ithinksw/philo | [
50,
12,
50,
3,
1274327279
] |
def load_source(module_name, module_path):
    """Load a Python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
    import importlib.machinery
    loader = importlib.machinery.SourceFileLoader(module_name, module_path)
    module = loader.load_module()
else:
    # guard: on Python 2, `module` would otherwise be unbound at the return below
    raise RuntimeError("load_source requires Python 3")
return module
1427,
229,
1427,
63,
1456456866
] |
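A hedged usage sketch of `load_source` (the file and handler names are hypothetical):

```python
# Assuming a file ./service.py defining `def handler(event, context): ...`
module = load_source("service", "./service.py")
result = module.handler({"key": "value"}, None)  # hypothetical handler signature
```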
def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False, | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False, | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None, | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False, | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
    If True, copy only the minimal template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src) | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name) | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
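A usage sketch, assuming a project at `/path/to/project` whose `service.py` defines `handler` (note that the function `os.chdir`s into `src` as a side effect):

```python
fn = get_callable_handler_function("/path/to/project", "service.handler")
fn({"key": "value"}, None)  # event and context values are illustrative
```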
def _install_packages(path, packages):
"""Install all packages listed to the target directory.
Ignores any package that includes Python itself, as well as python-lambda,
since those are only needed for deploying and not for running the code.
:param str path:
Path to copy installed pip packages to.
:param list packages:
A list of packages to be installed via pip.
"""
def _filter_blacklist(package):
blacklist = ["-i", "#", "Python==", "python-lambda=="]
return not any(package.startswith(entry) for entry in blacklist)
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith("-e "):
package = package.replace("-e ", "")
print("Installing {package}".format(package=package))
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
package,
"-t",
path,
"--ignore-installed",
]
)
print(
"Install directory contents are now: {directory}".format(
directory=os.listdir(path)
)
) | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
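The blacklist predicate drops pip index flags, comment lines, and self-referential packages. A standalone illustration of the same check on a hypothetical requirements list:

```python
blacklist = ["-i", "#", "Python==", "python-lambda=="]

def keep(package):
    # keep a line only if it starts with none of the blacklist prefixes
    return not any(package.startswith(entry) for entry in blacklist)

reqs = [
    "requests==2.31.0",
    "# dev only",
    "python-lambda==3.2.0",
    "-i https://pypi.org/simple",
]
print([r for r in reqs if keep(r)])  # ['requests==2.31.0']
```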
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
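For example, assuming `us-east-1` falls through to the default `aws` prefix in `ARN_PREFIXES`:

```python
print(get_role_name("us-east-1", "123456789012", "lambda_basic_execution"))
# arn:aws:iam::123456789012:role/lambda_basic_execution
```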
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None, | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Environment variables take precedence over the config file here.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
) | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
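A hedged sample of the config keys `create_function` reads (all values illustrative; when a key is absent, the defaults shown in the code apply):

```python
cfg = {
    "function_name": "my-function",    # overridden by LAMBDA_FUNCTION_NAME if set
    "runtime": "python3.9",            # defaults to "python2.7" if omitted
    "role": "lambda_basic_execution",  # defaults as shown above
    "handler": "service.handler",
    "region": "us-east-1",
    "timeout": 15,
    "memory_size": 512,
    "bucket_name": "my-bucket",        # only consulted when use_s3=True
    "tags": {"team": "data"},
    "environment_variables": {"STAGE": "prod"},
}
```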
def upload_s3(cfg, path_to_zip_file, *use_s3):
    """Upload a function's zip archive to AWS S3; any extra positional
    argument makes `use_s3` truthy, so the generated S3 key is returned."""
print("Uploading your new Lambda function")
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"s3",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
byte_stream = b""
with open(path_to_zip_file, mode="rb") as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get("s3_key_prefix", "/dist")
checksum = hashlib.new("md5", byte_stream).hexdigest()
timestamp = str(time.time())
filename = "{prefix}{checksum}-{ts}.zip".format(
prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
)
# Environment variables take precedence over the config file here.
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
kwargs = {
"Bucket": "{}".format(buck_name),
"Key": "{}".format(filename),
"Body": byte_stream,
}
client.put_object(**kwargs)
print("Finished uploading {} to S3 bucket {}".format(func_name, buck_name))
if use_s3:
return filename | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency) | nficano/python-lambda | [
1427,
229,
1427,
63,
1456456866
] |
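A few worked cases for the clamp above:

```python
print(get_concurrency({"concurrency": "5"}))  # 5  -- strings are coerced by int()
print(get_concurrency({"concurrency": -2}))   # 0  -- negatives clamped by max()
print(get_concurrency({}))                    # 0  -- absent key defaults to 0
```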
def __init__(self, dict_):
super(self.__class__, self).__init__(dict_) | 2Checkout/2checkout-python | [
24,
19,
24,
6,
1351273541
] |
def find(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/detail_sale', params))
return response.sale | 2Checkout/2checkout-python | [
24,
19,
24,
6,
1351273541
] |
def list(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/list_sales', params))
return response.sale_summary | 2Checkout/2checkout-python | [
24,
19,
24,
6,
1351273541
] |
def stop(self, params=None):
if params is None:
params = dict()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
return Api.call('sales/stop_lineitem_recurring', params)
elif hasattr(self, 'sale_id'):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
params = {'lineitem_id': lineitem_id}
result[i] = Api.call('sales/stop_lineitem_recurring', params)
i += 1
response = { "response_code": "OK",
"response_message": str(len(result)) + " lineitems stopped successfully"
}
else:
response = {
"response_code": "NOTICE",
"response_message": "No active recurring lineitems"
}
else:
response = { "response_code": "NOTICE",
"response_message": "This method can only be called on a sale or lineitem"
}
return Sale(response) | 2Checkout/2checkout-python | [
24,
19,
24,
6,
1351273541
] |
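A hedged usage sketch of the sale API above (the sale id and the response attribute access are assumptions inferred from the code):

```python
sale = Sale.find({'sale_id': 1234567890})  # fetch a sale by id
result = sale.stop()                       # stop all active recurring lineitems
print(result.response_code, result.response_message)
```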
def comment(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/create_comment', params)) | 2Checkout/2checkout-python | [
24,
19,
24,
6,
1351273541
] |
def connect_client():
"""Connects to Mongo client"""
try:
return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT']))
except errors.ConnectionFailure as e:
raise e | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def close_db(error):
"""Closes connection with Mongo client"""
if hasattr(g, 'mongo_client'):
g.mongo_client.close() | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def index():
"""Landing page for SciNet"""
return render_template("index.html") | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def faq():
"""FAQ page for SciNet"""
return render_template("faq.html") | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def leaderboard():
"""Leaderboard page for SciNet"""
get_db()
groups = get_groups(g.groups_collection)
return render_template("leaderboard.html", groups=groups) | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def ping_endpoint():
    """API endpoint that determines whether a given article hash exists in the db
:return: status code 204 -- hash not present, continue submission
:return: status code 201 -- hash already exists, drop submission
"""
db = get_db()
target_hash = request.form.get('hash')
if db.raw.find({'hash': target_hash}).count():
return Response(status=201)
else:
return Response(status=204) | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
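A client-side sketch for this endpoint (the route path and host are assumptions; only the form field `hash` and the status codes come from the code above):

```python
import requests

resp = requests.post("http://localhost:5000/ping", data={"hash": "abc123"})
if resp.status_code == 204:
    print("hash not present, continue submission")
elif resp.status_code == 201:
    print("hash already exists, drop submission")
```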
def ArticleEndpoint():
"""Eventual landing page for searching/retrieving articles"""
if request.method == 'GET':
return render_template("articles.html") | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def raw_endpoint():
"""API endpoint for submitting raw article data
:return: status code 405 - invalid JSON or invalid request type
:return: status code 400 - unsupported content-type or invalid publisher
:return: status code 201 - successful submission
"""
# Ensure post's content-type is supported
if request.headers['content-type'] == 'application/json':
# Ensure data is a valid JSON
try:
user_submission = json.loads(request.data)
except ValueError:
return Response(status=405)
# generate UID for new entry
uid = get_id()
# store incoming JSON in raw storage
file_path = os.path.join(
HERE,
'raw_payloads',
str(uid)
)
store_json_to_file(user_submission, file_path)
# hand submission to controller and return Response
db = get_db()
controller_response = JSONController(user_submission, db=db, _id=uid).submit()
return controller_response
# User submitted an unsupported content-type
else:
return Response(status=400) | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def request_new_group():
# Grab submission form data and prepare email message
data = request.json
msg = "Someone has requested that you add {group_name} to the leaderboard \
groups. The group's website is {group_website} and the submitter can \
be reached at {submitter_email}.".format(
group_name=data['new_group_name'],
group_website=data['new_group_website'],
submitter_email=data['submitter_email'])
return Response(status=200)
'''
try:
email(
subject="SciNet: A new group has been requested",
fro="no-reply@scinet.osf.io",
to='harry@scinet.osf.io',
msg=msg)
return Response(status=200)
except:
return Response(status=500)
''' | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def not_found(error):
return make_response(jsonify( { 'error': 'Page Not Found' } ), 404) | CenterForOpenScience/scinet | [
14,
6,
14,
9,
1368717157
] |
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['mount_point'] = dict(required=False, type='str', default='approle')
module = hashivault_init(argspec)
result = hashivault_approle_role_get(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result) | TerryHowe/ansible-modules-hashivault | [
414,
143,
414,
44,
1462216977
] |
def hashivault_approle_role_get(params):
name = params.get('name')
client = hashivault_auth_client(params)
result = client.get_role(name, mount_point=params.get('mount_point'))
return {'role': result} | TerryHowe/ansible-modules-hashivault | [
414,
143,
414,
44,
1462216977
] |
def parse(self, response):
#obtains links from page to page and passes links to parse_playerURL
sel = Selector(response) #define selector based on response object (points to urls in start_urls by default)
url_list = sel.xpath('//a[@class="display-block padding-0"]/@href') #obtain a list of href links that contain relative links of players
for i in url_list:
relative_url = self.clean_str(i.extract())  # i is a selector, so extract it to obtain a unicode object
print(urljoin(response.url, relative_url))  # urljoin merges the base URL and the relative path into one absolute link
req = Request(urljoin(response.url, relative_url),callback=self.parse_playerURL) #pass on request with new urls to parse_playerURL
req.headers["User-Agent"] = self.random_ua()
yield req | HashirZahir/FIFA-Player-Ratings | [
8,
8,
8,
1,
1455466084
] |
def parse_playerURL(self, response):
#parses player specific data into items list
site = Selector(response)
items = []
item = PlayerItem()
item['1name'] = (response.url).rsplit("/")[-2].replace("-"," ")
title = self.clean_str(site.xpath('/html/head/title/text()').extract_first())
item['OVR'] = title.partition("FIFA 16 -")[1].split("-")[0]
item['POS'] = self.clean_str(site.xpath('//div[@class="playercard-position"]/text()').extract_first())
#stats = site.xpath('//div[@class="row player-center-container"]/div/a')
stat_names = site.xpath('//span[@class="player-stat-title"]')
stat_values = site.xpath('//span[contains(@class, "player-stat-value")]')
for index in range(len(stat_names)):
attr_name = stat_names[index].xpath('.//text()').extract_first()
item[attr_name] = stat_values[index].xpath('.//text()').extract_first()
items.append(item)
return items | HashirZahir/FIFA-Player-Ratings | [
8,
8,
8,
1,
1455466084
] |
def clean_str(self,ustring):
# removes weird unicode chars (e.g. \u0102), whitespace, tabs, etc. to form a clean string
return str(ustring.encode('ascii', 'replace')).strip() | HashirZahir/FIFA-Player-Ratings | [
8,
8,
8,
1,
1455466084
] |
def random_ua(self):
#randomise user-agent from list to reduce chance of being banned
ua = random.choice(settings.get('USER_AGENT_LIST'))
if not ua:
    # assumed intent: fall back to a fixed UA only when the settings list
    # yields nothing (the original overwrote the random choice every time)
    ua = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36'
return ua | HashirZahir/FIFA-Player-Ratings | [
8,
8,
8,
1,
1455466084
] |
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def _send_request(
self,
request: HttpRequest,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def parse_dsn(dsn_string):
"""Parse a connection string and return the associated driver"""
dsn = urlparse(dsn_string)
scheme = dsn.scheme.split('+')[0]
username = password = host = port = None
host = dsn.netloc
if '@' in host:
username, host = host.split('@')
if ':' in username:
username, password = username.split(':')
password = unquote(password)
username = unquote(username)
if ':' in host:
host, port = host.split(':')
port = int(port)
database = dsn.path.split('?')[0][1:]
query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query
kwargs = dict(parse_qsl(query, True))
if scheme == 'sqlite':
return SQLiteDriver, [dsn.path], {}
elif scheme == 'mysql':
kwargs['user'] = username or 'root'
kwargs['db'] = database
if port:
kwargs['port'] = port
if host:
kwargs['host'] = host
if password:
kwargs['passwd'] = password
return MySQLDriver, [], kwargs
elif scheme == 'postgresql':
kwargs['user'] = username or 'postgres'
kwargs['database'] = database
if port:
kwargs['port'] = port
if 'unix_socket' in kwargs:
kwargs['host'] = kwargs.pop('unix_socket')
elif host:
kwargs['host'] = host
if password:
kwargs['password'] = password
return PostgreSQLDriver, [], kwargs
else:
raise ValueError('Unknown driver %s' % dsn_string) | chriso/gauged | [
340,
10,
340,
2,
1389752506
] |
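Two worked examples of the parser (outputs derived from the branches above):

```python
driver, args, kwargs = parse_dsn("postgresql://scott:tiger@db.example.com:5432/app")
# driver -> PostgreSQLDriver, args -> []
# kwargs -> {'user': 'scott', 'database': 'app', 'port': 5432,
#            'host': 'db.example.com', 'password': 'tiger'}

driver, args, kwargs = parse_dsn("sqlite:///tmp/gauged.db")
# driver -> SQLiteDriver, args -> ['/tmp/gauged.db'], kwargs -> {}
```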
def make_render_children(separator: str) -> Render:
def render_children(
node: RenderTreeNode,
context: RenderContext,
) -> str:
return separator.join(child.render(context) for child in node.children)
return render_children | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def code_inline(node: RenderTreeNode, context: RenderContext) -> str:
code = node.content
all_chars_are_whitespace = not code.strip()
longest_backtick_seq = longest_consecutive_sequence(code, "`")
if longest_backtick_seq:
separator = "`" * (longest_backtick_seq + 1)
return f"{separator} {code} {separator}"
if code.startswith(" ") and code.endswith(" ") and not all_chars_are_whitespace:
return f"` {code} `"
return f"`{code}`" | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
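A standalone replica of the fencing logic, for illustration only (not the library's own helper), showing all three branches:

```python
import re

def fence_inline_code(code: str) -> str:
    # the longest run of backticks in the content decides the fence length
    longest = max((len(m.group()) for m in re.finditer(r"`+", code)), default=0)
    if longest:
        sep = "`" * (longest + 1)
        return f"{sep} {code} {sep}"
    if code.startswith(" ") and code.endswith(" ") and code.strip():
        return f"` {code} `"
    return f"`{code}`"

print(fence_inline_code("plain"))  # `plain`
print(fence_inline_code("a ` b"))  # `` a ` b ``
print(fence_inline_code(" x "))    # `  x  `
```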
def html_inline(node: RenderTreeNode, context: RenderContext) -> str:
return node.content | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def hardbreak(node: RenderTreeNode, context: RenderContext) -> str:
if _in_block("heading", node):
return "<br /> "
return "\\" + "\n" | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def text(node: RenderTreeNode, context: RenderContext) -> str:
"""Process a text token.
Text should always be a child of an inline token. An inline token
should always be enclosed by a heading or a paragraph.
"""
text = node.content
# Escape backslash to prevent it from making unintended escapes.
# This escape has to be first, else we start multiplying backslashes.
text = text.replace("\\", "\\\\")
text = escape_asterisk_emphasis(text) # Escape emphasis/strong marker.
text = escape_underscore_emphasis(text) # Escape emphasis/strong marker.
text = text.replace("[", "\\[") # Escape link label enclosure
text = text.replace("]", "\\]") # Escape link label enclosure
text = text.replace("<", "\\<") # Escape URI enclosure
text = text.replace("`", "\\`") # Escape code span marker
# Escape "&" if it starts a sequence that can be interpreted as
# a character reference.
text = RE_CHAR_REFERENCE.sub(r"\\\g<0>", text)
# The parser can give us consecutive newlines which can break
# the markdown structure. Replace two or more consecutive newlines
# with newline character's decimal reference.
text = text.replace("\n\n", "&#10;&#10;")
# If the last character is a "!" and the token next up is a link, we
# have to escape the "!" or else the link will be interpreted as image.
next_sibling = node.next_sibling
if text.endswith("!") and next_sibling and next_sibling.type == "link":
text = text[:-1] + "\\!"
if context.do_wrap and _in_block("paragraph", node):
text = re.sub(r"\s+", WRAP_POINT, text)
return text | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def code_block(node: RenderTreeNode, context: RenderContext) -> str:
return fence(node, context) | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def _render_inline_as_text(node: RenderTreeNode, context: RenderContext) -> str:
"""Special kludge for image `alt` attributes to conform CommonMark spec.
Don't try to use it! The spec requires showing `alt` content with
stripped markup, instead of simply escaping it.
"""
def text_renderer(node: RenderTreeNode, context: RenderContext) -> str:
return node.content
def image_renderer(node: RenderTreeNode, context: RenderContext) -> str:
return _render_inline_as_text(node, context)
inline_renderers: Mapping[str, Render] = defaultdict(
lambda: make_render_children(""),
{
"text": text_renderer,
"image": image_renderer,
"link": link,
"softbreak": softbreak,
},
)
inline_context = RenderContext(
inline_renderers, context.postprocessors, context.options, context.env
)
return make_render_children("")(node, inline_context) | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def em(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
indicator = node.markup
return indicator + text + indicator | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def heading(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
if node.markup == "=":
prefix = "# "
elif node.markup == "-":
prefix = "## "
else: # ATX heading
prefix = node.markup + " "
# There can be newlines in setext headers, but we make an ATX
# header always. Convert newlines to spaces.
text = text.replace("\n", " ")
# If the text ends in a sequence of hashes (#), the hashes will be
# interpreted as an optional closing sequence of the heading, and
# will not be rendered. Escape a line ending hash to prevent this.
if text.endswith("#"):
text = text[:-1] + "\\#"
return prefix + text | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
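Assuming mdformat's public `mdformat.text` API, the setext-to-ATX conversion can be observed end to end:

```python
import mdformat

print(mdformat.text("Title\n=====\n"), end="")  # -> "# Title"
print(mdformat.text("Sub\n---\n"), end="")      # -> "## Sub"
```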
def _wrap(text: str, *, width: int | Literal["no"]) -> str:
"""Wrap text at locations pointed by `WRAP_POINT`s.
Converts `WRAP_POINT`s to either a space or newline character, thus
wrapping the text. Already existing whitespace will be preserved as
is.
"""
text, replacements = _prepare_wrap(text)
if width == "no":
return _recover_preserve_chars(text, replacements)
wrapper = textwrap.TextWrapper(
break_long_words=False,
break_on_hyphens=False,
width=width,
expand_tabs=False,
replace_whitespace=False,
)
wrapped = wrapper.fill(text)
wrapped = _recover_preserve_chars(wrapped, replacements)
return " " + wrapped if text.startswith(" ") else wrapped | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def _recover_preserve_chars(text: str, replacements: str) -> str:
replacement_iterator = iter(replacements)
return "".join(
next(replacement_iterator) if c == PRESERVE_CHAR else c for c in text
) | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def list_item(node: RenderTreeNode, context: RenderContext) -> str:
"""Return one list item as string.
This returns just the content. List item markers and indentation are
added in `bullet_list` and `ordered_list` renderers.
"""
block_separator = "\n" if is_tight_list_item(node) else "\n\n"
text = make_render_children(block_separator)(node, context)
if not text.strip():
return ""
return text | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def ordered_list(node: RenderTreeNode, context: RenderContext) -> str:
consecutive_numbering = context.options.get("mdformat", {}).get(
"number", DEFAULT_OPTS["number"]
)
marker_type = get_list_marker_type(node)
first_line_indent = " "
block_separator = "\n" if is_tight_list(node) else "\n\n"
list_len = len(node.children)
starting_number = node.attrs.get("start")
if starting_number is None:
starting_number = 1
assert isinstance(starting_number, int)
if consecutive_numbering:
indent_width = len(
f"{list_len + starting_number - 1}{marker_type}{first_line_indent}"
)
else:
indent_width = len(f"{starting_number}{marker_type}{first_line_indent}")
text = ""
with context.indented(indent_width):
for list_item_index, list_item in enumerate(node.children):
list_item_text = list_item.render(context)
formatted_lines = []
line_iterator = iter(list_item_text.split("\n"))
first_line = next(line_iterator)
if consecutive_numbering:
# Prefix first line of the list item with consecutive numbering,
# padded with zeros to make all markers of even length.
# E.g.
# 002. This is the first list item
# 003. Second item
# ...
# 112. Last item
number = starting_number + list_item_index
pad = len(str(list_len + starting_number - 1))
number_str = str(number).rjust(pad, "0")
formatted_lines.append(
f"{number_str}{marker_type}{first_line_indent}{first_line}"
if first_line
else f"{number_str}{marker_type}"
)
else:
# Prefix first line of first item with the starting number of the
# list. Prefix following list items with the number one
# prefixed by zeros to make the list item marker of even length
# with the first one.
# E.g.
# 5321. This is the first list item
# 0001. Second item
# 0001. Third item
first_item_marker = f"{starting_number}{marker_type}"
other_item_marker = (
"0" * (len(str(starting_number)) - 1) + "1" + marker_type
)
if list_item_index == 0:
formatted_lines.append(
f"{first_item_marker}{first_line_indent}{first_line}"
if first_line
else first_item_marker
)
else:
formatted_lines.append(
f"{other_item_marker}{first_line_indent}{first_line}"
if first_line
else other_item_marker
)
for line in line_iterator:
formatted_lines.append(" " * indent_width + line if line else "")
text += "\n".join(formatted_lines)
if list_item_index != len(node.children) - 1:
text += block_separator
return text | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
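The two numbering styles are reachable through mdformat's `number` option (mirroring the `--number` CLI flag); a sketch assuming the public `mdformat.text` API:

```python
import mdformat

src = "1. first\n2. second\n3. third\n"
print(mdformat.text(src), end="")
# 1. first
# 1. second
# 1. third
print(mdformat.text(src, options={"number": True}), end="")
# 1. first
# 2. second
# 3. third
```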
def indented(self, width: int) -> Generator[None, None, None]:
self.env["indent_width"] += width
try:
yield
finally:
self.env["indent_width"] -= width | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def do_wrap(self) -> bool:
wrap_mode = self.options.get("mdformat", {}).get("wrap", DEFAULT_OPTS["wrap"])
return isinstance(wrap_mode, int) or wrap_mode == "no" | executablebooks/mdformat | [
201,
31,
201,
30,
1594476961
] |
def get_parent_id(self, name, attrs):
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
return final_attrs['id'] | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
def get_values(self, min_value, max_value, step=1):
decimal_step = Decimal(str(step))
value = Decimal(str(min_value))
while value <= max_value:
yield value
value += decimal_step | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
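Using `Decimal(str(step))` avoids the drift a float accumulator would pick up; a standalone version of the generator (minus `self`) shows the exact endpoints:

```python
from decimal import Decimal

def get_values(min_value, max_value, step=1):
    decimal_step = Decimal(str(step))
    value = Decimal(str(min_value))
    while value <= max_value:
        yield value
        value += decimal_step

print(list(get_values(1, 2, 0.5)))
# [Decimal('1'), Decimal('1.5'), Decimal('2.0')]
```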
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False, default='',
template='ratings/slider_widget.html', attrs=None):
"""
The argument *default* is used when the initial value is None.
"""
super(SliderWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.default = default
self.template = template
self.key = key | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
def render(self, name, value, attrs=None):
context = self.get_context(name, value, attrs or {})
return render_to_string(self.template, context) | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False,
template='ratings/star_widget.html', attrs=None):
super(StarWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.template = template
self.key = key | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
def _get_value(self, original, split):
if original:
value = round(original * split) / split
return Decimal(str(value)) | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
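The helper snaps a raw rating to the nearest 1/`split` of a point; a standalone illustration (the function name is ours):

```python
from decimal import Decimal

def snap(original, split):
    if original:
        value = round(original * split) / split
        return Decimal(str(value))  # falsy input falls through to None

print(snap(3.7, 2))   # 3.5  -- nearest half
print(snap(4.3, 4))   # 4.25 -- nearest quarter
print(snap(None, 2))  # None
```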
def __init__(self, min_value, max_value, instance=None,
can_delete_vote=True, template='ratings/like_widget.html', attrs=None):
super(LikeWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.instance = instance
self.can_delete_vote = can_delete_vote
self.template = template | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
def get_context(self, name, value, attrs=None):
# here we convert *min_value*, *max_value* and *step*
# to string to avoid odd behaviours of Django localization
# in the template (and, for backward compatibility we do not
# want to use the *unlocalize* filter)
attrs['type'] = 'hidden'
return {
'min_value': str(self.min_value),
'max_value': str(self.max_value),
'can_delete_vote': self.can_delete_vote,
'parent': super(LikeWidget, self).render(name, value, attrs),
'parent_id': self.get_parent_id(name, attrs),
'value': str(value),
'like_id': self.get_widget_id('like', name),
} | redsolution/django-generic-ratings | [
7,
2,
7,
1,
1310458379
] |
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_counts_by_month()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_counts_by_month_unpublished()
else:
stats_result = stats_datasets.get_dataverse_counts_by_month_published()
return stats_result | IQSS/miniverse | [
2,
7,
2,
26,
1467318581
] |
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_count()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_count_unpublished()
else:
stats_result = stats_datasets.get_dataverse_count_published()
return stats_result | IQSS/miniverse | [
2,
7,
2,
26,
1467318581
] |
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_affiliation_counts()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_affiliation_counts_unpublished()
else:
stats_result = stats_datasets.get_dataverse_affiliation_counts_published()
return stats_result | IQSS/miniverse | [
2,
7,
2,
26,
1467318581
] |
def is_show_uncategorized(self, request):
"""Return the result of the "?show_uncategorized" query string param"""
show_uncategorized = request.GET.get('show_uncategorized', False)
if show_uncategorized is True or show_uncategorized == 'true':
return True
return False | IQSS/miniverse | [
2,
7,
2,
26,
1467318581
] |
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read() | keithhamilton/transposer | [
2,
1,
2,
1,
1422066024
] |
def snfpsf(wave, psfparams, header, psftype):
"""Create a 3-d PSF based on SNFactory-specific parameterization of
Gaussian + Moffat PSF parameters and ADR."""
# Get Gaussian+Moffat parameters at each wavelength.
relwave = wave / REFWAVE - 1.0
ellipticity = abs(psfparams[0]) * np.ones_like(wave)
alpha = np.abs(psfparams[1] +
psfparams[2] * relwave +
psfparams[3] * relwave**2)
# correlated parameters (coefficients determined externally)
sigma = 0.545 + 0.215 * alpha # Gaussian parameter
beta = 1.685 + 0.345 * alpha # Moffat parameter
eta = 1.040 + 0.0 * alpha # gaussian ampl. / moffat ampl.
# Atmospheric differential refraction (ADR): Because of ADR,
# the center of the PSF will be different at each wavelength,
# by an amount that we can determine (pretty well) from the
# atmospheric conditions and the pointing and angle of the
# instrument. We calculate the offsets here as a function of
# observation and wavelength and input these to the model.
# Correction to parallactic angle and airmass for 2nd-order effects
# such as MLA rotation, mechanical flexures or finite-exposure
# corrections. These values have been trained on faint-std star
# exposures.
#
# `predict_adr_params` uses 'AIRMASS', 'PARANG' and 'CHANNEL' keys
# in input dictionary.
delta, theta = Hyper_PSF3D_PL.predict_adr_params(header)
# check for crazy values of pressure and temperature, and assign default
# values.
pressure = header.get('PRESSURE', 617.)
if not 550. < pressure < 650.:
pressure = 617.
temp = header.get('TEMP', 2.)
if not -20. < temp < 20.:
temp = 2.
adr = ADR(pressure, temp, lref=REFWAVE, delta=delta, theta=theta)
adr_refract = adr.refract(0, 0, wave, unit=SPAXEL_SIZE) | snfactory/cubefit | [
4,
5,
4,
7,
1402950942
] |
def setup_logging(loglevel, logfname=None):
# if loglevel isn't an integer, parse it as "debug", "info", etc:
if not isinstance(loglevel, int):
loglevel = getattr(logging, loglevel.upper(), None)
if not isinstance(loglevel, int):
print('Invalid log level: %s' % loglevel)
exit(1)
# remove logfile if it already exists
if logfname is not None and os.path.exists(logfname):
os.remove(logfname)
logging.basicConfig(filename=logfname, format="%(levelname)s %(message)s",
level=loglevel) | snfactory/cubefit | [
4,
5,
4,
7,
1402950942
] |
def cubefit_subtract(argv=None):
DESCRIPTION = \ | snfactory/cubefit | [
4,
5,
4,
7,
1402950942
] |
def __init__(self, inputfiles):
"""
:param inputfiles: list of pdb files needed for averaging
"""
self.inputs = inputfiles
self.size = []
self.nbknots = None
self.radius = None
self.coordknots = [] | kif/freesas | [
7,
4,
7,
11,
1415043031
] |
def spatial_extent(self):
"""
Calculate the maximal extent of input models | kif/freesas | [
7,
4,
7,
11,
1415043031
] |
def calc_radius(self, nbknots=None):
"""
Calculate the radius of each point of a hexagonal close-packed grid,
knowing the total volume and the number of knots in this grid.
:param nbknots: number of knots wanted for the grid
:return radius: the radius of each knot of the grid
"""
if len(self.size) == 0:
self.spatial_extent()
nbknots = nbknots if nbknots is not None else 5000
size = self.size
dx = size[0] - size[3]
dy = size[1] - size[4]
dz = size[2] - size[5]
volume = dx * dy * dz
density = numpy.pi / (3*2**0.5)
radius = ((3 / (4 * numpy.pi)) * density * volume / nbknots) ** (1.0 / 3)
self.radius = radius
return radius | kif/freesas | [
7,
4,
7,
11,
1415043031
] |
def __init__(self, inputfiles, grid):
"""
:param inputfiles: list of pdb files of aligned models
:param grid: 2d-array coordinates of each point of a grid, fourth column full of zeros
"""
self.inputfiles = inputfiles
self.models = []
self.header = []
self.radius = None
self.atoms = []
self.grid = grid | kif/freesas | [
7,
4,
7,
11,
1415043031
] |
def read_files(self, reference=None):
"""
Read all the pdb files in the inputfiles list, creating SASModels.
The created SASModels are saved in a list; the reference model is the first model of the list.
:param reference: position of the reference model file in the inputfiles list
"""
ref = reference if reference is not None else 0
inputfiles = self.inputfiles
models = []
models.append(SASModel(inputfiles[ref]))
for i in range(len(inputfiles)):
if i==ref:
continue
else:
models.append(SASModel(inputfiles[i]))
self.models = models
return models | kif/freesas | [
7,
4,
7,
11,
1415043031
] |
def assign_occupancy(self):
"""
For each point of the grid, total occupancy and contribution factor are computed and saved.
The grid is then ordered with decreasing value of occupancy.
The fourth column of the array corresponds to the occupancy of the point and the fifth to
the contribution of this point.
:return sortedgrid: 2d-array, coordinates of each point of the grid
"""
grid = self.grid
nbknots = grid.shape[0]
grid = numpy.append(grid, numpy.zeros((nbknots, 1), dtype="float"), axis=1)
for i in range(nbknots):
occ, contrib = self.calc_occupancy(grid[i, 0:3])
grid[i, 3] = occ
grid[i, 4] = contrib
order = numpy.argsort(grid, axis=0)[:, -2]
sortedgrid = numpy.empty_like(grid)
for i in range(nbknots):
sortedgrid[nbknots - i - 1, :] = grid[order[i], :]
return sortedgrid | kif/freesas | [
7,
4,
7,
11,
1415043031
] |
def add_arguments(self, parser):
parser.add_argument(
'--send-out-for-real', action='store_true', default=False,
help='Send information to the instructors.',
)
parser.add_argument(
'--no-may-contact-only', action='store_true', default=False,
help='Include instructors not willing to be contacted.',
)
parser.add_argument(
'--django-mailing', action='store_true', default=False,
help='Use Django mailing system. This requires some environmental '
'variables to be set, see `settings.py`.',
)
parser.add_argument(
'-s', '--sender', action='store',
default='workshops@carpentries.org',
help='E-mail used in "from:" field.',
) | swcarpentry/amy | [
97,
67,
97,
107,
1417450034
] |
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=['instructor', 'helper'])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related('airport') \
.prefetch_related('task_set', 'lessons',
'award_set', 'badges')
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles) \
.select_related('event', 'role')
record = {
'person': person,
'lessons': person.lessons.all(),
'instructor_awards': person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
'tasks': zip(tasks,
self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result | swcarpentry/amy | [
97,
67,
97,
107,
1417450034
] |
def subject(self, record):
# in future we can vary the subject depending on the record details
return 'Updating your Software Carpentry information' | swcarpentry/amy | [
97,
67,
97,
107,
1417450034
] |
def send_message(self, subject, message, sender, recipient, for_real=False,
django_mailing=False):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, 'w')
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write('-' * 40 + '\n')
self.stdout.write('To: {}\n'.format(recipient))
self.stdout.write('Subject: {}\n'.format(subject))
self.stdout.write('From: {}\n'.format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + '\n') | swcarpentry/amy | [
97,
67,
97,
107,
1417450034
] |
def build_delete_request(
scope: str,
policy_assignment_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def build_create_request(
scope: str,
policy_assignment_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def build_get_request(
scope: str,
policy_assignment_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def build_list_for_resource_group_request(
resource_group_name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def build_list_for_resource_request(
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def build_list_request(
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def build_delete_by_id_request(
policy_assignment_id: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |