| repository_name (stringlengths 5–67) | func_path_in_repository (stringlengths 4–234) | func_name (stringlengths 0–314) | whole_func_string (stringlengths 52–3.87M) | language (stringclasses, 6 values) | func_code_string (stringlengths 52–3.87M) | func_code_tokens (sequencelengths 15–672k) | func_documentation_string (stringlengths 1–47.2k) | func_documentation_tokens (sequencelengths 1–3.92k) | split_name (stringclasses, 1 value) | func_code_url (stringlengths 85–339) |
---|---|---|---|---|---|---|---|---|---|---|
pypyr/pypyr-cli | pypyr/parser/keyvaluepairs.py | get_parsed_context | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
if not context_arg:
logger.debug("pipeline invoked without context arg set. For "
"this keyvaluepairs parser you're looking for "
"something like: "
"pypyr pipelinename 'key1=value1,key2=value2'.")
return None
logger.debug("starting")
# for each comma-delimited element, project key=value
return dict(element.split('=') for element in context_arg.split(',')) | python | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
if not context_arg:
logger.debug("pipeline invoked without context arg set. For "
"this keyvaluepairs parser you're looking for "
"something like: "
"pypyr pipelinename 'key1=value1,key2=value2'.")
return None
logger.debug("starting")
# for each comma-delimited element, project key=value
return dict(element.split('=') for element in context_arg.split(',')) | [
"def",
"get_parsed_context",
"(",
"context_arg",
")",
":",
"if",
"not",
"context_arg",
":",
"logger",
".",
"debug",
"(",
"\"pipeline invoked without context arg set. For \"",
"\"this keyvaluepairs parser you're looking for \"",
"\"something like: \"",
"\"pypyr pipelinename 'key1=value1,key2=value2'.\"",
")",
"return",
"None",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# for each comma-delimited element, project key=value",
"return",
"dict",
"(",
"element",
".",
"split",
"(",
"'='",
")",
"for",
"element",
"in",
"context_arg",
".",
"split",
"(",
"','",
")",
")"
] | Parse input context string and return context as dictionary. | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/parser/keyvaluepairs.py#L18-L29 |
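A hedged sketch of the parsing step in isolation, mirroring the function's dict-over-generator approach. Note that `dict()` raises `ValueError` if any comma-delimited element does not split into exactly one key and one value.

```python
context_arg = 'key1=value1,key2=value2'
# Each comma-delimited element is projected to a key=value pair.
context = dict(element.split('=') for element in context_arg.split(','))
print(context)  # {'key1': 'value1', 'key2': 'value2'}
```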
pypyr/pypyr-cli | pypyr/steps/fetchjson.py | run_step | def run_step(context):
"""Load a json file into the pypyr context.
json parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e. if the file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
The json should not be an array [] on the top level, but rather an Object.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchJson
- path. path-like. Path to file on disk.
- key. string. If exists, write json structure to this
context key. Else json writes to context root.
Also supports passing a path as a string to fetchJson, but in this case you
won't be able to specify a key.
All inputs support formatting expressions.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
None.
"""
logger.debug("started")
deprecated(context)
context.assert_key_has_value(key='fetchJson', caller=__name__)
fetch_json_input = context.get_formatted('fetchJson')
if isinstance(fetch_json_input, str):
file_path = fetch_json_input
destination_key_expression = None
else:
context.assert_child_key_has_value(parent='fetchJson',
child='path',
caller=__name__)
file_path = fetch_json_input['path']
destination_key_expression = fetch_json_input.get('key', None)
logger.debug(f"attempting to open file: {file_path}")
with open(file_path) as json_file:
payload = json.load(json_file)
if destination_key_expression:
destination_key = context.get_formatted_iterable(
destination_key_expression)
logger.debug(f"json file loaded. Writing to context {destination_key}")
context[destination_key] = payload
else:
if not isinstance(payload, MutableMapping):
raise TypeError(
'json input should describe an object at the top '
'level when fetchJsonKey isn\'t specified. You should have '
'something like {"key1": "value1", "key2": "value2"} '
'in the json top-level, not ["value1", "value2"]')
logger.debug("json file loaded. Merging into pypyr context. . .")
context.update(payload)
logger.info(f"json file written into pypyr context. Count: {len(payload)}")
logger.debug("done") | python | def run_step(context):
"""Load a json file into the pypyr context.
json parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e. if the file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
The json should not be an array [] on the top level, but rather an Object.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchJson
- path. path-like. Path to file on disk.
- key. string. If exists, write json structure to this
context key. Else json writes to context root.
Also supports passing a path as a string to fetchJson, but in this case you
won't be able to specify a key.
All inputs support formatting expressions.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
None.
"""
logger.debug("started")
deprecated(context)
context.assert_key_has_value(key='fetchJson', caller=__name__)
fetch_json_input = context.get_formatted('fetchJson')
if isinstance(fetch_json_input, str):
file_path = fetch_json_input
destination_key_expression = None
else:
context.assert_child_key_has_value(parent='fetchJson',
child='path',
caller=__name__)
file_path = fetch_json_input['path']
destination_key_expression = fetch_json_input.get('key', None)
logger.debug(f"attempting to open file: {file_path}")
with open(file_path) as json_file:
payload = json.load(json_file)
if destination_key_expression:
destination_key = context.get_formatted_iterable(
destination_key_expression)
logger.debug(f"json file loaded. Writing to context {destination_key}")
context[destination_key] = payload
else:
if not isinstance(payload, MutableMapping):
raise TypeError(
'json input should describe an object at the top '
'level when fetchJsonKey isn\'t specified. You should have '
'something like {"key1": "value1", "key2": "value2"} '
'in the json top-level, not ["value1", "value2"]')
logger.debug("json file loaded. Merging into pypyr context. . .")
context.update(payload)
logger.info(f"json file written into pypyr context. Count: {len(payload)}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'fetchJson'",
",",
"caller",
"=",
"__name__",
")",
"fetch_json_input",
"=",
"context",
".",
"get_formatted",
"(",
"'fetchJson'",
")",
"if",
"isinstance",
"(",
"fetch_json_input",
",",
"str",
")",
":",
"file_path",
"=",
"fetch_json_input",
"destination_key_expression",
"=",
"None",
"else",
":",
"context",
".",
"assert_child_key_has_value",
"(",
"parent",
"=",
"'fetchJson'",
",",
"child",
"=",
"'path'",
",",
"caller",
"=",
"__name__",
")",
"file_path",
"=",
"fetch_json_input",
"[",
"'path'",
"]",
"destination_key_expression",
"=",
"fetch_json_input",
".",
"get",
"(",
"'key'",
",",
"None",
")",
"logger",
".",
"debug",
"(",
"f\"attempting to open file: {file_path}\"",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"json_file",
":",
"payload",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"if",
"destination_key_expression",
":",
"destination_key",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"destination_key_expression",
")",
"logger",
".",
"debug",
"(",
"f\"json file loaded. Writing to context {destination_key}\"",
")",
"context",
"[",
"destination_key",
"]",
"=",
"payload",
"else",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"MutableMapping",
")",
":",
"raise",
"TypeError",
"(",
"'json input should describe an object at the top '",
"'level when fetchJsonKey isn\\'t specified. You should have '",
"'something like {\"key1\": \"value1\", \"key2\": \"value2\"} '",
"'in the json top-level, not [\"value1\", \"value2\"]'",
")",
"logger",
".",
"debug",
"(",
"\"json file loaded. Merging into pypyr context. . .\"",
")",
"context",
".",
"update",
"(",
"payload",
")",
"logger",
".",
"info",
"(",
"f\"json file written into pypyr context. Count: {len(payload)}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Load a json file into the pypyr context.
json parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e. if the file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
The json should not be an array [] on the top level, but rather an Object.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchJson
- path. path-like. Path to file on disk.
- key. string. If exists, write json structure to this
context key. Else json writes to context root.
Also supports passing a path as a string to fetchJson, but in this case you
won't be able to specify a key.
All inputs support formatting expressions.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
None. | [
"Load",
"a",
"json",
"file",
"into",
"the",
"pypyr",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchjson.py#L10-L82 |
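A hedged sketch of exercising the step outside a pipeline. It assumes `pypyr.context.Context` can be constructed from a plain dict (its use with `context.update(payload)` above suggests it is dict-like).

```python
import json
import tempfile

from pypyr.context import Context
import pypyr.steps.fetchjson as fetchjson

# Write a throwaway json file to merge from.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'eggs': 'boiled'}, f)

# 'eggs' already exists in the context; the file's value should win on merge.
context = Context({'eggs': 'fried', 'fetchJson': {'path': f.name}})
fetchjson.run_step(context)
print(context['eggs'])  # boiled
```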
pypyr/pypyr-cli | pypyr/steps/fetchjson.py | deprecated | def deprecated(context):
"""Create new style in params from deprecated."""
if 'fetchJsonPath' in context:
context.assert_key_has_value(key='fetchJsonPath', caller=__name__)
context['fetchJson'] = {'path': context['fetchJsonPath']}
if 'fetchJsonKey' in context:
context['fetchJson']['key'] = context.get('fetchJsonKey', None)
logger.warning("fetchJsonPath and fetchJsonKey "
"are deprecated. They will stop working upon the next "
"major release. Use the new context key fetchJson "
"instead. It's a lot better, promise! For the moment "
"pypyr is creating the new fetchJson key for you "
"under the hood.") | python | def deprecated(context):
"""Create new style in params from deprecated."""
if 'fetchJsonPath' in context:
context.assert_key_has_value(key='fetchJsonPath', caller=__name__)
context['fetchJson'] = {'path': context['fetchJsonPath']}
if 'fetchJsonKey' in context:
context['fetchJson']['key'] = context.get('fetchJsonKey', None)
logger.warning("fetchJsonPath and fetchJsonKey "
"are deprecated. They will stop working upon the next "
"major release. Use the new context key fetchJson "
"instead. It's a lot better, promise! For the moment "
"pypyr is creating the new fetchJson key for you "
"under the hood.") | [
"def",
"deprecated",
"(",
"context",
")",
":",
"if",
"'fetchJsonPath'",
"in",
"context",
":",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'fetchJsonPath'",
",",
"caller",
"=",
"__name__",
")",
"context",
"[",
"'fetchJson'",
"]",
"=",
"{",
"'path'",
":",
"context",
"[",
"'fetchJsonPath'",
"]",
"}",
"if",
"'fetchJsonKey'",
"in",
"context",
":",
"context",
"[",
"'fetchJson'",
"]",
"[",
"'key'",
"]",
"=",
"context",
".",
"get",
"(",
"'fetchJsonKey'",
",",
"None",
")",
"logger",
".",
"warning",
"(",
"\"fetchJsonPath and fetchJsonKey \"",
"\"are deprecated. They will stop working upon the next \"",
"\"major release. Use the new context key fetchJson \"",
"\"instead. It's a lot better, promise! For the moment \"",
"\"pypyr is creating the new fetchJson key for you \"",
"\"under the hood.\"",
")"
] | Create new style in params from deprecated. | [
"Create",
"new",
"style",
"in",
"params",
"from",
"deprecated",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchjson.py#L85-L100 |
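For reference, a sketch of the before/after context shape the shim produces (the values are hypothetical):

```python
old_style = {'fetchJsonPath': '/tmp/data.json', 'fetchJsonKey': 'payload'}
# ...is folded into the nested key that run_step reads:
new_style = {'fetchJson': {'path': '/tmp/data.json', 'key': 'payload'}}
```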
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._ignore_request | def _ignore_request(self, path):
"""Check to see if we should ignore the request."""
return any([
re.match(pattern, path) for pattern in QC_SETTINGS['IGNORE_REQUEST_PATTERNS']
]) | python | def _ignore_request(self, path):
"""Check to see if we should ignore the request."""
return any([
re.match(pattern, path) for pattern in QC_SETTINGS['IGNORE_REQUEST_PATTERNS']
]) | [
"def",
"_ignore_request",
"(",
"self",
",",
"path",
")",
":",
"return",
"any",
"(",
"[",
"re",
".",
"match",
"(",
"pattern",
",",
"path",
")",
"for",
"pattern",
"in",
"QC_SETTINGS",
"[",
"'IGNORE_REQUEST_PATTERNS'",
"]",
"]",
")"
] | Check to see if we should ignore the request. | [
"Check",
"to",
"see",
"if",
"we",
"should",
"ignore",
"the",
"request",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L83-L87 |
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._ignore_sql | def _ignore_sql(self, query):
"""Check to see if we should ignore the sql query."""
return any([
re.search(pattern, query.get('sql')) for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
]) | python | def _ignore_sql(self, query):
"""Check to see if we should ignore the sql query."""
return any([
re.search(pattern, query.get('sql')) for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
]) | [
"def",
"_ignore_sql",
"(",
"self",
",",
"query",
")",
":",
"return",
"any",
"(",
"[",
"re",
".",
"search",
"(",
"pattern",
",",
"query",
".",
"get",
"(",
"'sql'",
")",
")",
"for",
"pattern",
"in",
"QC_SETTINGS",
"[",
"'IGNORE_SQL_PATTERNS'",
"]",
"]",
")"
] | Check to see if we should ignore the sql query. | [
"Check",
"to",
"see",
"if",
"we",
"should",
"ignore",
"the",
"sql",
"query",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L89-L93 |
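Note the asymmetry between the two helpers: request paths are tested with `re.match` (anchored at the start of the string), while SQL is tested with `re.search` (anywhere in the statement). A small illustration with made-up patterns:

```python
import re

re.match(r'^/admin/', '/admin/login/')     # matches: the path starts with /admin/
re.match(r'/login/', '/admin/login/')      # None: match() anchors at position 0
re.search(r'django_session', 'SELECT * FROM django_session')  # matches anywhere
```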
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._duplicate_queries | def _duplicate_queries(self, output):
"""Appends the most common duplicate queries to the given output."""
if QC_SETTINGS['DISPLAY_DUPLICATES']:
for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']):
lines = ['\nRepeated {0} times.'.format(count)]
lines += wrap(query)
lines = "\n".join(lines) + "\n"
output += self._colorize(lines, count)
return output | python | def _duplicate_queries(self, output):
"""Appends the most common duplicate queries to the given output."""
if QC_SETTINGS['DISPLAY_DUPLICATES']:
for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']):
lines = ['\nRepeated {0} times.'.format(count)]
lines += wrap(query)
lines = "\n".join(lines) + "\n"
output += self._colorize(lines, count)
return output | [
"def",
"_duplicate_queries",
"(",
"self",
",",
"output",
")",
":",
"if",
"QC_SETTINGS",
"[",
"'DISPLAY_DUPLICATES'",
"]",
":",
"for",
"query",
",",
"count",
"in",
"self",
".",
"queries",
".",
"most_common",
"(",
"QC_SETTINGS",
"[",
"'DISPLAY_DUPLICATES'",
"]",
")",
":",
"lines",
"=",
"[",
"'\\nRepeated {0} times.'",
".",
"format",
"(",
"count",
")",
"]",
"lines",
"+=",
"wrap",
"(",
"query",
")",
"lines",
"=",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
"+",
"\"\\n\"",
"output",
"+=",
"self",
".",
"_colorize",
"(",
"lines",
",",
"count",
")",
"return",
"output"
] | Appends the most common duplicate queries to the given output. | [
"Appends",
"the",
"most",
"common",
"duplicate",
"queries",
"to",
"the",
"given",
"output",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L142-L150 |
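`self.queries` is evidently a `collections.Counter` keyed by SQL string, so `most_common(n)` caps how many duplicates get reported. A standalone illustration:

```python
from collections import Counter

queries = Counter(['SELECT 1', 'SELECT 1', 'SELECT 2', 'SELECT 1'])
print(queries.most_common(1))  # [('SELECT 1', 3)]
```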
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._calculate_num_queries | def _calculate_num_queries(self):
"""
Calculate the total number of request and response queries.
Used for count header and count table.
"""
request_totals = self._totals("request")
response_totals = self._totals("response")
return request_totals[2] + response_totals[2] | python | def _calculate_num_queries(self):
"""
Calculate the total number of request and response queries.
Used for count header and count table.
"""
request_totals = self._totals("request")
response_totals = self._totals("response")
return request_totals[2] + response_totals[2] | [
"def",
"_calculate_num_queries",
"(",
"self",
")",
":",
"request_totals",
"=",
"self",
".",
"_totals",
"(",
"\"request\"",
")",
"response_totals",
"=",
"self",
".",
"_totals",
"(",
"\"response\"",
")",
"return",
"request_totals",
"[",
"2",
"]",
"+",
"response_totals",
"[",
"2",
"]"
] | Calculate the total number of request and response queries.
Used for count header and count table. | [
"Calculate",
"the",
"total",
"number",
"of",
"request",
"and",
"response",
"queries",
".",
"Used",
"for",
"count",
"header",
"and",
"count",
"table",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L193-L201 |
bradmontgomery/django-querycount | querycount/qc_settings.py | _process_settings | def _process_settings(**kwargs):
"""
Apply user supplied settings.
"""
# If we are in this method due to a signal, only reload for our settings
setting_name = kwargs.get('setting', None)
if setting_name is not None and setting_name != 'QUERYCOUNT':
return
# Support the old-style settings
if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False):
QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS
# Apply new-style settings
if not getattr(settings, 'QUERYCOUNT', False):
return
# Duplicate display is a special case, configure it specifically
if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT:
duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES']
if duplicate_settings is not None:
duplicate_settings = int(duplicate_settings)
QC_SETTINGS['DISPLAY_DUPLICATES'] = duplicate_settings
# Apply the rest of the setting overrides
for key in ['THRESHOLDS',
'IGNORE_REQUEST_PATTERNS',
'IGNORE_SQL_PATTERNS',
'IGNORE_PATTERNS',
'RESPONSE_HEADER']:
if key in settings.QUERYCOUNT:
QC_SETTINGS[key] = settings.QUERYCOUNT[key] | python | def _process_settings(**kwargs):
"""
Apply user supplied settings.
"""
# If we are in this method due to a signal, only reload for our settings
setting_name = kwargs.get('setting', None)
if setting_name is not None and setting_name != 'QUERYCOUNT':
return
# Support the old-style settings
if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False):
QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS
# Apply new-style settings
if not getattr(settings, 'QUERYCOUNT', False):
return
# Duplicate display is a special case, configure it specifically
if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT:
duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES']
if duplicate_settings is not None:
duplicate_settings = int(duplicate_settings)
QC_SETTINGS['DISPLAY_DUPLICATES'] = duplicate_settings
# Apply the rest of the setting overrides
for key in ['THRESHOLDS',
'IGNORE_REQUEST_PATTERNS',
'IGNORE_SQL_PATTERNS',
'IGNORE_PATTERNS',
'RESPONSE_HEADER']:
if key in settings.QUERYCOUNT:
QC_SETTINGS[key] = settings.QUERYCOUNT[key] | [
"def",
"_process_settings",
"(",
"*",
"*",
"kwargs",
")",
":",
"# If we are in this method due to a signal, only reload for our settings",
"setting_name",
"=",
"kwargs",
".",
"get",
"(",
"'setting'",
",",
"None",
")",
"if",
"setting_name",
"is",
"not",
"None",
"and",
"setting_name",
"!=",
"'QUERYCOUNT'",
":",
"return",
"# Support the old-style settings",
"if",
"getattr",
"(",
"settings",
",",
"'QUERYCOUNT_THRESHOLDS'",
",",
"False",
")",
":",
"QC_SETTINGS",
"[",
"'THRESHOLDS'",
"]",
"=",
"settings",
".",
"QUERYCOUNT_THRESHOLDS",
"# Apply new-style settings",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'QUERYCOUNT'",
",",
"False",
")",
":",
"return",
"# Duplicate display is a special case, configure it specifically",
"if",
"'DISPLAY_DUPLICATES'",
"in",
"settings",
".",
"QUERYCOUNT",
":",
"duplicate_settings",
"=",
"settings",
".",
"QUERYCOUNT",
"[",
"'DISPLAY_DUPLICATES'",
"]",
"if",
"duplicate_settings",
"is",
"not",
"None",
":",
"duplicate_settings",
"=",
"int",
"(",
"duplicate_settings",
")",
"QC_SETTINGS",
"[",
"'DISPLAY_DUPLICATES'",
"]",
"=",
"duplicate_settings",
"# Apply the rest of the setting overrides",
"for",
"key",
"in",
"[",
"'THRESHOLDS'",
",",
"'IGNORE_REQUEST_PATTERNS'",
",",
"'IGNORE_SQL_PATTERNS'",
",",
"'IGNORE_PATTERNS'",
",",
"'RESPONSE_HEADER'",
"]",
":",
"if",
"key",
"in",
"settings",
".",
"QUERYCOUNT",
":",
"QC_SETTINGS",
"[",
"key",
"]",
"=",
"settings",
".",
"QUERYCOUNT",
"[",
"key",
"]"
] | Apply user supplied settings. | [
"Apply",
"user",
"supplied",
"settings",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/qc_settings.py#L23-L55 |
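Pieced together from the keys this loader reads, a Django `settings.py` block it accepts looks roughly like the following. The threshold keys and the header name are illustrative assumptions, not package defaults.

```python
QUERYCOUNT = {
    'THRESHOLDS': {'MEDIUM': 50, 'HIGH': 200},      # assumed threshold keys
    'IGNORE_REQUEST_PATTERNS': [r'^/admin/'],        # checked with re.match
    'IGNORE_SQL_PATTERNS': [r'django_migrations'],   # checked with re.search
    'IGNORE_PATTERNS': [],
    'DISPLAY_DUPLICATES': 5,                         # int or None
    'RESPONSE_HEADER': 'X-DjangoQueryCount-Count',   # assumed header name
}
```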
xiyouMc/ncmbot | ncmbot/core.py | login | def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response | python | def login(password, phone=None, email=None, rememberLogin=True):
""" 登录接口,返回 :class:'Response' 对象
:param password: 网易云音乐的密码
:param phone: (optional) 手机登录
:param email: (optional) 邮箱登录
:param rememberLogin: (optional) 是否记住密码,默认 True
"""
if (phone is None) and (email is None):
raise ParamsError()
if password is None:
raise ParamsError()
r = NCloudBot()
# r.username = phone or email
md5 = hashlib.md5()
md5.update(password)
password = md5.hexdigest()
print password
r.data = {'password': password, 'rememberLogin': rememberLogin}
if phone is not None:
r.data['phone'] = phone
r.method = 'LOGIN'
else:
r.data['username'] = email
r.method = 'EMAIL_LOGIN'
r.send()
return r.response | [
"def",
"login",
"(",
"password",
",",
"phone",
"=",
"None",
",",
"email",
"=",
"None",
",",
"rememberLogin",
"=",
"True",
")",
":",
"if",
"(",
"phone",
"is",
"None",
")",
"and",
"(",
"email",
"is",
"None",
")",
":",
"raise",
"ParamsError",
"(",
")",
"if",
"password",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"# r.username = phone or email",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"md5",
".",
"update",
"(",
"password",
")",
"password",
"=",
"md5",
".",
"hexdigest",
"(",
")",
"print",
"password",
"r",
".",
"data",
"=",
"{",
"'password'",
":",
"password",
",",
"'rememberLogin'",
":",
"rememberLogin",
"}",
"if",
"phone",
"is",
"not",
"None",
":",
"r",
".",
"data",
"[",
"'phone'",
"]",
"=",
"phone",
"r",
".",
"method",
"=",
"'LOGIN'",
"else",
":",
"r",
".",
"data",
"[",
"'username'",
"]",
"=",
"email",
"r",
".",
"method",
"=",
"'EMAIL_LOGIN'",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Log in and return a :class:'Response' object.
:param password: NetEase Cloud Music password
:param phone: (optional) log in with a phone number
:param email: (optional) log in with an email address
:param rememberLogin: (optional) whether to remember the password, default True | [
"登录接口,返回",
":",
"class",
":",
"Response",
"对象",
":",
"param",
"password",
":",
"网易云音乐的密码",
":",
"param",
"phone",
":",
"(",
"optional",
")",
"手机登录",
":",
"param",
"email",
":",
"(",
"optional",
")",
"邮箱登录",
":",
"param",
"rememberLogin",
":",
"(",
"optional",
")",
"是否记住密码,默认",
"True"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L218-L245 |
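A hedged usage sketch. The module targets Python 2 (note the bare `print` statements above), the credentials are placeholders, and the response payload shape is an assumption about the NetEase API rather than a guarantee of this wrapper.

```python
from ncmbot.core import login, user_play_list

resp = login('my-password', phone='13800000000')  # placeholder credentials
if resp.ok:
    uid = resp.json()['account']['id']  # assumed payload shape
    playlists = user_play_list(uid)
```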
xiyouMc/ncmbot | ncmbot/core.py | user_play_list | def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response | python | def user_play_list(uid, offset=0, limit=1000):
"""获取用户歌单,包含收藏的歌单
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 1000
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_PLAY_LIST'
r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''}
r.send()
return r.response | [
"def",
"user_play_list",
"(",
"uid",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"1000",
")",
":",
"if",
"uid",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'USER_PLAY_LIST'",
"r",
".",
"data",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'uid'",
":",
"uid",
",",
"'limit'",
":",
"limit",
",",
"'csrf_token'",
":",
"''",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get the user's playlists, including collected playlists.
:param uid: user ID, obtainable via login or other endpoints
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows, default 1000 | [
"获取用户歌单,包含收藏的歌单",
":",
"param",
"uid",
":",
"用户的ID,可通过登录或者其他接口获取",
":",
"param",
"offset",
":",
"(",
"optional",
")",
"分段起始位置,默认",
"0",
":",
"param",
"limit",
":",
"(",
"optional",
")",
"数据上限多少行,默认",
"1000"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L248-L261 |
xiyouMc/ncmbot | ncmbot/core.py | user_dj | def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response | python | def user_dj(uid, offset=0, limit=30):
"""获取用户电台数据
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_DJ'
r.data = {'offset': offset, 'limit': limit, "csrf_token": ""}
r.params = {'uid': uid}
r.send()
return r.response | [
"def",
"user_dj",
"(",
"uid",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"30",
")",
":",
"if",
"uid",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'USER_DJ'",
"r",
".",
"data",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"params",
"=",
"{",
"'uid'",
":",
"uid",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get the user's DJ radio data.
:param uid: user ID, obtainable via login or other endpoints
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows, default 30 | [
"获取用户电台数据"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L264-L279 |
xiyouMc/ncmbot | ncmbot/core.py | search | def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response | python | def search(keyword, type=1, offset=0, limit=30):
"""搜索歌曲,支持搜索歌曲、歌手、专辑等
:param keyword: 关键词
:param type: (optional) 搜索类型,1: 单曲, 100: 歌手, 1000: 歌单, 1002: 用户
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if keyword is None:
raise ParamsError()
r = NCloudBot()
r.method = 'SEARCH'
r.data = {
's': keyword,
'limit': str(limit),
'type': str(type),
'offset': str(offset)
}
r.send()
return r.response | [
"def",
"search",
"(",
"keyword",
",",
"type",
"=",
"1",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"30",
")",
":",
"if",
"keyword",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'SEARCH'",
"r",
".",
"data",
"=",
"{",
"'s'",
":",
"keyword",
",",
"'limit'",
":",
"str",
"(",
"limit",
")",
",",
"'type'",
":",
"str",
"(",
"type",
")",
",",
"'offset'",
":",
"str",
"(",
"offset",
")",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Search songs; supports searching for songs, artists, albums, etc.
:param keyword: search keyword
:param type: (optional) search type; 1: songs, 100: artists, 1000: playlists, 1002: users
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows, default 30 | [
"搜索歌曲,支持搜索歌曲、歌手、专辑等"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L282-L302 |
xiyouMc/ncmbot | ncmbot/core.py | user_follows | def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response | python | def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response | [
"def",
"user_follows",
"(",
"uid",
",",
"offset",
"=",
"'0'",
",",
"limit",
"=",
"30",
")",
":",
"if",
"uid",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'USER_FOLLOWS'",
"r",
".",
"params",
"=",
"{",
"'uid'",
":",
"uid",
"}",
"r",
".",
"data",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
",",
"'order'",
":",
"True",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get the list of users the given user follows.
:param uid: user ID, obtainable via login or other endpoints
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows, default 30 | [
"获取用户关注列表"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L305-L320 |
xiyouMc/ncmbot | ncmbot/core.py | user_event | def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response | python | def user_event(uid):
"""获取用户动态
:param uid: 用户的ID,可通过登录或者其他接口获取
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_EVENT'
r.params = {'uid': uid}
r.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
r.send()
return r.response | [
"def",
"user_event",
"(",
"uid",
")",
":",
"if",
"uid",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'USER_EVENT'",
"r",
".",
"params",
"=",
"{",
"'uid'",
":",
"uid",
"}",
"r",
".",
"data",
"=",
"{",
"'time'",
":",
"-",
"1",
",",
"'getcounts'",
":",
"True",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get the user's activity feed.
:param uid: user ID, obtainable via login or other endpoints | [
"获取用户动态"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L345-L358 |
xiyouMc/ncmbot | ncmbot/core.py | user_record | def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response | python | def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response | [
"def",
"user_record",
"(",
"uid",
",",
"type",
"=",
"0",
")",
":",
"if",
"uid",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'USER_RECORD'",
"r",
".",
"data",
"=",
"{",
"'type'",
":",
"type",
",",
"'uid'",
":",
"uid",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get the user's play records; must be logged in.
:param uid: user ID, obtainable via login or other endpoints
:param type: (optional) data type; 0: all records, 1: weekData only | [
"获取用户的播放列表",
"必须登录"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L361-L374 |
xiyouMc/ncmbot | ncmbot/core.py | event | def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response | python | def event():
"""获取好友的动态,包括分享视频、音乐、动态等
"""
r = NCloudBot()
r.method = 'EVENT'
r.data = {"csrf_token": ""}
r.send()
return r.response | [
"def",
"event",
"(",
")",
":",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'EVENT'",
"r",
".",
"data",
"=",
"{",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get friends' activity, including shared videos, music, posts, etc. | [
"获取好友的动态,包括分享视频、音乐、动态等"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L377-L386 |
xiyouMc/ncmbot | ncmbot/core.py | top_playlist_highquality | def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response | python | def top_playlist_highquality(cat='全部', offset=0, limit=20):
"""获取网易云音乐的精品歌单
:param cat: (optional) 歌单类型,默认 ‘全部’,比如 华语、欧美等
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
r = NCloudBot()
r.method = 'TOP_PLAYLIST_HIGHQUALITY'
r.data = {'cat': cat, 'offset': offset, 'limit': limit}
r.send()
return r.response | [
"def",
"top_playlist_highquality",
"(",
"cat",
"=",
"'全部', of",
"f",
"et=0, ",
"l",
"i",
"m",
"t=20)",
":",
"",
"",
"",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'TOP_PLAYLIST_HIGHQUALITY'",
"r",
".",
"data",
"=",
"{",
"'cat'",
":",
"cat",
",",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get NetEase Cloud Music's curated (high-quality) playlists.
:param cat: (optional) playlist category, default '全部' (all), e.g. 华语 (Mandarin), 欧美 (Western)
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows, default 20 | [
"获取网易云音乐的精品歌单"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L390-L402 |
xiyouMc/ncmbot | ncmbot/core.py | play_list_detail | def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response | python | def play_list_detail(id, limit=20):
"""获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和 ID 并没有歌单的音乐,因此增加该接口传入歌单 ID
获取歌单中的所有音乐.
:param id: 歌单的ID
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'PLAY_LIST_DETAIL'
r.data = {'id': id, 'limit': limit, "csrf_token": ""}
r.send()
return r.response | [
"def",
"play_list_detail",
"(",
"id",
",",
"limit",
"=",
"20",
")",
":",
"if",
"id",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'PLAY_LIST_DETAIL'",
"r",
".",
"data",
"=",
"{",
"'id'",
":",
"id",
",",
"'limit'",
":",
"limit",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get all songs in a playlist. The curated-playlists endpoint only exposes playlist names and IDs, not the songs themselves, so this endpoint takes a playlist ID
and returns all of the playlist's songs.
:param id: playlist ID
:param limit: (optional) maximum number of rows, default 20 | [
"获取歌单中的所有音乐。由于获取精品中,只能看到歌单名字和",
"ID",
"并没有歌单的音乐,因此增加该接口传入歌单",
"ID",
"获取歌单中的所有音乐",
"."
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L406-L420 |
xiyouMc/ncmbot | ncmbot/core.py | music_url | def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response | python | def music_url(ids=[]):
"""通过歌曲 ID 获取歌曲下载地址
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_URL'
r.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
r.send()
return r.response | [
"def",
"music_url",
"(",
"ids",
"=",
"[",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"ids",
",",
"list",
")",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'MUSIC_URL'",
"r",
".",
"data",
"=",
"{",
"'ids'",
":",
"ids",
",",
"'br'",
":",
"999000",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get song download URLs by song ID.
:param ids: list of song IDs | [
"通过歌曲",
"ID",
"获取歌曲下载地址"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L424-L436 |
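A hedged end-to-end sketch chaining the wrappers above: a user's playlists, then one playlist's tracks, then download URLs. The response keys ('playlist', 'result', 'tracks', 'data') are assumptions about the NetEase payloads, not guarantees made by this module.

```python
from ncmbot.core import user_play_list, play_list_detail, music_url

uid = 12345  # placeholder user ID
playlists = user_play_list(uid).json().get('playlist', [])          # assumed key
tracks = (play_list_detail(playlists[0]['id'])
          .json().get('result', {}).get('tracks', []))              # assumed keys
urls = music_url([t['id'] for t in tracks]).json().get('data', [])  # assumed key
```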
xiyouMc/ncmbot | ncmbot/core.py | lyric | def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response | python | def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response | [
"def",
"lyric",
"(",
"id",
")",
":",
"if",
"id",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'LYRIC'",
"r",
".",
"params",
"=",
"{",
"'id'",
":",
"id",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get a song's lyrics by song ID.
:param id: song ID | [
"通过歌曲",
"ID",
"获取歌曲歌词地址"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L440-L452 |
xiyouMc/ncmbot | ncmbot/core.py | music_comment | def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response | python | def music_comment(id, offset=0, limit=20):
"""获取歌曲的评论列表
:param id: 歌曲 ID
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 20
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'MUSIC_COMMENT'
r.params = {'id': id}
r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""}
r.send()
return r.response | [
"def",
"music_comment",
"(",
"id",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"20",
")",
":",
"if",
"id",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'MUSIC_COMMENT'",
"r",
".",
"params",
"=",
"{",
"'id'",
":",
"id",
"}",
"r",
".",
"data",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
",",
"'rid'",
":",
"id",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get a song's comment list.
:param id: song ID
:param offset: (optional) start position of the slice, default 0
:param limit: (optional) maximum number of rows, default 20 | [
"获取歌曲的评论列表"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L456-L471 |
xiyouMc/ncmbot | ncmbot/core.py | song_detail | def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response | python | def song_detail(ids):
"""通过歌曲 ID 获取歌曲的详细信息
:param ids: 歌曲 ID 的 list
"""
if not isinstance(ids, list):
raise ParamsError()
c = []
for id in ids:
c.append({'id': id})
r = NCloudBot()
r.method = 'SONG_DETAIL'
r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""}
r.send()
return r.response | [
"def",
"song_detail",
"(",
"ids",
")",
":",
"if",
"not",
"isinstance",
"(",
"ids",
",",
"list",
")",
":",
"raise",
"ParamsError",
"(",
")",
"c",
"=",
"[",
"]",
"for",
"id",
"in",
"ids",
":",
"c",
".",
"append",
"(",
"{",
"'id'",
":",
"id",
"}",
")",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'SONG_DETAIL'",
"r",
".",
"data",
"=",
"{",
"'c'",
":",
"json",
".",
"dumps",
"(",
"c",
")",
",",
"'ids'",
":",
"c",
",",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Get song details by song ID.
:param ids: list of song IDs | [
"通过歌曲",
"ID",
"获取歌曲的详细信息"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L488-L503 |
xiyouMc/ncmbot | ncmbot/core.py | personal_fm | def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response | python | def personal_fm():
""" 个人的 FM ,必须在登录之后调用,即 login 之后调用
"""
r = NCloudBot()
r.method = 'PERSONAL_FM'
r.data = {"csrf_token": ""}
r.send()
return r.response | [
"def",
"personal_fm",
"(",
")",
":",
"r",
"=",
"NCloudBot",
"(",
")",
"r",
".",
"method",
"=",
"'PERSONAL_FM'",
"r",
".",
"data",
"=",
"{",
"\"csrf_token\"",
":",
"\"\"",
"}",
"r",
".",
"send",
"(",
")",
"return",
"r",
".",
"response"
] | Personal FM; must be called after logging in, i.e. after login(). | [
"个人的",
"FM",
"必须在登录之后调用,即",
"login",
"之后调用"
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L507-L514 |
xiyouMc/ncmbot | ncmbot/core.py | NCloudBot._get_webapi_requests | def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req | python | def _get_webapi_requests(self):
"""Update headers of webapi for Requests."""
headers = {
'Accept':
'*/*',
'Accept-Language':
'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection':
'keep-alive',
'Content-Type':
'application/x-www-form-urlencoded',
'Referer':
'http://music.163.com',
'Host':
'music.163.com',
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req | [
"def",
"_get_webapi_requests",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"'Accept'",
":",
"'*/*'",
",",
"'Accept-Language'",
":",
"'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4'",
",",
"'Connection'",
":",
"'keep-alive'",
",",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
",",
"'Referer'",
":",
"'http://music.163.com'",
",",
"'Host'",
":",
"'music.163.com'",
",",
"'User-Agent'",
":",
"'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'",
"}",
"NCloudBot",
".",
"req",
".",
"headers",
".",
"update",
"(",
"headers",
")",
"return",
"NCloudBot",
".",
"req"
] | Update headers of webapi for Requests. | [
"Update",
"headers",
"of",
"webapi",
"for",
"Requests",
"."
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L104-L124 |
xiyouMc/ncmbot | ncmbot/core.py | NCloudBot._build_response | def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers | python | def _build_response(self, resp):
"""Build internal Response object from given response."""
# rememberLogin
# if self.method is 'LOGIN' and resp.json().get('code') == 200:
# cookiesJar.save_cookies(resp, NCloudBot.username)
self.response.content = resp.content
self.response.status_code = resp.status_code
self.response.headers = resp.headers | [
"def",
"_build_response",
"(",
"self",
",",
"resp",
")",
":",
"# rememberLogin",
"# if self.method is 'LOGIN' and resp.json().get('code') == 200:",
"# cookiesJar.save_cookies(resp, NCloudBot.username)",
"self",
".",
"response",
".",
"content",
"=",
"resp",
".",
"content",
"self",
".",
"response",
".",
"status_code",
"=",
"resp",
".",
"status_code",
"self",
".",
"response",
".",
"headers",
"=",
"resp",
".",
"headers"
] | Build internal Response object from given response. | [
"Build",
"internal",
"Response",
"object",
"from",
"given",
"response",
"."
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L141-L148 |
xiyouMc/ncmbot | ncmbot/core.py | NCloudBot.send | def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why | python | def send(self):
"""Sens the request."""
success = False
if self.method is None:
raise ParamsError()
try:
if self.method == 'SEARCH':
req = self._get_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
resp = req.post(_url, data=self.data)
self._build_response(resp)
self.response.ok = True
else:
if isinstance(self.data, dict):
data = encrypted_request(self.data)
req = self._get_webapi_requests()
_url = self.__NETEAST_HOST + self._METHODS[self.method]
if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
_url = _url % self.params['uid']
if self.method in ('LYRIC', 'MUSIC_COMMENT'):
_url = _url % self.params['id']
# GET
if self.method in ('LYRIC'):
resp = req.get(_url)
else:
resp = req.post(_url, data=data)
self._build_response(resp)
self.response.ok = True
except Exception as why:
traceback.print_exc()
print 'Requests Exception', why
# self._build_response(why)
self.response.error = why | [
"def",
"send",
"(",
"self",
")",
":",
"success",
"=",
"False",
"if",
"self",
".",
"method",
"is",
"None",
":",
"raise",
"ParamsError",
"(",
")",
"try",
":",
"if",
"self",
".",
"method",
"==",
"'SEARCH'",
":",
"req",
"=",
"self",
".",
"_get_requests",
"(",
")",
"_url",
"=",
"self",
".",
"__NETEAST_HOST",
"+",
"self",
".",
"_METHODS",
"[",
"self",
".",
"method",
"]",
"resp",
"=",
"req",
".",
"post",
"(",
"_url",
",",
"data",
"=",
"self",
".",
"data",
")",
"self",
".",
"_build_response",
"(",
"resp",
")",
"self",
".",
"response",
".",
"ok",
"=",
"True",
"else",
":",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"dict",
")",
":",
"data",
"=",
"encrypted_request",
"(",
"self",
".",
"data",
")",
"req",
"=",
"self",
".",
"_get_webapi_requests",
"(",
")",
"_url",
"=",
"self",
".",
"__NETEAST_HOST",
"+",
"self",
".",
"_METHODS",
"[",
"self",
".",
"method",
"]",
"if",
"self",
".",
"method",
"in",
"(",
"'USER_DJ'",
",",
"'USER_FOLLOWS'",
",",
"'USER_EVENT'",
")",
":",
"_url",
"=",
"_url",
"%",
"self",
".",
"params",
"[",
"'uid'",
"]",
"if",
"self",
".",
"method",
"in",
"(",
"'LYRIC'",
",",
"'MUSIC_COMMENT'",
")",
":",
"_url",
"=",
"_url",
"%",
"self",
".",
"params",
"[",
"'id'",
"]",
"# GET",
"if",
"self",
".",
"method",
"in",
"(",
"'LYRIC'",
")",
":",
"resp",
"=",
"req",
".",
"get",
"(",
"_url",
")",
"else",
":",
"resp",
"=",
"req",
".",
"post",
"(",
"_url",
",",
"data",
"=",
"data",
")",
"self",
".",
"_build_response",
"(",
"resp",
")",
"self",
".",
"response",
".",
"ok",
"=",
"True",
"except",
"Exception",
"as",
"why",
":",
"traceback",
".",
"print_exc",
"(",
")",
"print",
"'Requests Exception'",
",",
"why",
"# self._build_response(why)",
"self",
".",
"response",
".",
"error",
"=",
"why"
] | Sends the request. | [
"Sens",
"the",
"request",
"."
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L150-L185 |
xiyouMc/ncmbot | ncmbot/core.py | Response.json | def json(self):
"""Returns the json-encoded content of a response, if any."""
if self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content) | python | def json(self):
"""Returns the json-encoded content of a response, if any."""
if self.headers and len(self.content) > 3:
encoding = get_encoding_from_headers(self.headers)
if encoding is not None:
return json.loads(self.content.decode(encoding))
return json.loads(self.content) | [
"def",
"json",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"headers",
"and",
"len",
"(",
"self",
".",
"content",
")",
">",
"3",
":",
"encoding",
"=",
"get_encoding_from_headers",
"(",
"self",
".",
"headers",
")",
"if",
"encoding",
"is",
"not",
"None",
":",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"content",
".",
"decode",
"(",
"encoding",
")",
")",
"return",
"json",
".",
"loads",
"(",
"self",
".",
"content",
")"
] | Returns the json-encoded content of a response, if any. | [
"Returns",
"the",
"json",
"-",
"encoded",
"content",
"of",
"a",
"response",
"if",
"any",
"."
] | train | https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L208-L215 |
has2k1/plydata | plydata/options.py | set_option | def set_option(name, value):
"""
Set plydata option
Parameters
----------
name : str
Name of the option
value : object
New value of the option
Returns
-------
old : object
Old value of the option
See also
--------
:class:`options`
"""
old = get_option(name)
globals()[name] = value
return old | python | def set_option(name, value):
"""
Set plydata option
Parameters
----------
name : str
Name of the option
value : object
New value of the option
Returns
-------
old : object
Old value of the option
See also
--------
:class:`options`
"""
old = get_option(name)
globals()[name] = value
return old | [
"def",
"set_option",
"(",
"name",
",",
"value",
")",
":",
"old",
"=",
"get_option",
"(",
"name",
")",
"globals",
"(",
")",
"[",
"name",
"]",
"=",
"value",
"return",
"old"
] | Set plydata option
Parameters
----------
name : str
Name of the option
value : object
New value of the option
Returns
-------
old : object
Old value of the option
See also
--------
:class:`options` | [
"Set",
"plydata",
"option"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/options.py#L45-L67 |
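A hedged usage sketch of the get/set round trip the docstring describes; 'modify_input_data' is assumed to be a valid option name for illustration.

```python
from plydata.options import get_option, set_option

old = set_option('modify_input_data', True)  # returns the previous value
try:
    pass  # ... code that relies on the option ...
finally:
    set_option('modify_input_data', old)     # restore the old value
```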
has2k1/plydata | plydata/dataframe/two_table.py | _join | def _join(verb):
"""
Join helper
"""
data = pd.merge(verb.x, verb.y, **verb.kwargs)
# Preserve x groups
if isinstance(verb.x, GroupedDataFrame):
data.plydata_groups = list(verb.x.plydata_groups)
return data | python | def _join(verb):
"""
Join helper
"""
data = pd.merge(verb.x, verb.y, **verb.kwargs)
# Preserve x groups
if isinstance(verb.x, GroupedDataFrame):
data.plydata_groups = list(verb.x.plydata_groups)
return data | [
"def",
"_join",
"(",
"verb",
")",
":",
"data",
"=",
"pd",
".",
"merge",
"(",
"verb",
".",
"x",
",",
"verb",
".",
"y",
",",
"*",
"*",
"verb",
".",
"kwargs",
")",
"# Preserve x groups",
"if",
"isinstance",
"(",
"verb",
".",
"x",
",",
"GroupedDataFrame",
")",
":",
"data",
".",
"plydata_groups",
"=",
"list",
"(",
"verb",
".",
"x",
".",
"plydata_groups",
")",
"return",
"data"
] | Join helper | [
"Join",
"helper"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/two_table.py#L50-L59 |
has2k1/plydata | plydata/types.py | GroupedDataFrame.groupby | def groupby(self, by=None, **kwargs):
"""
Group by and do not sort (unless specified)
For plydata use cases, there is no need to specify
group columns.
"""
if by is None:
by = self.plydata_groups
# Turn off sorting; sorting by groups messes with some verbs
if 'sort' not in kwargs:
kwargs['sort'] = False
return super().groupby(by, **kwargs) | python | def groupby(self, by=None, **kwargs):
"""
Group by and do not sort (unless specified)
For plydata use cases, there is no need to specify
group columns.
"""
if by is None:
by = self.plydata_groups
# Turn off sorting; sorting by groups messes with some verbs
if 'sort' not in kwargs:
kwargs['sort'] = False
return super().groupby(by, **kwargs) | [
"def",
"groupby",
"(",
"self",
",",
"by",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"by",
"is",
"None",
":",
"by",
"=",
"self",
".",
"plydata_groups",
"# Turn off sorting by groups messes with some verbs",
"if",
"'sort'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'sort'",
"]",
"=",
"False",
"return",
"super",
"(",
")",
".",
"groupby",
"(",
"by",
",",
"*",
"*",
"kwargs",
")"
] | Group by and do not sort (unless specified)
For plydata use cases, there is no need to specify
group columns. | [
"Group",
"by",
"and",
"do",
"not",
"sort",
"(",
"unless",
"specified",
")"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/types.py#L33-L47 |
has2k1/plydata | plydata/types.py | GroupedDataFrame.group_indices | def group_indices(self):
"""
Return group indices
"""
# No groups
if not self.plydata_groups:
return np.ones(len(self), dtype=int)
grouper = self.groupby()
indices = np.empty(len(self), dtype=int)
for i, (_, idx) in enumerate(sorted(grouper.indices.items())):
indices[idx] = i
return indices | python | def group_indices(self):
"""
Return group indices
"""
# No groups
if not self.plydata_groups:
return np.ones(len(self), dtype=int)
grouper = self.groupby()
indices = np.empty(len(self), dtype=int)
for i, (_, idx) in enumerate(sorted(grouper.indices.items())):
indices[idx] = i
return indices | [
"def",
"group_indices",
"(",
"self",
")",
":",
"# No groups",
"if",
"not",
"self",
".",
"plydata_groups",
":",
"return",
"np",
".",
"ones",
"(",
"len",
"(",
"self",
")",
",",
"dtype",
"=",
"int",
")",
"grouper",
"=",
"self",
".",
"groupby",
"(",
")",
"indices",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"self",
")",
",",
"dtype",
"=",
"int",
")",
"for",
"i",
",",
"(",
"_",
",",
"idx",
")",
"in",
"enumerate",
"(",
"sorted",
"(",
"grouper",
".",
"indices",
".",
"items",
"(",
")",
")",
")",
":",
"indices",
"[",
"idx",
"]",
"=",
"i",
"return",
"indices"
] | Return group indices | [
"Return",
"group",
"indices"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/types.py#L49-L61 |
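A hedged sketch of the result: every row receives the integer index of its group, with groups numbered in sorted key order (via `sorted(grouper.indices.items())`). Construction follows the positional `GroupedDataFrame(df, groups)` call seen elsewhere in this dump.

```python
import pandas as pd
from plydata.types import GroupedDataFrame

df = GroupedDataFrame(pd.DataFrame({'g': list('baab'), 'x': range(4)}), ['g'])
print(df.group_indices())  # [1 0 0 1]: group 'a' sorts before 'b'
```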
has2k1/plydata | plydata/dataframe/helpers.py | _make_verb_helper | def _make_verb_helper(verb_func, add_groups=False):
"""
Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb.
"""
@wraps(verb_func)
def _verb_func(verb):
verb.expressions, new_columns = build_expressions(verb)
if add_groups:
verb.groups = new_columns
return verb_func(verb)
return _verb_func | python | def _make_verb_helper(verb_func, add_groups=False):
"""
Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb.
"""
@wraps(verb_func)
def _verb_func(verb):
verb.expressions, new_columns = build_expressions(verb)
if add_groups:
verb.groups = new_columns
return verb_func(verb)
return _verb_func | [
"def",
"_make_verb_helper",
"(",
"verb_func",
",",
"add_groups",
"=",
"False",
")",
":",
"@",
"wraps",
"(",
"verb_func",
")",
"def",
"_verb_func",
"(",
"verb",
")",
":",
"verb",
".",
"expressions",
",",
"new_columns",
"=",
"build_expressions",
"(",
"verb",
")",
"if",
"add_groups",
":",
"verb",
".",
"groups",
"=",
"new_columns",
"return",
"verb_func",
"(",
"verb",
")",
"return",
"_verb_func"
] | Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb. | [
"Create",
"function",
"that",
"prepares",
"verb",
"for",
"the",
"verb",
"function"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/helpers.py#L156-L188 |
has2k1/plydata | plydata/dataframe/common.py | _get_base_dataframe | def _get_base_dataframe(df):
"""
Remove all columns other than those grouped on
"""
if isinstance(df, GroupedDataFrame):
base_df = GroupedDataFrame(
df.loc[:, df.plydata_groups], df.plydata_groups,
copy=True)
else:
base_df = pd.DataFrame(index=df.index)
return base_df | python | def _get_base_dataframe(df):
"""
Remove all columns other than those grouped on
"""
if isinstance(df, GroupedDataFrame):
base_df = GroupedDataFrame(
df.loc[:, df.plydata_groups], df.plydata_groups,
copy=True)
else:
base_df = pd.DataFrame(index=df.index)
return base_df | [
"def",
"_get_base_dataframe",
"(",
"df",
")",
":",
"if",
"isinstance",
"(",
"df",
",",
"GroupedDataFrame",
")",
":",
"base_df",
"=",
"GroupedDataFrame",
"(",
"df",
".",
"loc",
"[",
":",
",",
"df",
".",
"plydata_groups",
"]",
",",
"df",
".",
"plydata_groups",
",",
"copy",
"=",
"True",
")",
"else",
":",
"base_df",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"df",
".",
"index",
")",
"return",
"base_df"
] | Remove all columns other than those grouped on | [
"Remove",
"all",
"columns",
"other",
"than",
"those",
"grouped",
"on"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L27-L37 |
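A quick check of both branches; the private helper is imported here purely for illustration:

```python
import pandas as pd

from plydata.dataframe.common import _get_base_dataframe
from plydata.types import GroupedDataFrame

gdf = GroupedDataFrame({'g': list('ab'), 'x': [1, 2]}, groups=['g'])
print(list(_get_base_dataframe(gdf).columns))  # ['g'] -- group columns kept

df = pd.DataFrame({'x': [1, 2]})
print(list(_get_base_dataframe(df).columns))   # []   -- only the index kept
```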
has2k1/plydata | plydata/dataframe/common.py | _add_group_columns | def _add_group_columns(data, gdf):
"""
Add group columns to data with a value from the grouped dataframe
It is assumed that the grouped dataframe contains a single group
>>> data = pd.DataFrame({
... 'x': [5, 6, 7]})
>>> gdf = GroupedDataFrame({
... 'g': list('aaa'),
... 'x': range(3)}, groups=['g'])
>>> _add_group_columns(data, gdf)
g x
0 a 5
1 a 6
2 a 7
"""
n = len(data)
if isinstance(gdf, GroupedDataFrame):
for i, col in enumerate(gdf.plydata_groups):
if col not in data:
group_values = [gdf[col].iloc[0]] * n
# Need to be careful and maintain the dtypes
# of the group columns
if pdtypes.is_categorical_dtype(gdf[col]):
col_values = pd.Categorical(
group_values,
categories=gdf[col].cat.categories,
ordered=gdf[col].cat.ordered
)
else:
col_values = pd.Series(
group_values,
index=data.index,
dtype=gdf[col].dtype
)
# Group columns come first
data.insert(i, col, col_values)
return data | python | def _add_group_columns(data, gdf):
"""
Add group columns to data with a value from the grouped dataframe
It is assumed that the grouped dataframe contains a single group
>>> data = pd.DataFrame({
... 'x': [5, 6, 7]})
>>> gdf = GroupedDataFrame({
... 'g': list('aaa'),
... 'x': range(3)}, groups=['g'])
>>> _add_group_columns(data, gdf)
g x
0 a 5
1 a 6
2 a 7
"""
n = len(data)
if isinstance(gdf, GroupedDataFrame):
for i, col in enumerate(gdf.plydata_groups):
if col not in data:
group_values = [gdf[col].iloc[0]] * n
# Need to be careful and maintain the dtypes
# of the group columns
if pdtypes.is_categorical_dtype(gdf[col]):
col_values = pd.Categorical(
group_values,
categories=gdf[col].cat.categories,
ordered=gdf[col].cat.ordered
)
else:
col_values = pd.Series(
group_values,
index=data.index,
dtype=gdf[col].dtype
)
# Group columns come first
data.insert(i, col, col_values)
return data | [
"def",
"_add_group_columns",
"(",
"data",
",",
"gdf",
")",
":",
"n",
"=",
"len",
"(",
"data",
")",
"if",
"isinstance",
"(",
"gdf",
",",
"GroupedDataFrame",
")",
":",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"gdf",
".",
"plydata_groups",
")",
":",
"if",
"col",
"not",
"in",
"data",
":",
"group_values",
"=",
"[",
"gdf",
"[",
"col",
"]",
".",
"iloc",
"[",
"0",
"]",
"]",
"*",
"n",
"# Need to be careful and maintain the dtypes",
"# of the group columns",
"if",
"pdtypes",
".",
"is_categorical_dtype",
"(",
"gdf",
"[",
"col",
"]",
")",
":",
"col_values",
"=",
"pd",
".",
"Categorical",
"(",
"group_values",
",",
"categories",
"=",
"gdf",
"[",
"col",
"]",
".",
"cat",
".",
"categories",
",",
"ordered",
"=",
"gdf",
"[",
"col",
"]",
".",
"cat",
".",
"ordered",
")",
"else",
":",
"col_values",
"=",
"pd",
".",
"Series",
"(",
"group_values",
",",
"index",
"=",
"data",
".",
"index",
",",
"dtype",
"=",
"gdf",
"[",
"col",
"]",
".",
"dtype",
")",
"# Group columns come first",
"data",
".",
"insert",
"(",
"i",
",",
"col",
",",
"col_values",
")",
"return",
"data"
] | Add group columns to data with a value from the grouped dataframe
It is assumed that the grouped dataframe contains a single group
>>> data = pd.DataFrame({
... 'x': [5, 6, 7]})
>>> gdf = GroupedDataFrame({
... 'g': list('aaa'),
... 'x': range(3)}, groups=['g'])
>>> _add_group_columns(data, gdf)
g x
0 a 5
1 a 6
2 a 7 | [
"Add",
"group",
"columns",
"to",
"data",
"with",
"a",
"value",
"from",
"the",
"grouped",
"dataframe"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L40-L78 |
has2k1/plydata | plydata/dataframe/common.py | _create_column | def _create_column(data, col, value):
"""
Create column in dataframe
Helper method meant to deal with problematic
column values, e.g. when the series index does
not match that of the data.
Parameters
----------
data : pandas.DataFrame
dataframe in which to insert value
col : column label
Column name
value : object
Value to assign to column
Returns
-------
data : pandas.DataFrame
Modified original dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> y = pd.Series([11, 12, 13], index=[21, 22, 23])
Data index and value index do not match
>>> _create_column(df, 'y', y)
x y
0 1 11
1 2 12
2 3 13
Non-empty dataframe, scalar value
>>> _create_column(df, 'z', 3)
x y z
0 1 11 3
1 2 12 3
2 3 13 3
Empty dataframe, scalar value
>>> df = pd.DataFrame()
>>> _create_column(df, 'w', 3)
w
0 3
>>> _create_column(df, 'z', 'abc')
w z
0 3 abc
"""
with suppress(AttributeError):
# If the index of a series and the dataframe
# in which the series will be assigned to a
# column do not match, missing values/NaNs
# are created. We do not want that.
if not value.index.equals(data.index):
if len(value) == len(data):
value.index = data.index
else:
value.reset_index(drop=True, inplace=True)
# You cannot assign a scalar value to a dataframe
# without an index. You need an iterable value.
if data.index.empty:
try:
len(value)
except TypeError:
scalar = True
else:
scalar = isinstance(value, str)
if scalar:
value = [value]
data[col] = value
return data | python | def _create_column(data, col, value):
"""
Create column in dataframe
Helper method meant to deal with problematic
column values, e.g. when the series index does
not match that of the data.
Parameters
----------
data : pandas.DataFrame
dataframe in which to insert value
col : column label
Column name
value : object
Value to assign to column
Returns
-------
data : pandas.DataFrame
Modified original dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> y = pd.Series([11, 12, 13], index=[21, 22, 23])
Data index and value index do not match
>>> _create_column(df, 'y', y)
x y
0 1 11
1 2 12
2 3 13
Non-empty dataframe, scalar value
>>> _create_column(df, 'z', 3)
x y z
0 1 11 3
1 2 12 3
2 3 13 3
Empty dataframe, scalar value
>>> df = pd.DataFrame()
>>> _create_column(df, 'w', 3)
w
0 3
>>> _create_column(df, 'z', 'abc')
w z
0 3 abc
"""
with suppress(AttributeError):
# If the index of a series and the dataframe
# in which the series will be assigned to a
# column do not match, missing values/NaNs
# are created. We do not want that.
if not value.index.equals(data.index):
if len(value) == len(data):
value.index = data.index
else:
value.reset_index(drop=True, inplace=True)
# You cannot assign a scalar value to a dataframe
# without an index. You need an iterable value.
if data.index.empty:
try:
len(value)
except TypeError:
scalar = True
else:
scalar = isinstance(value, str)
if scalar:
value = [value]
data[col] = value
return data | [
"def",
"_create_column",
"(",
"data",
",",
"col",
",",
"value",
")",
":",
"with",
"suppress",
"(",
"AttributeError",
")",
":",
"# If the index of a series and the dataframe",
"# in which the series will be assigned to a",
"# column do not match, missing values/NaNs",
"# are created. We do not want that.",
"if",
"not",
"value",
".",
"index",
".",
"equals",
"(",
"data",
".",
"index",
")",
":",
"if",
"len",
"(",
"value",
")",
"==",
"len",
"(",
"data",
")",
":",
"value",
".",
"index",
"=",
"data",
".",
"index",
"else",
":",
"value",
".",
"reset_index",
"(",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"# You cannot assign a scalar value to a dataframe",
"# without an index. You need an interable value.",
"if",
"data",
".",
"index",
".",
"empty",
":",
"try",
":",
"len",
"(",
"value",
")",
"except",
"TypeError",
":",
"scalar",
"=",
"True",
"else",
":",
"scalar",
"=",
"isinstance",
"(",
"value",
",",
"str",
")",
"if",
"scalar",
":",
"value",
"=",
"[",
"value",
"]",
"data",
"[",
"col",
"]",
"=",
"value",
"return",
"data"
] | Create column in dataframe
Helper method meant to deal with problematic
column values, e.g. when the series index does
not match that of the data.
Parameters
----------
data : pandas.DataFrame
dataframe in which to insert value
col : column label
Column name
value : object
Value to assign to column
Returns
-------
data : pandas.DataFrame
Modified original dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> y = pd.Series([11, 12, 13], index=[21, 22, 23])
Data index and value index do not match
>>> _create_column(df, 'y', y)
x y
0 1 11
1 2 12
2 3 13
Non-empty dataframe, scalar value
>>> _create_column(df, 'z', 3)
x y z
0 1 11 3
1 2 12 3
2 3 13 3
Empty dataframe, scalar value
>>> df = pd.DataFrame()
>>> _create_column(df, 'w', 3)
w
0 3
>>> _create_column(df, 'z', 'abc')
w z
0 3 abc | [
"Create",
"column",
"in",
"dataframe"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L81-L157 |
has2k1/plydata | plydata/dataframe/common.py | build_expressions | def build_expressions(verb):
"""
Build expressions for helper verbs
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will be stored in a column with
an existing label, that column is still considered new,
i.e. an expression ``x='x+1'`` will create a new column `x`
to replace an old column `x`.
"""
def partial(func, col, *args, **kwargs):
"""
Make a function that acts on a column in a dataframe
Parameters
----------
func : callable
Function
col : str
Column
args : tuple
Arguments to pass to func
kwargs : dict
Keyword arguments to func
Returns
-------
new_func : callable
Function that takes a dataframe, and calls the
original function on a column in the dataframe.
"""
def new_func(gdf):
return func(gdf[col], *args, **kwargs)
return new_func
def make_statement(func, col):
"""
A statement of function called on a column in a dataframe
Parameters
----------
func : str or callable
Function to call on a dataframe column
col : str
Column
"""
if isinstance(func, str):
expr = '{}({})'.format(func, col)
elif callable(func):
expr = partial(func, col, *verb.args, **verb.kwargs)
else:
raise TypeError("{} is not a function".format(func))
return expr
def func_name(func):
"""
Return name of a function.
If the function is `np.sin`, we return `sin`.
"""
if isinstance(func, str):
return func
try:
return func.__name__
except AttributeError:
return ''
# Generate function names. They act as identifiers (postfixed
# to the original columns) in the new_column names.
if isinstance(verb.functions, (tuple, list)):
names = (func_name(func) for func in verb.functions)
names_and_functions = zip(names, verb.functions)
else:
names_and_functions = verb.functions.items()
# Create statements for the expressions
# and postfix identifiers
columns = Selector.get(verb) # columns to act on
postfixes = []
stmts = []
for name, func in names_and_functions:
postfixes.append(name)
for col in columns:
stmts.append(make_statement(func, col))
if not stmts:
stmts = columns
# Names of the new columns
# e.g. col1_mean, col2_mean, col1_std, col2_std
add_postfix = (isinstance(verb.functions, dict) or
len(verb.functions) > 1)
if add_postfix:
fmt = '{}_{}'.format
new_columns = [fmt(c, p) for p in postfixes for c in columns]
else:
new_columns = columns
expressions = [Expression(stmt, col)
for stmt, col in zip(stmts, new_columns)]
return expressions, new_columns | python | def build_expressions(verb):
"""
Build expressions for helper verbs
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will be stored in a column with
an existing label, that column is still considered new,
i.e. an expression ``x='x+1'`` will create a new column `x`
to replace an old column `x`.
"""
def partial(func, col, *args, **kwargs):
"""
Make a function that acts on a column in a dataframe
Parameters
----------
func : callable
Function
col : str
Column
args : tuple
Arguments to pass to func
kwargs : dict
Keyword arguments to func
Returns
-------
new_func : callable
Function that takes a dataframe, and calls the
original function on a column in the dataframe.
"""
def new_func(gdf):
return func(gdf[col], *args, **kwargs)
return new_func
def make_statement(func, col):
"""
A statement of function called on a column in a dataframe
Parameters
----------
func : str or callable
Function to call on a dataframe column
col : str
Column
"""
if isinstance(func, str):
expr = '{}({})'.format(func, col)
elif callable(func):
expr = partial(func, col, *verb.args, **verb.kwargs)
else:
raise TypeError("{} is not a function".format(func))
return expr
def func_name(func):
"""
Return name of a function.
If the function is `np.sin`, we return `sin`.
"""
if isinstance(func, str):
return func
try:
return func.__name__
except AttributeError:
return ''
# Generate function names. They act as identifiers (postfixed
# to the original columns) in the new_column names.
if isinstance(verb.functions, (tuple, list)):
names = (func_name(func) for func in verb.functions)
names_and_functions = zip(names, verb.functions)
else:
names_and_functions = verb.functions.items()
# Create statements for the expressions
# and postfix identifiers
columns = Selector.get(verb) # columns to act on
postfixes = []
stmts = []
for name, func in names_and_functions:
postfixes.append(name)
for col in columns:
stmts.append(make_statement(func, col))
if not stmts:
stmts = columns
# Names of the new columns
# e.g. col1_mean, col2_mean, col1_std, col2_std
add_postfix = (isinstance(verb.functions, dict) or
len(verb.functions) > 1)
if add_postfix:
fmt = '{}_{}'.format
new_columns = [fmt(c, p) for p in postfixes for c in columns]
else:
new_columns = columns
expressions = [Expression(stmt, col)
for stmt, col in zip(stmts, new_columns)]
return expressions, new_columns | [
"def",
"build_expressions",
"(",
"verb",
")",
":",
"def",
"partial",
"(",
"func",
",",
"col",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Make a function that acts on a column in a dataframe\n\n Parameters\n ----------\n func : callable\n Function\n col : str\n Column\n args : tuple\n Arguments to pass to func\n kwargs : dict\n Keyword arguments to func\n\n Results\n -------\n new_func : callable\n Function that takes a dataframe, and calls the\n original function on a column in the dataframe.\n \"\"\"",
"def",
"new_func",
"(",
"gdf",
")",
":",
"return",
"func",
"(",
"gdf",
"[",
"col",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_func",
"def",
"make_statement",
"(",
"func",
",",
"col",
")",
":",
"\"\"\"\n A statement of function called on a column in a dataframe\n\n Parameters\n ----------\n func : str or callable\n Function to call on a dataframe column\n col : str\n Column\n \"\"\"",
"if",
"isinstance",
"(",
"func",
",",
"str",
")",
":",
"expr",
"=",
"'{}({})'",
".",
"format",
"(",
"func",
",",
"col",
")",
"elif",
"callable",
"(",
"func",
")",
":",
"expr",
"=",
"partial",
"(",
"func",
",",
"col",
",",
"*",
"verb",
".",
"args",
",",
"*",
"*",
"verb",
".",
"kwargs",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"{} is not a function\"",
".",
"format",
"(",
"func",
")",
")",
"return",
"expr",
"def",
"func_name",
"(",
"func",
")",
":",
"\"\"\"\n Return name of a function.\n\n If the function is `np.sin`, we return `sin`.\n \"\"\"",
"if",
"isinstance",
"(",
"func",
",",
"str",
")",
":",
"return",
"func",
"try",
":",
"return",
"func",
".",
"__name__",
"except",
"AttributeError",
":",
"return",
"''",
"# Generate function names. They act as identifiers (postfixed",
"# to the original columns) in the new_column names.",
"if",
"isinstance",
"(",
"verb",
".",
"functions",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"names",
"=",
"(",
"func_name",
"(",
"func",
")",
"for",
"func",
"in",
"verb",
".",
"functions",
")",
"names_and_functions",
"=",
"zip",
"(",
"names",
",",
"verb",
".",
"functions",
")",
"else",
":",
"names_and_functions",
"=",
"verb",
".",
"functions",
".",
"items",
"(",
")",
"# Create statements for the expressions",
"# and postfix identifiers",
"columns",
"=",
"Selector",
".",
"get",
"(",
"verb",
")",
"# columns to act on",
"postfixes",
"=",
"[",
"]",
"stmts",
"=",
"[",
"]",
"for",
"name",
",",
"func",
"in",
"names_and_functions",
":",
"postfixes",
".",
"append",
"(",
"name",
")",
"for",
"col",
"in",
"columns",
":",
"stmts",
".",
"append",
"(",
"make_statement",
"(",
"func",
",",
"col",
")",
")",
"if",
"not",
"stmts",
":",
"stmts",
"=",
"columns",
"# Names of the new columns",
"# e.g col1_mean, col2_mean, col1_std, col2_std",
"add_postfix",
"=",
"(",
"isinstance",
"(",
"verb",
".",
"functions",
",",
"dict",
")",
"or",
"len",
"(",
"verb",
".",
"functions",
")",
">",
"1",
")",
"if",
"add_postfix",
":",
"fmt",
"=",
"'{}_{}'",
".",
"format",
"new_columns",
"=",
"[",
"fmt",
"(",
"c",
",",
"p",
")",
"for",
"p",
"in",
"postfixes",
"for",
"c",
"in",
"columns",
"]",
"else",
":",
"new_columns",
"=",
"columns",
"expressions",
"=",
"[",
"Expression",
"(",
"stmt",
",",
"col",
")",
"for",
"stmt",
",",
"col",
"in",
"zip",
"(",
"stmts",
",",
"new_columns",
")",
"]",
"return",
"expressions",
",",
"new_columns"
] | Build expressions for helper verbs
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will be stored in a column with
an existing label, that column is still considered new,
i.e. an expression ``x='x+1'`` will create a new column `x`
to replace an old column `x`. | [
"Build",
"expressions",
"for",
"helper",
"verbs"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L502-L613 |
has2k1/plydata | plydata/dataframe/common.py | Evaluator.process | def process(self):
"""
Run the expressions
Returns
-------
out : pandas.DataFrame
Resulting data
"""
# Short cut
if self._all_expressions_evaluated():
if self.drop:
# Drop extra columns. They do not correspond to
# any expressions.
columns = [expr.column for expr in self.expressions]
self.data = self.data.loc[:, columns]
return self.data
# group_by
# evaluate expressions
# combine columns
# concat evaluated group data and clean up index and group
gdfs = self._get_group_dataframes()
egdfs = self._evaluate_expressions(gdfs)
edata = self._concat(egdfs)
return edata | python | def process(self):
"""
Run the expressions
Returns
-------
out : pandas.DataFrame
Resulting data
"""
# Short cut
if self._all_expressions_evaluated():
if self.drop:
# Drop extra columns. They do not correspond to
# any expressions.
columns = [expr.column for expr in self.expressions]
self.data = self.data.loc[:, columns]
return self.data
# group_by
# evaluate expressions
# combine columns
# concat evaluated group data and clean up index and group
gdfs = self._get_group_dataframes()
egdfs = self._evaluate_expressions(gdfs)
edata = self._concat(egdfs)
return edata | [
"def",
"process",
"(",
"self",
")",
":",
"# Short cut",
"if",
"self",
".",
"_all_expressions_evaluated",
"(",
")",
":",
"if",
"self",
".",
"drop",
":",
"# Drop extra columns. They do not correspond to",
"# any expressions.",
"columns",
"=",
"[",
"expr",
".",
"column",
"for",
"expr",
"in",
"self",
".",
"expressions",
"]",
"self",
".",
"data",
"=",
"self",
".",
"data",
".",
"loc",
"[",
":",
",",
"columns",
"]",
"return",
"self",
".",
"data",
"# group_by",
"# evaluate expressions",
"# combine columns",
"# concat evalutated group data and clean up index and group",
"gdfs",
"=",
"self",
".",
"_get_group_dataframes",
"(",
")",
"egdfs",
"=",
"self",
".",
"_evaluate_expressions",
"(",
"gdfs",
")",
"edata",
"=",
"self",
".",
"_concat",
"(",
"egdfs",
")",
"return",
"edata"
] | Run the expressions
Returns
-------
out : pandas.DataFrame
Resulting data | [
"Run",
"the",
"expressions"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L195-L220 |
has2k1/plydata | plydata/dataframe/common.py | Evaluator._all_expressions_evaluated | def _all_expressions_evaluated(self):
"""
Return True if all expressions match with the columns
Saves some processor cycles
"""
def present(expr):
return expr.stmt == expr.column and expr.column in self.data
return all(present(expr) for expr in self.expressions) | python | def _all_expressions_evaluated(self):
"""
Return True if all expressions match with the columns
Saves some processor cycles
"""
def present(expr):
return expr.stmt == expr.column and expr.column in self.data
return all(present(expr) for expr in self.expressions) | [
"def",
"_all_expressions_evaluated",
"(",
"self",
")",
":",
"def",
"present",
"(",
"expr",
")",
":",
"return",
"expr",
".",
"stmt",
"==",
"expr",
".",
"column",
"and",
"expr",
".",
"column",
"in",
"self",
".",
"data",
"return",
"all",
"(",
"present",
"(",
"expr",
")",
"for",
"expr",
"in",
"self",
".",
"expressions",
")"
] | Return True if all expressions match with the columns
Saves some processor cycles | [
"Return",
"True",
"all",
"expressions",
"match",
"with",
"the",
"columns"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L222-L230 |
has2k1/plydata | plydata/dataframe/common.py | Evaluator._get_group_dataframes | def _get_group_dataframes(self):
"""
Get group dataframes
Returns
-------
out : tuple or generator
Group dataframes
"""
if isinstance(self.data, GroupedDataFrame):
grouper = self.data.groupby()
# groupby on categorical columns uses the categories
# even if they are not present in the data. This
# leads to empty groups. We exclude them.
return (gdf for _, gdf in grouper if not gdf.empty)
else:
return (self.data, ) | python | def _get_group_dataframes(self):
"""
Get group dataframes
Returns
-------
out : tuple or generator
Group dataframes
"""
if isinstance(self.data, GroupedDataFrame):
grouper = self.data.groupby()
# groupby on categorical columns uses the categories
# even if they are not present in the data. This
# leads to empty groups. We exclude them.
return (gdf for _, gdf in grouper if not gdf.empty)
else:
return (self.data, ) | [
"def",
"_get_group_dataframes",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"GroupedDataFrame",
")",
":",
"grouper",
"=",
"self",
".",
"data",
".",
"groupby",
"(",
")",
"# groupby on categorical columns uses the categories",
"# even if they are not present in the data. This",
"# leads to empty groups. We exclude them.",
"return",
"(",
"gdf",
"for",
"_",
",",
"gdf",
"in",
"grouper",
"if",
"not",
"gdf",
".",
"empty",
")",
"else",
":",
"return",
"(",
"self",
".",
"data",
",",
")"
] | Get group dataframes
Returns
-------
out : tuple or generator
Group dataframes | [
"Get",
"group",
"dataframes"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L232-L248 |
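The emptiness check guards against standard pandas behaviour: grouping on a categorical includes categories with no rows. A minimal reproduction in plain pandas (observed=False spelled out for stability across versions):

```python
import pandas as pd

df = pd.DataFrame({
    'g': pd.Categorical(['a', 'a'], categories=['a', 'b']),
    'x': [1, 2]})
print({k: len(v) for k, v in df.groupby('g', observed=False)})
# {'a': 2, 'b': 0} -- 'b' is the empty group the code above skips
```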
has2k1/plydata | plydata/dataframe/common.py | Evaluator._evaluate_group_dataframe | def _evaluate_group_dataframe(self, gdf):
"""
Evaluate a single group dataframe
Parameters
----------
gdf : pandas.DataFrame
Input group dataframe
Returns
-------
out : pandas.DataFrame
Result data
"""
gdf._is_copy = None
result_index = gdf.index if self.keep_index else []
data = pd.DataFrame(index=result_index)
for expr in self.expressions:
value = expr.evaluate(gdf, self.env)
if isinstance(value, pd.DataFrame):
data = value
break
else:
_create_column(data, expr.column, value)
data = _add_group_columns(data, gdf)
return data | python | def _evaluate_group_dataframe(self, gdf):
"""
Evaluate a single group dataframe
Parameters
----------
gdf : pandas.DataFrame
Input group dataframe
Returns
-------
out : pandas.DataFrame
Result data
"""
gdf._is_copy = None
result_index = gdf.index if self.keep_index else []
data = pd.DataFrame(index=result_index)
for expr in self.expressions:
value = expr.evaluate(gdf, self.env)
if isinstance(value, pd.DataFrame):
data = value
break
else:
_create_column(data, expr.column, value)
data = _add_group_columns(data, gdf)
return data | [
"def",
"_evaluate_group_dataframe",
"(",
"self",
",",
"gdf",
")",
":",
"gdf",
".",
"_is_copy",
"=",
"None",
"result_index",
"=",
"gdf",
".",
"index",
"if",
"self",
".",
"keep_index",
"else",
"[",
"]",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"result_index",
")",
"for",
"expr",
"in",
"self",
".",
"expressions",
":",
"value",
"=",
"expr",
".",
"evaluate",
"(",
"gdf",
",",
"self",
".",
"env",
")",
"if",
"isinstance",
"(",
"value",
",",
"pd",
".",
"DataFrame",
")",
":",
"data",
"=",
"value",
"break",
"else",
":",
"_create_column",
"(",
"data",
",",
"expr",
".",
"column",
",",
"value",
")",
"data",
"=",
"_add_group_columns",
"(",
"data",
",",
"gdf",
")",
"return",
"data"
] | Evaluate a single group dataframe
Parameters
----------
gdf : pandas.DataFrame
Input group dataframe
Returns
-------
out : pandas.DataFrame
Result data | [
"Evaluate",
"a",
"single",
"group",
"dataframe"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L266-L291 |
has2k1/plydata | plydata/dataframe/common.py | Evaluator._concat | def _concat(self, egdfs):
"""
Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data
"""
egdfs = list(egdfs)
edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False)
# groupby can mix up the rows. We try to maintain the original
# order, but we can only do that if the result has a one to
# one relationship with the original
one2one = (
self.keep_index and
not any(edata.index.duplicated()) and
len(edata.index) == len(self.data.index))
if one2one:
edata = edata.sort_index()
else:
edata.reset_index(drop=True, inplace=True)
# Maybe this should happen in the verb functions
if self.keep_groups and self.groups:
edata = GroupedDataFrame(edata, groups=self.groups)
return edata | python | def _concat(self, egdfs):
"""
Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data
"""
egdfs = list(egdfs)
edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False)
# groupby can mix up the rows. We try to maintain the original
# order, but we can only do that if the result has a one to
# one relationship with the original
one2one = (
self.keep_index and
not any(edata.index.duplicated()) and
len(edata.index) == len(self.data.index))
if one2one:
edata = edata.sort_index()
else:
edata.reset_index(drop=True, inplace=True)
# Maybe this should happen in the verb functions
if self.keep_groups and self.groups:
edata = GroupedDataFrame(edata, groups=self.groups)
return edata | [
"def",
"_concat",
"(",
"self",
",",
"egdfs",
")",
":",
"egdfs",
"=",
"list",
"(",
"egdfs",
")",
"edata",
"=",
"pd",
".",
"concat",
"(",
"egdfs",
",",
"axis",
"=",
"0",
",",
"ignore_index",
"=",
"False",
",",
"copy",
"=",
"False",
")",
"# groupby can mixup the rows. We try to maintain the original",
"# order, but we can only do that if the result has a one to",
"# one relationship with the original",
"one2one",
"=",
"(",
"self",
".",
"keep_index",
"and",
"not",
"any",
"(",
"edata",
".",
"index",
".",
"duplicated",
"(",
")",
")",
"and",
"len",
"(",
"edata",
".",
"index",
")",
"==",
"len",
"(",
"self",
".",
"data",
".",
"index",
")",
")",
"if",
"one2one",
":",
"edata",
"=",
"edata",
".",
"sort_index",
"(",
")",
"else",
":",
"edata",
".",
"reset_index",
"(",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"# Maybe this should happen in the verb functions",
"if",
"self",
".",
"keep_groups",
"and",
"self",
".",
"groups",
":",
"edata",
"=",
"GroupedDataFrame",
"(",
"edata",
",",
"groups",
"=",
"self",
".",
"groups",
")",
"return",
"edata"
] | Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data | [
"Concatenate",
"evaluated",
"group",
"dataframes"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L293-L325 |
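The one-to-one reordering can be seen with plain pandas frames standing in for evaluated group results (values are illustrative):

```python
import pandas as pd

part_b = pd.DataFrame({'x': [10, 40]}, index=[0, 3])  # rows of group 'b'
part_a = pd.DataFrame({'x': [20, 30]}, index=[1, 2])  # rows of group 'a'
edata = pd.concat([part_b, part_a], axis=0)
if not any(edata.index.duplicated()) and len(edata) == 4:
    edata = edata.sort_index()                        # restore input order
print(list(edata['x']))   # [10, 20, 30, 40]
```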
has2k1/plydata | plydata/dataframe/common.py | Selector._resolve_slices | def _resolve_slices(data_columns, names):
"""
Convert any slices into column names
Parameters
----------
data_columns : pandas.Index
Dataframe columns
names : tuple
Names (including slices) of columns in the
dataframe.
Returns
-------
out : tuple
Names of columns in the dataframe. Has no
slices.
"""
def _get_slice_cols(sc):
"""
Convert slice to list of names
"""
# Just like pandas.DataFrame.loc the stop
# column is included
idx_start = data_columns.get_loc(sc.start)
idx_stop = data_columns.get_loc(sc.stop) + 1
return data_columns[idx_start:idx_stop:sc.step]
result = []
for col in names:
if isinstance(col, slice):
result.extend(_get_slice_cols(col))
else:
result.append(col)
return tuple(result) | python | def _resolve_slices(data_columns, names):
"""
Convert any slices into column names
Parameters
----------
data_columns : pandas.Index
Dataframe columns
names : tuple
Names (including slices) of columns in the
dataframe.
Returns
-------
out : tuple
Names of columns in the dataframe. Has no
slices.
"""
def _get_slice_cols(sc):
"""
Convert slice to list of names
"""
# Just like pandas.DataFrame.loc the stop
# column is included
idx_start = data_columns.get_loc(sc.start)
idx_stop = data_columns.get_loc(sc.stop) + 1
return data_columns[idx_start:idx_stop:sc.step]
result = []
for col in names:
if isinstance(col, slice):
result.extend(_get_slice_cols(col))
else:
result.append(col)
return tuple(result) | [
"def",
"_resolve_slices",
"(",
"data_columns",
",",
"names",
")",
":",
"def",
"_get_slice_cols",
"(",
"sc",
")",
":",
"\"\"\"\n Convert slice to list of names\n \"\"\"",
"# Just like pandas.DataFrame.loc the stop",
"# column is included",
"idx_start",
"=",
"data_columns",
".",
"get_loc",
"(",
"sc",
".",
"start",
")",
"idx_stop",
"=",
"data_columns",
".",
"get_loc",
"(",
"sc",
".",
"stop",
")",
"+",
"1",
"return",
"data_columns",
"[",
"idx_start",
":",
"idx_stop",
":",
"sc",
".",
"step",
"]",
"result",
"=",
"[",
"]",
"for",
"col",
"in",
"names",
":",
"if",
"isinstance",
"(",
"col",
",",
"slice",
")",
":",
"result",
".",
"extend",
"(",
"_get_slice_cols",
"(",
"col",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"col",
")",
"return",
"tuple",
"(",
"result",
")"
] | Convert any slices into column names
Parameters
----------
data_columns : pandas.Index
Dataframe columns
names : tuple
Names (including slices) of columns in the
dataframe.
Returns
-------
out : tuple
Names of columns in the dataframe. Has no
slices. | [
"Convert",
"any",
"slices",
"into",
"column",
"names"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L333-L367 |
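A direct check of the inclusive-stop behaviour; the private helper is called via the class purely for illustration (it takes no self or cls, so it behaves as a static method):

```python
import pandas as pd

from plydata.dataframe.common import Selector

cols = pd.Index(['a', 'b', 'c', 'd'])
print(Selector._resolve_slices(cols, (slice('b', 'd'), 'a')))
# ('b', 'c', 'd', 'a') -- the stop column 'd' is included
```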
has2k1/plydata | plydata/dataframe/common.py | Selector.select | def select(cls, verb):
"""
Return selected columns for the select verb
Parameters
----------
verb : object
verb with the column selection attributes:
- names
- startswith
- endswith
- contains
- matches
"""
columns = verb.data.columns
contains = verb.contains
matches = verb.matches
groups = _get_groups(verb)
names = cls._resolve_slices(columns, verb.names)
names_set = set(names)
groups_set = set(groups)
lst = [[]]
if names or groups:
# group variables missing from the selection are prepended
missing = [g for g in groups if g not in names_set]
missing_set = set(missing)
c1 = missing + [x for x in names if x not in missing_set]
lst.append(c1)
if verb.startswith:
c2 = [x for x in columns
if isinstance(x, str) and x.startswith(verb.startswith)]
lst.append(c2)
if verb.endswith:
c3 = [x for x in columns if
isinstance(x, str) and x.endswith(verb.endswith)]
lst.append(c3)
if contains:
c4 = []
for col in columns:
if (isinstance(col, str) and
any(s in col for s in contains)):
c4.append(col)
lst.append(c4)
if matches:
c5 = []
patterns = [x if hasattr(x, 'match') else re.compile(x)
for x in matches]
for col in columns:
if isinstance(col, str):
if any(bool(p.match(col)) for p in patterns):
c5.append(col)
lst.append(c5)
selected = unique(list(itertools.chain(*lst)))
if verb.drop:
to_drop = [col for col in selected if col not in groups_set]
selected = [col for col in columns if col not in to_drop]
return selected | python | def select(cls, verb):
"""
Return selected columns for the select verb
Parameters
----------
verb : object
verb with the column selection attributes:
- names
- startswith
- endswith
- contains
- matches
"""
columns = verb.data.columns
contains = verb.contains
matches = verb.matches
groups = _get_groups(verb)
names = cls._resolve_slices(columns, verb.names)
names_set = set(names)
groups_set = set(groups)
lst = [[]]
if names or groups:
# group variables missing from the selection are prepended
missing = [g for g in groups if g not in names_set]
missing_set = set(missing)
c1 = missing + [x for x in names if x not in missing_set]
lst.append(c1)
if verb.startswith:
c2 = [x for x in columns
if isinstance(x, str) and x.startswith(verb.startswith)]
lst.append(c2)
if verb.endswith:
c3 = [x for x in columns if
isinstance(x, str) and x.endswith(verb.endswith)]
lst.append(c3)
if contains:
c4 = []
for col in columns:
if (isinstance(col, str) and
any(s in col for s in contains)):
c4.append(col)
lst.append(c4)
if matches:
c5 = []
patterns = [x if hasattr(x, 'match') else re.compile(x)
for x in matches]
for col in columns:
if isinstance(col, str):
if any(bool(p.match(col)) for p in patterns):
c5.append(col)
lst.append(c5)
selected = unique(list(itertools.chain(*lst)))
if verb.drop:
to_drop = [col for col in selected if col not in groups_set]
selected = [col for col in columns if col not in to_drop]
return selected | [
"def",
"select",
"(",
"cls",
",",
"verb",
")",
":",
"columns",
"=",
"verb",
".",
"data",
".",
"columns",
"contains",
"=",
"verb",
".",
"contains",
"matches",
"=",
"verb",
".",
"matches",
"groups",
"=",
"_get_groups",
"(",
"verb",
")",
"names",
"=",
"cls",
".",
"_resolve_slices",
"(",
"columns",
",",
"verb",
".",
"names",
")",
"names_set",
"=",
"set",
"(",
"names",
")",
"groups_set",
"=",
"set",
"(",
"groups",
")",
"lst",
"=",
"[",
"[",
"]",
"]",
"if",
"names",
"or",
"groups",
":",
"# group variable missing from the selection are prepended",
"missing",
"=",
"[",
"g",
"for",
"g",
"in",
"groups",
"if",
"g",
"not",
"in",
"names_set",
"]",
"missing_set",
"=",
"set",
"(",
"missing",
")",
"c1",
"=",
"missing",
"+",
"[",
"x",
"for",
"x",
"in",
"names",
"if",
"x",
"not",
"in",
"missing_set",
"]",
"lst",
".",
"append",
"(",
"c1",
")",
"if",
"verb",
".",
"startswith",
":",
"c2",
"=",
"[",
"x",
"for",
"x",
"in",
"columns",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
"and",
"x",
".",
"startswith",
"(",
"verb",
".",
"startswith",
")",
"]",
"lst",
".",
"append",
"(",
"c2",
")",
"if",
"verb",
".",
"endswith",
":",
"c3",
"=",
"[",
"x",
"for",
"x",
"in",
"columns",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
"and",
"x",
".",
"endswith",
"(",
"verb",
".",
"endswith",
")",
"]",
"lst",
".",
"append",
"(",
"c3",
")",
"if",
"contains",
":",
"c4",
"=",
"[",
"]",
"for",
"col",
"in",
"columns",
":",
"if",
"(",
"isinstance",
"(",
"col",
",",
"str",
")",
"and",
"any",
"(",
"s",
"in",
"col",
"for",
"s",
"in",
"contains",
")",
")",
":",
"c4",
".",
"append",
"(",
"col",
")",
"lst",
".",
"append",
"(",
"c4",
")",
"if",
"matches",
":",
"c5",
"=",
"[",
"]",
"patterns",
"=",
"[",
"x",
"if",
"hasattr",
"(",
"x",
",",
"'match'",
")",
"else",
"re",
".",
"compile",
"(",
"x",
")",
"for",
"x",
"in",
"matches",
"]",
"for",
"col",
"in",
"columns",
":",
"if",
"isinstance",
"(",
"col",
",",
"str",
")",
":",
"if",
"any",
"(",
"bool",
"(",
"p",
".",
"match",
"(",
"col",
")",
")",
"for",
"p",
"in",
"patterns",
")",
":",
"c5",
".",
"append",
"(",
"col",
")",
"lst",
".",
"append",
"(",
"c5",
")",
"selected",
"=",
"unique",
"(",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"lst",
")",
")",
")",
"if",
"verb",
".",
"drop",
":",
"to_drop",
"=",
"[",
"col",
"for",
"col",
"in",
"selected",
"if",
"col",
"not",
"in",
"groups_set",
"]",
"selected",
"=",
"[",
"col",
"for",
"col",
"in",
"columns",
"if",
"col",
"not",
"in",
"to_drop",
"]",
"return",
"selected"
] | Return selected columns for the select verb
Parameters
----------
verb : object
verb with the column selection attributes:
- names
- startswith
- endswith
- contains
- matches | [
"Return",
"selected",
"columns",
"for",
"the",
"select",
"verb"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L370-L437 |
has2k1/plydata | plydata/dataframe/common.py | Selector._all | def _all(cls, verb):
"""
A verb
"""
groups = set(_get_groups(verb))
return [col for col in verb.data if col not in groups] | python | def _all(cls, verb):
"""
A verb
"""
groups = set(_get_groups(verb))
return [col for col in verb.data if col not in groups] | [
"def",
"_all",
"(",
"cls",
",",
"verb",
")",
":",
"groups",
"=",
"set",
"(",
"_get_groups",
"(",
"verb",
")",
")",
"return",
"[",
"col",
"for",
"col",
"in",
"verb",
".",
"data",
"if",
"col",
"not",
"in",
"groups",
"]"
] | A verb | [
"A",
"verb"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L440-L445 |
has2k1/plydata | plydata/dataframe/common.py | Selector._at | def _at(cls, verb):
"""
A verb with a select text match
"""
# Named (listed) columns are always included
columns = cls.select(verb)
final_columns_set = set(cls.select(verb))
groups_set = set(_get_groups(verb))
final_columns_set -= groups_set - set(verb.names)
def pred(col):
if col not in verb.data:
raise KeyError(
"Unknown column name, {!r}".format(col))
return col in final_columns_set
return [col for col in columns if pred(col)] | python | def _at(cls, verb):
"""
A verb with a select text match
"""
# Named (listed) columns are always included
columns = cls.select(verb)
final_columns_set = set(cls.select(verb))
groups_set = set(_get_groups(verb))
final_columns_set -= groups_set - set(verb.names)
def pred(col):
if col not in verb.data:
raise KeyError(
"Unknown column name, {!r}".format(col))
return col in final_columns_set
return [col for col in columns if pred(col)] | [
"def",
"_at",
"(",
"cls",
",",
"verb",
")",
":",
"# Named (listed) columns are always included",
"columns",
"=",
"cls",
".",
"select",
"(",
"verb",
")",
"final_columns_set",
"=",
"set",
"(",
"cls",
".",
"select",
"(",
"verb",
")",
")",
"groups_set",
"=",
"set",
"(",
"_get_groups",
"(",
"verb",
")",
")",
"final_columns_set",
"-=",
"groups_set",
"-",
"set",
"(",
"verb",
".",
"names",
")",
"def",
"pred",
"(",
"col",
")",
":",
"if",
"col",
"not",
"in",
"verb",
".",
"data",
":",
"raise",
"KeyError",
"(",
"\"Unknown column name, {!r}\"",
".",
"format",
"(",
"col",
")",
")",
"return",
"col",
"in",
"final_columns_set",
"return",
"[",
"col",
"for",
"col",
"in",
"columns",
"if",
"pred",
"(",
"col",
")",
"]"
] | A verb with a select text match | [
"A",
"verb",
"with",
"a",
"select",
"text",
"match"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L448-L464 |
has2k1/plydata | plydata/dataframe/common.py | Selector._if | def _if(cls, verb):
"""
A verb with a predicate function
"""
pred = verb.predicate
data = verb.data
groups = set(_get_groups(verb))
# Coerce the predicate into a callable
if isinstance(pred, str):
if not pred.endswith('_dtype'):
pred = '{}_dtype'.format(pred)
pred = getattr(pdtypes, pred)
elif pdtypes.is_bool_dtype(np.array(pred)):
# Turn boolean array into a predicate function
it = iter(pred)
def pred(col):
return next(it)
return [col for col in data
if pred(data[col]) and col not in groups] | python | def _if(cls, verb):
"""
A verb with a predicate function
"""
pred = verb.predicate
data = verb.data
groups = set(_get_groups(verb))
# Coerce the predicate into a callable
if isinstance(pred, str):
if not pred.endswith('_dtype'):
pred = '{}_dtype'.format(pred)
pred = getattr(pdtypes, pred)
elif pdtypes.is_bool_dtype(np.array(pred)):
# Turn boolean array into a predicate function
it = iter(pred)
def pred(col):
return next(it)
return [col for col in data
if pred(data[col]) and col not in groups] | [
"def",
"_if",
"(",
"cls",
",",
"verb",
")",
":",
"pred",
"=",
"verb",
".",
"predicate",
"data",
"=",
"verb",
".",
"data",
"groups",
"=",
"set",
"(",
"_get_groups",
"(",
"verb",
")",
")",
"# force predicate",
"if",
"isinstance",
"(",
"pred",
",",
"str",
")",
":",
"if",
"not",
"pred",
".",
"endswith",
"(",
"'_dtype'",
")",
":",
"pred",
"=",
"'{}_dtype'",
".",
"format",
"(",
"pred",
")",
"pred",
"=",
"getattr",
"(",
"pdtypes",
",",
"pred",
")",
"elif",
"pdtypes",
".",
"is_bool_dtype",
"(",
"np",
".",
"array",
"(",
"pred",
")",
")",
":",
"# Turn boolean array into a predicate function",
"it",
"=",
"iter",
"(",
"pred",
")",
"def",
"pred",
"(",
"col",
")",
":",
"return",
"next",
"(",
"it",
")",
"return",
"[",
"col",
"for",
"col",
"in",
"data",
"if",
"pred",
"(",
"data",
"[",
"col",
"]",
")",
"and",
"col",
"not",
"in",
"groups",
"]"
] | A verb with a predicate function | [
"A",
"verb",
"with",
"a",
"predicate",
"function"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L467-L488 |
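The string-predicate normalization targets the pandas dtype predicates; assuming pdtypes is pandas.api.types (as the is_categorical_dtype/is_bool_dtype calls elsewhere suggest), the mapping works like this:

```python
from pandas.api import types as pdtypes

pred = 'is_numeric'                       # user-supplied shorthand
if not pred.endswith('_dtype'):
    pred = '{}_dtype'.format(pred)
print(getattr(pdtypes, pred).__name__)    # is_numeric_dtype
```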
has2k1/plydata | plydata/operators.py | get_verb_function | def get_verb_function(data, verb):
"""
Return function that implements the verb for given data type
"""
try:
module = type_lookup[type(data)]
except KeyError:
# Some guess work for subclasses
for type_, mod in type_lookup.items():
if isinstance(data, type_):
module = mod
break
try:
return getattr(module, verb)
except (NameError, AttributeError):
msg = "Data source of type '{}' is not supported."
raise TypeError(msg.format(type(data))) | python | def get_verb_function(data, verb):
"""
Return function that implements the verb for given data type
"""
try:
module = type_lookup[type(data)]
except KeyError:
# Some guess work for subclasses
for type_, mod in type_lookup.items():
if isinstance(data, type_):
module = mod
break
try:
return getattr(module, verb)
except (NameError, AttributeError):
msg = "Data source of type '{}' is not supported."
raise TypeError(msg.format(type(data))) | [
"def",
"get_verb_function",
"(",
"data",
",",
"verb",
")",
":",
"try",
":",
"module",
"=",
"type_lookup",
"[",
"type",
"(",
"data",
")",
"]",
"except",
"KeyError",
":",
"# Some guess work for subclasses",
"for",
"type_",
",",
"mod",
"in",
"type_lookup",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"type_",
")",
":",
"module",
"=",
"mod",
"break",
"try",
":",
"return",
"getattr",
"(",
"module",
",",
"verb",
")",
"except",
"(",
"NameError",
",",
"AttributeError",
")",
":",
"msg",
"=",
"\"Data source of type '{}' is not supported.\"",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"data",
")",
")",
")"
] | Return function that implements the verb for given data type | [
"Return",
"function",
"that",
"implements",
"the",
"verb",
"for",
"given",
"data",
"type"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/operators.py#L23-L39 |
has2k1/plydata | plydata/expressions.py | Expression | def Expression(*args, **kwargs):
"""
Return an appropriate Expression given the arguments
Parameters
----------
args : tuple
Positional arguments passed to the Expression class
kwargs : dict
Keyword arguments passed to the Expression class
"""
# dispatch
if not hasattr(args[0], '_Expression'):
return BaseExpression(*args, **kwargs)
else:
return args[0]._Expression(*args, **kwargs) | python | def Expression(*args, **kwargs):
"""
Return an appropriate Expression given the arguments
Parameters
----------
args : tuple
Positional arguments passed to the Expression class
kwargs : dict
Keyword arguments passed to the Expression class
"""
# dispatch
if not hasattr(args[0], '_Expression'):
return BaseExpression(*args, **kwargs)
else:
return args[0]._Expression(*args, **kwargs) | [
"def",
"Expression",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# dispatch",
"if",
"not",
"hasattr",
"(",
"args",
"[",
"0",
"]",
",",
"'_Expression'",
")",
":",
"return",
"BaseExpression",
"(",
"*",
"args",
",",
"*",
"kwargs",
")",
"else",
":",
"return",
"args",
"[",
"0",
"]",
".",
"_Expression",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return an appropriate Expression given the arguments
Parameters
----------
args : tuple
Positional arguments passed to the Expression class
kwargs : dict
Keyword arguments passed to the Expression class | [
"Return",
"an",
"appropriate",
"Expression",
"given",
"the",
"arguments"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/expressions.py#L176-L191 |
has2k1/plydata | plydata/expressions.py | BaseExpression.evaluate | def evaluate(self, data, env):
"""
Evaluate statement
Parameters
----------
data : pandas.DataFrame
Data in whose namespace the statement will be
evaluated. Typically, this is a group dataframe.
Returns
-------
out : object
Result of the evaluation.
"""
def n():
"""
Return number of rows in groups
This function is part of the public API
"""
return len(data)
if isinstance(self.stmt, str):
# Add function n() that computes the
# size of the group data to the inner namespace.
if self._has_n_func:
namespace = dict(data, n=n)
else:
namespace = data
# Avoid obvious keywords e.g if a column
# is named class
if self.stmt not in KEYWORDS:
value = env.eval(
self.stmt,
source_name='Expression.evaluate',
inner_namespace=namespace)
else:
value = namespace[self.stmt]
elif callable(self.stmt):
value = self.stmt(data)
else:
value = self.stmt
return value | python | def evaluate(self, data, env):
"""
Evaluate statement
Parameters
----------
data : pandas.DataFrame
Data in whose namespace the statement will be
evaluated. Typically, this is a group dataframe.
Returns
-------
out : object
Result of the evaluation.
"""
def n():
"""
Return number of rows in groups
This function is part of the public API
"""
return len(data)
if isinstance(self.stmt, str):
# Add function n() that computes the
# size of the group data to the inner namespace.
if self._has_n_func:
namespace = dict(data, n=n)
else:
namespace = data
# Avoid obvious keywords e.g if a column
# is named class
if self.stmt not in KEYWORDS:
value = env.eval(
self.stmt,
source_name='Expression.evaluate',
inner_namespace=namespace)
else:
value = namespace[self.stmt]
elif callable(self.stmt):
value = self.stmt(data)
else:
value = self.stmt
return value | [
"def",
"evaluate",
"(",
"self",
",",
"data",
",",
"env",
")",
":",
"def",
"n",
"(",
")",
":",
"\"\"\"\n Return number of rows in groups\n\n This function is part of the public API\n \"\"\"",
"return",
"len",
"(",
"data",
")",
"if",
"isinstance",
"(",
"self",
".",
"stmt",
",",
"str",
")",
":",
"# Add function n() that computes the",
"# size of the group data to the inner namespace.",
"if",
"self",
".",
"_has_n_func",
":",
"namespace",
"=",
"dict",
"(",
"data",
",",
"n",
"=",
"n",
")",
"else",
":",
"namespace",
"=",
"data",
"# Avoid obvious keywords e.g if a column",
"# is named class",
"if",
"self",
".",
"stmt",
"not",
"in",
"KEYWORDS",
":",
"value",
"=",
"env",
".",
"eval",
"(",
"self",
".",
"stmt",
",",
"source_name",
"=",
"'Expression.evaluate'",
",",
"inner_namespace",
"=",
"namespace",
")",
"else",
":",
"value",
"=",
"namespace",
"[",
"self",
".",
"stmt",
"]",
"elif",
"callable",
"(",
"self",
".",
"stmt",
")",
":",
"value",
"=",
"self",
".",
"stmt",
"(",
"data",
")",
"else",
":",
"value",
"=",
"self",
".",
"stmt",
"return",
"value"
] | Evaluate statement
Parameters
----------
data : pandas.DataFrame
Data in whose namespace the statement will be
evaluated. Typically, this is a group dataframe.
Returns
-------
out : object
Result of the evaluation. | [
"Evaluate",
"statement"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/expressions.py#L58-L101 |
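A simplified stand-in (not plydata's Expression class itself) showing how the n() helper enters the namespace a statement is evaluated in:

```python
import pandas as pd

data = pd.DataFrame({'x': [1, 2, 3]})

def n():
    # Number of rows in the (group) dataframe being evaluated
    return len(data)

namespace = dict(data, n=n)   # column Series plus the n() helper
print(eval('n() + x.sum()', {}, namespace))   # 3 + 6 = 9
```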
has2k1/plydata | plydata/expressions.py | CaseWhenExpression.evaluate | def evaluate(self, data, env):
"""
Evaluate the predicates and values
"""
# For each predicate-value, we keep track of the positions
# that have been copied to the result, so that the later
# more general values do not overwrite the previous ones.
result = np.repeat(None, len(data))
copied = np.repeat(False, len(data))
for pred_expr, value_expr in self.pv_expressions:
bool_idx = pred_expr.evaluate(data, env)
if not pdtypes.is_bool_dtype(np.asarray(bool_idx)):
raise TypeError(
"The predicate keys must return a boolean array, "
"or a boolean value.")
value = value_expr.evaluate(data, env)
mask = (copied ^ bool_idx) & bool_idx
copied |= bool_idx
idx = np.where(mask)[0]
result[idx] = self.nice_value(value, idx)
return np.array(list(result)) | python | def evaluate(self, data, env):
"""
Evaluate the predicates and values
"""
# For each predicate-value, we keep track of the positions
# that have been copied to the result, so that the later
# more general values do not overwrite the previous ones.
result = np.repeat(None, len(data))
copied = np.repeat(False, len(data))
for pred_expr, value_expr in self.pv_expressions:
bool_idx = pred_expr.evaluate(data, env)
if not pdtypes.is_bool_dtype(np.asarray(bool_idx)):
raise TypeError(
"The predicate keys must return a boolean array, "
"or a boolean value.")
value = value_expr.evaluate(data, env)
mask = (copied ^ bool_idx) & bool_idx
copied |= bool_idx
idx = np.where(mask)[0]
result[idx] = self.nice_value(value, idx)
return np.array(list(result)) | [
"def",
"evaluate",
"(",
"self",
",",
"data",
",",
"env",
")",
":",
"# For each predicate-value, we keep track of the positions",
"# that have been copied to the result, so that the later",
"# more general values do not overwrite the previous ones.",
"result",
"=",
"np",
".",
"repeat",
"(",
"None",
",",
"len",
"(",
"data",
")",
")",
"copied",
"=",
"np",
".",
"repeat",
"(",
"False",
",",
"len",
"(",
"data",
")",
")",
"for",
"pred_expr",
",",
"value_expr",
"in",
"self",
".",
"pv_expressions",
":",
"bool_idx",
"=",
"pred_expr",
".",
"evaluate",
"(",
"data",
",",
"env",
")",
"if",
"not",
"pdtypes",
".",
"is_bool_dtype",
"(",
"np",
".",
"asarray",
"(",
"bool_idx",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"The predicate keys must return a boolean array, \"",
"\"or a boolean value.\"",
")",
"value",
"=",
"value_expr",
".",
"evaluate",
"(",
"data",
",",
"env",
")",
"mask",
"=",
"(",
"copied",
"^",
"bool_idx",
")",
"&",
"bool_idx",
"copied",
"|=",
"bool_idx",
"idx",
"=",
"np",
".",
"where",
"(",
"mask",
")",
"[",
"0",
"]",
"result",
"[",
"idx",
"]",
"=",
"self",
".",
"nice_value",
"(",
"value",
",",
"idx",
")",
"return",
"np",
".",
"array",
"(",
"list",
"(",
"result",
")",
")"
] | Evaluate the predicates and values | [
"Evaluate",
"the",
"predicates",
"and",
"values"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/expressions.py#L130-L150 |
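The mask arithmetic is worth seeing in isolation: rows already claimed by an earlier predicate are never overwritten by a later, more general one.

```python
import numpy as np

copied = np.array([False, True, False, True])    # claimed by earlier preds
bool_idx = np.array([True, True, True, False])   # current predicate
mask = (copied ^ bool_idx) & bool_idx
print(mask)   # [ True False  True False] -- only newly claimed rows
```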
has2k1/plydata | plydata/expressions.py | IfElseExpression.evaluate | def evaluate(self, data, env):
"""
Evaluate the predicates and values
"""
bool_idx = self.predicate_expr.evaluate(data, env)
true_value = self.true_value_expr.evaluate(data, env)
false_value = self.false_value_expr.evaluate(data, env)
true_idx = np.where(bool_idx)[0]
false_idx = np.where(~bool_idx)[0]
result = np.repeat(None, len(data))
result[true_idx] = self.nice_value(true_value, true_idx)
result[false_idx] = self.nice_value(false_value, false_idx)
return np.array(list(result)) | python | def evaluate(self, data, env):
"""
Evaluate the predicates and values
"""
bool_idx = self.predicate_expr.evaluate(data, env)
true_value = self.true_value_expr.evaluate(data, env)
false_value = self.false_value_expr.evaluate(data, env)
true_idx = np.where(bool_idx)[0]
false_idx = np.where(~bool_idx)[0]
result = np.repeat(None, len(data))
result[true_idx] = self.nice_value(true_value, true_idx)
result[false_idx] = self.nice_value(false_value, false_idx)
return np.array(list(result)) | [
"def",
"evaluate",
"(",
"self",
",",
"data",
",",
"env",
")",
":",
"bool_idx",
"=",
"self",
".",
"predicate_expr",
".",
"evaluate",
"(",
"data",
",",
"env",
")",
"true_value",
"=",
"self",
".",
"true_value_expr",
".",
"evaluate",
"(",
"data",
",",
"env",
")",
"false_value",
"=",
"self",
".",
"false_value_expr",
".",
"evaluate",
"(",
"data",
",",
"env",
")",
"true_idx",
"=",
"np",
".",
"where",
"(",
"bool_idx",
")",
"[",
"0",
"]",
"false_idx",
"=",
"np",
".",
"where",
"(",
"~",
"bool_idx",
")",
"[",
"0",
"]",
"result",
"=",
"np",
".",
"repeat",
"(",
"None",
",",
"len",
"(",
"data",
")",
")",
"result",
"[",
"true_idx",
"]",
"=",
"self",
".",
"nice_value",
"(",
"true_value",
",",
"true_idx",
")",
"result",
"[",
"false_idx",
"]",
"=",
"self",
".",
"nice_value",
"(",
"false_value",
",",
"false_idx",
")",
"return",
"np",
".",
"array",
"(",
"list",
"(",
"result",
")",
")"
] | Evaluate the predicates and values | [
"Evaluate",
"the",
"predicates",
"and",
"values"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/expressions.py#L161-L173 |
has2k1/plydata | plydata/eval.py | EvalEnvironment.with_outer_namespace | def with_outer_namespace(self, outer_namespace):
"""Return a new EvalEnvironment with an extra namespace added.
This namespace will be used only for variables that are not found in
any existing namespace, i.e., it is "outside" them all."""
return self.__class__(self._namespaces + [outer_namespace],
self.flags) | python | def with_outer_namespace(self, outer_namespace):
"""Return a new EvalEnvironment with an extra namespace added.
This namespace will be used only for variables that are not found in
any existing namespace, i.e., it is "outside" them all."""
return self.__class__(self._namespaces + [outer_namespace],
self.flags) | [
"def",
"with_outer_namespace",
"(",
"self",
",",
"outer_namespace",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"_namespaces",
"+",
"[",
"outer_namespace",
"]",
",",
"self",
".",
"flags",
")"
] | Return a new EvalEnvironment with an extra namespace added.
This namespace will be used only for variables that are not found in
any existing namespace, i.e., it is "outside" them all. | [
"Return",
"a",
"new",
"EvalEnvironment",
"with",
"an",
"extra",
"namespace",
"added",
".",
"This",
"namespace",
"will",
"be",
"used",
"only",
"for",
"variables",
"that",
"are",
"not",
"found",
"in",
"any",
"existing",
"namespace",
"i",
".",
"e",
".",
"it",
"is",
"outside",
"them",
"all",
"."
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L81-L86 |
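A minimal usage sketch, assuming `EvalEnvironment` is importable from `plydata.eval` as the record's path indicates; the outer namespace is consulted only after every existing namespace misses:

```python
from plydata.eval import EvalEnvironment

answer = 42
env = EvalEnvironment.capture()                   # captures this scope
env2 = env.with_outer_namespace({"fallback": 7})  # checked last
print(env2.eval("answer"))    # 42 -- found in the captured scope
print(env2.eval("fallback"))  # 7  -- resolved via the outer namespace
```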
has2k1/plydata | plydata/eval.py | EvalEnvironment.eval | def eval(self, expr, source_name="<string>", inner_namespace={}):
"""Evaluate some Python code in the encapsulated environment.
:arg expr: A string containing a Python expression.
:arg source_name: A name for this string, for use in tracebacks.
:arg inner_namespace: A dict-like object that will be checked first
when `expr` attempts to access any variables.
:returns: The value of `expr`.
"""
code = compile(expr, source_name, "eval", self.flags, False)
return eval(code, {}, VarLookupDict([inner_namespace]
+ self._namespaces)) | python | def eval(self, expr, source_name="<string>", inner_namespace={}):
"""Evaluate some Python code in the encapsulated environment.
:arg expr: A string containing a Python expression.
:arg source_name: A name for this string, for use in tracebacks.
:arg inner_namespace: A dict-like object that will be checked first
when `expr` attempts to access any variables.
:returns: The value of `expr`.
"""
code = compile(expr, source_name, "eval", self.flags, False)
return eval(code, {}, VarLookupDict([inner_namespace]
+ self._namespaces)) | [
"def",
"eval",
"(",
"self",
",",
"expr",
",",
"source_name",
"=",
"\"<string>\"",
",",
"inner_namespace",
"=",
"{",
"}",
")",
":",
"code",
"=",
"compile",
"(",
"expr",
",",
"source_name",
",",
"\"eval\"",
",",
"self",
".",
"flags",
",",
"False",
")",
"return",
"eval",
"(",
"code",
",",
"{",
"}",
",",
"VarLookupDict",
"(",
"[",
"inner_namespace",
"]",
"+",
"self",
".",
"_namespaces",
")",
")"
] | Evaluate some Python code in the encapsulated environment.
:arg expr: A string containing a Python expression.
:arg source_name: A name for this string, for use in tracebacks.
:arg inner_namespace: A dict-like object that will be checked first
when `expr` attempts to access any variables.
:returns: The value of `expr`. | [
"Evaluate",
"some",
"Python",
"code",
"in",
"the",
"encapsulated",
"environment",
".",
":",
"arg",
"expr",
":",
"A",
"string",
"containing",
"a",
"Python",
"expression",
".",
":",
"arg",
"source_name",
":",
"A",
"name",
"for",
"this",
"string",
"for",
"use",
"in",
"tracebacks",
".",
":",
"arg",
"inner_namespace",
":",
"A",
"dict",
"-",
"like",
"object",
"that",
"will",
"be",
"checked",
"first",
"when",
"expr",
"attempts",
"to",
"access",
"any",
"variables",
".",
":",
"returns",
":",
"The",
"value",
"of",
"expr",
"."
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L88-L98 |
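A usage sketch for `eval` under the same import assumption; `inner_namespace` shadows names from the captured scope:

```python
from plydata.eval import EvalEnvironment

x = 10
env = EvalEnvironment.capture()
print(env.eval("x + y", inner_namespace={"y": 5}))  # 15
print(env.eval("x", inner_namespace={"x": 0}))      # 0 -- inner namespace wins
```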
has2k1/plydata | plydata/eval.py | EvalEnvironment.capture | def capture(cls, eval_env=0, reference=0):
"""Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
.. versionadded: 0.2.0
The ``reference`` argument.
"""
if isinstance(eval_env, cls):
return eval_env
elif isinstance(eval_env, numbers.Integral):
depth = eval_env + reference
else:
raise TypeError("Parameter 'eval_env' must be either an integer "
"or an instance of patsy.EvalEnvironment.")
frame = inspect.currentframe()
try:
for i in range(depth + 1):
if frame is None:
raise ValueError("call-stack is not that deep!")
frame = frame.f_back
return cls([frame.f_locals, frame.f_globals],
frame.f_code.co_flags & _ALL_FUTURE_FLAGS)
# The try/finally is important to avoid a potential reference cycle --
# any exception traceback will carry a reference to *our* frame, which
# contains a reference to our local variables, which would otherwise
# carry a reference to some parent frame, where the exception was
# caught...:
finally:
del frame | python | def capture(cls, eval_env=0, reference=0):
"""Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
.. versionadded: 0.2.0
The ``reference`` argument.
"""
if isinstance(eval_env, cls):
return eval_env
elif isinstance(eval_env, numbers.Integral):
depth = eval_env + reference
else:
raise TypeError("Parameter 'eval_env' must be either an integer "
"or an instance of patsy.EvalEnvironment.")
frame = inspect.currentframe()
try:
for i in range(depth + 1):
if frame is None:
raise ValueError("call-stack is not that deep!")
frame = frame.f_back
return cls([frame.f_locals, frame.f_globals],
frame.f_code.co_flags & _ALL_FUTURE_FLAGS)
# The try/finally is important to avoid a potential reference cycle --
# any exception traceback will carry a reference to *our* frame, which
# contains a reference to our local variables, which would otherwise
# carry a reference to some parent frame, where the exception was
# caught...:
finally:
del frame | [
"def",
"capture",
"(",
"cls",
",",
"eval_env",
"=",
"0",
",",
"reference",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"eval_env",
",",
"cls",
")",
":",
"return",
"eval_env",
"elif",
"isinstance",
"(",
"eval_env",
",",
"numbers",
".",
"Integral",
")",
":",
"depth",
"=",
"eval_env",
"+",
"reference",
"else",
":",
"raise",
"TypeError",
"(",
"\"Parameter 'eval_env' must be either an integer \"",
"\"or an instance of patsy.EvalEnvironment.\"",
")",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"depth",
"+",
"1",
")",
":",
"if",
"frame",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"call-stack is not that deep!\"",
")",
"frame",
"=",
"frame",
".",
"f_back",
"return",
"cls",
"(",
"[",
"frame",
".",
"f_locals",
",",
"frame",
".",
"f_globals",
"]",
",",
"frame",
".",
"f_code",
".",
"co_flags",
"&",
"_ALL_FUTURE_FLAGS",
")",
"# The try/finally is important to avoid a potential reference cycle --",
"# any exception traceback will carry a reference to *our* frame, which",
"# contains a reference to our local variables, which would otherwise",
"# carry a reference to some parent frame, where the exception was",
"# caught...:",
"finally",
":",
"del",
"frame"
] | Capture an execution environment from the stack.
If `eval_env` is already an :class:`EvalEnvironment`, it is returned
unchanged. Otherwise, we walk up the stack by ``eval_env + reference``
steps and capture that function's evaluation environment.
For ``eval_env=0`` and ``reference=0``, the default, this captures the
stack frame of the function that calls :meth:`capture`. If ``eval_env
+ reference`` is 1, then we capture that function's caller, etc.
This somewhat complicated calling convention is designed to be
convenient for functions which want to capture their caller's
environment by default, but also allow explicit environments to be
specified. See the second example.
Example::
x = 1
this_env = EvalEnvironment.capture()
assert this_env.namespace["x"] == 1
def child_func():
return EvalEnvironment.capture(1)
this_env_from_child = child_func()
assert this_env_from_child.namespace["x"] == 1
Example::
# This function can be used like:
# my_model(formula_like, data)
# -> evaluates formula_like in caller's environment
# my_model(formula_like, data, eval_env=1)
# -> evaluates formula_like in caller's caller's environment
# my_model(formula_like, data, eval_env=my_env)
# -> evaluates formula_like in environment 'my_env'
def my_model(formula_like, data, eval_env=0):
eval_env = EvalEnvironment.capture(eval_env, reference=1)
return model_setup_helper(formula_like, data, eval_env)
This is how :func:`dmatrix` works.
.. versionadded: 0.2.0
The ``reference`` argument. | [
"Capture",
"an",
"execution",
"environment",
"from",
"the",
"stack",
".",
"If",
"eval_env",
"is",
"already",
"an",
":",
"class",
":",
"EvalEnvironment",
"it",
"is",
"returned",
"unchanged",
".",
"Otherwise",
"we",
"walk",
"up",
"the",
"stack",
"by",
"eval_env",
"+",
"reference",
"steps",
"and",
"capture",
"that",
"function",
"s",
"evaluation",
"environment",
".",
"For",
"eval_env",
"=",
"0",
"and",
"reference",
"=",
"0",
"the",
"default",
"this",
"captures",
"the",
"stack",
"frame",
"of",
"the",
"function",
"that",
"calls",
":",
"meth",
":",
"capture",
".",
"If",
"eval_env",
"+",
"reference",
"is",
"1",
"then",
"we",
"capture",
"that",
"function",
"s",
"caller",
"etc",
".",
"This",
"somewhat",
"complicated",
"calling",
"convention",
"is",
"designed",
"to",
"be",
"convenient",
"for",
"functions",
"which",
"want",
"to",
"capture",
"their",
"caller",
"s",
"environment",
"by",
"default",
"but",
"also",
"allow",
"explicit",
"environments",
"to",
"be",
"specified",
".",
"See",
"the",
"second",
"example",
".",
"Example",
"::",
"x",
"=",
"1",
"this_env",
"=",
"EvalEnvironment",
".",
"capture",
"()",
"assert",
"this_env",
".",
"namespace",
"[",
"x",
"]",
"==",
"1",
"def",
"child_func",
"()",
":",
"return",
"EvalEnvironment",
".",
"capture",
"(",
"1",
")",
"this_env_from_child",
"=",
"child_func",
"()",
"assert",
"this_env_from_child",
".",
"namespace",
"[",
"x",
"]",
"==",
"1",
"Example",
"::",
"#",
"This",
"function",
"can",
"be",
"used",
"like",
":",
"#",
"my_model",
"(",
"formula_like",
"data",
")",
"#",
"-",
">",
"evaluates",
"formula_like",
"in",
"caller",
"s",
"environment",
"#",
"my_model",
"(",
"formula_like",
"data",
"eval_env",
"=",
"1",
")",
"#",
"-",
">",
"evaluates",
"formula_like",
"in",
"caller",
"s",
"caller",
"s",
"environment",
"#",
"my_model",
"(",
"formula_like",
"data",
"eval_env",
"=",
"my_env",
")",
"#",
"-",
">",
"evaluates",
"formula_like",
"in",
"environment",
"my_env",
"def",
"my_model",
"(",
"formula_like",
"data",
"eval_env",
"=",
"0",
")",
":",
"eval_env",
"=",
"EvalEnvironment",
".",
"capture",
"(",
"eval_env",
"reference",
"=",
"1",
")",
"return",
"model_setup_helper",
"(",
"formula_like",
"data",
"eval_env",
")",
"This",
"is",
"how",
":",
"func",
":",
"dmatrix",
"works",
".",
"..",
"versionadded",
":",
"0",
".",
"2",
".",
"0",
"The",
"reference",
"argument",
"."
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L101-L157 |
has2k1/plydata | plydata/eval.py | EvalEnvironment.subset | def subset(self, names):
"""Creates a new, flat EvalEnvironment that contains only
the variables specified."""
vld = VarLookupDict(self._namespaces)
new_ns = dict((name, vld[name]) for name in names)
return EvalEnvironment([new_ns], self.flags) | python | def subset(self, names):
"""Creates a new, flat EvalEnvironment that contains only
the variables specified."""
vld = VarLookupDict(self._namespaces)
new_ns = dict((name, vld[name]) for name in names)
return EvalEnvironment([new_ns], self.flags) | [
"def",
"subset",
"(",
"self",
",",
"names",
")",
":",
"vld",
"=",
"VarLookupDict",
"(",
"self",
".",
"_namespaces",
")",
"new_ns",
"=",
"dict",
"(",
"(",
"name",
",",
"vld",
"[",
"name",
"]",
")",
"for",
"name",
"in",
"names",
")",
"return",
"EvalEnvironment",
"(",
"[",
"new_ns",
"]",
",",
"self",
".",
"flags",
")"
] | Creates a new, flat EvalEnvironment that contains only
the variables specified. | [
"Creates",
"a",
"new",
"flat",
"EvalEnvironment",
"that",
"contains",
"only",
"the",
"variables",
"specified",
"."
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L159-L164 |
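A usage sketch for `subset`, same import assumption; the returned environment is flat and holds only the requested names:

```python
from plydata.eval import EvalEnvironment

a, b, c = 1, 2, 3
env = EvalEnvironment.capture().subset(["a", "b"])
print(env.eval("a + b"))   # 3
try:
    env.eval("c")          # 'c' was not carried over
except NameError as e:
    print(e)
```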
has2k1/plydata | plydata/utils.py | temporary_key | def temporary_key(d, key, value):
"""
Context manager that removes key from dictionary on closing
The dictionary will hold the key for the duration of
the context.
Parameters
----------
d : dict-like
Dictionary in which to insert a temporary key.
key : hashable
Location at which to insert ``value``.
value : object
Value to insert in ``d`` at location ``key``.
"""
d[key] = value
try:
yield d
finally:
del d[key] | python | def temporary_key(d, key, value):
"""
Context manager that removes key from dictionary on closing
The dictionary will hold the key for the duration of
the context.
Parameters
----------
d : dict-like
Dictionary in which to insert a temporary key.
key : hashable
Location at which to insert ``value``.
value : object
Value to insert in ``d`` at location ``key``.
"""
d[key] = value
try:
yield d
finally:
del d[key] | [
"def",
"temporary_key",
"(",
"d",
",",
"key",
",",
"value",
")",
":",
"d",
"[",
"key",
"]",
"=",
"value",
"try",
":",
"yield",
"d",
"finally",
":",
"del",
"d",
"[",
"key",
"]"
] | Context manager that removes key from dictionary on closing
The dictionary will hold the key for the duration of
the context.
Parameters
----------
d : dict-like
Dictionary in which to insert a temporary key.
key : hashable
Location at which to insert ``value``.
value : object
Value to insert in ``d`` at location ``key``. | [
"Context",
"manager",
"that",
"removes",
"key",
"from",
"dictionary",
"on",
"closing"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L16-L36 |
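A usage sketch, assuming `temporary_key` is wrapped with `contextlib.contextmanager` in the source; the bare `yield` suggests it, the decorator just is not captured in this record:

```python
from plydata.utils import temporary_key

d = {"a": 1}
with temporary_key(d, "b", 2) as d2:
    print(d2["b"])    # 2 -- key present inside the context
print("b" in d)       # False -- removed on exit, even after an exception
```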
has2k1/plydata | plydata/utils.py | temporary_attr | def temporary_attr(obj, name, value):
"""
Context manager that removes an attribute from an object on closing
The object will hold the attribute for the duration of
the context.
Parameters
----------
obj : object
Object onto which to add a temporary attribute.
name : str
Name of attribute to add to ``obj``.
value : object
Value of ``attr``.
"""
setattr(obj, name, value)
try:
yield obj
finally:
delattr(obj, name) | python | def temporary_attr(obj, name, value):
"""
Context manager that removes an attribute from an object on closing
The object will hold the attribute for the duration of
the context.
Parameters
----------
obj : object
Object onto which to add a temporary attribute.
name : str
Name of attribute to add to ``obj``.
value : object
Value of ``attr``.
"""
setattr(obj, name, value)
try:
yield obj
finally:
delattr(obj, name) | [
"def",
"temporary_attr",
"(",
"obj",
",",
"name",
",",
"value",
")",
":",
"setattr",
"(",
"obj",
",",
"name",
",",
"value",
")",
"try",
":",
"yield",
"obj",
"finally",
":",
"delattr",
"(",
"obj",
",",
"name",
")"
] | Context manager that removes an attribute from an object on closing
The object will hold the attribute for the duration of
the context.
Parameters
----------
obj : object
Object onto which to add a temporary attribute.
name : str
Name of attribute to add to ``obj``.
value : object
Value of ``attr``. | [
"Context",
"manager",
"that",
"removes",
"key",
"from",
"dictionary",
"on",
"closing"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L40-L60 |
has2k1/plydata | plydata/utils.py | Q | def Q(name):
"""
Quote a variable name
A way to 'quote' variable names, especially ones that do not otherwise
meet Python's variable name rules.
Parameters
----------
name : str
Name of variable
Returns
-------
value : object
Value of variable
Examples
--------
>>> import pandas as pd
>>> from plydata import define
>>> df = pd.DataFrame({'class': [10, 20, 30]})
Since ``class`` is a reserved python keyword it cannot be a variable
name, and therefore cannot be used in an expression without quoting it.
>>> df >> define(y='class+1')
Traceback (most recent call last):
File "<string>", line 1
class+1
^
SyntaxError: invalid syntax
>>> df >> define(y='Q("class")+1')
class y
0 10 11
1 20 21
2 30 31
Note that it is ``'Q("some name")'`` and not ``'Q(some name)'``.
As in the above example, you do not need to ``import`` ``Q`` before
you can use it.
"""
env = EvalEnvironment.capture(1)
try:
return env.namespace[name]
except KeyError:
raise NameError("No data named {!r} found".format(name)) | python | def Q(name):
"""
Quote a variable name
A way to 'quote' variable names, especially ones that do not otherwise
meet Python's variable name rules.
Parameters
----------
name : str
Name of variable
Returns
-------
value : object
Value of variable
Examples
--------
>>> import pandas as pd
>>> from plydata import define
>>> df = pd.DataFrame({'class': [10, 20, 30]})
Since ``class`` is a reserved python keyword it cannot be a variable
name, and therefore cannot be used in an expression without quoting it.
>>> df >> define(y='class+1')
Traceback (most recent call last):
File "<string>", line 1
class+1
^
SyntaxError: invalid syntax
>>> df >> define(y='Q("class")+1')
class y
0 10 11
1 20 21
2 30 31
Note that it is ``'Q("some name")'`` and not ``'Q(some name)'``.
As in the above example, you do not need to ``import`` ``Q`` before
you can use it.
"""
env = EvalEnvironment.capture(1)
try:
return env.namespace[name]
except KeyError:
raise NameError("No data named {!r} found".format(name)) | [
"def",
"Q",
"(",
"name",
")",
":",
"env",
"=",
"EvalEnvironment",
".",
"capture",
"(",
"1",
")",
"try",
":",
"return",
"env",
".",
"namespace",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"NameError",
"(",
"\"No data named {!r} found\"",
".",
"format",
"(",
"name",
")",
")"
] | Quote a variable name
A way to 'quote' variable names, especially ones that do not otherwise
meet Python's variable name rules.
Parameters
----------
name : str
Name of variable
Returns
-------
value : object
Value of variable
Examples
--------
>>> import pandas as pd
>>> from plydata import define
>>> df = pd.DataFrame({'class': [10, 20, 30]})
Since ``class`` is a reserved python keyword it cannot be a variable
name, and therefore cannot be used in an expression without quoting it.
>>> df >> define(y='class+1')
Traceback (most recent call last):
File "<string>", line 1
class+1
^
SyntaxError: invalid syntax
>>> df >> define(y='Q("class")+1')
class y
0 10 11
1 20 21
2 30 31
Note that it is ``'Q("some name")'`` and not ``'Q(some name)'``.
As in the above example, you do not need to ``import`` ``Q`` before
you can use it. | [
"Quote",
"a",
"variable",
"name"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L72-L119 |
has2k1/plydata | plydata/utils.py | regular_index | def regular_index(*dfs):
"""
Change & restore the indices of dataframes
A dataframe with duplicate index values can be hard to work with.
When split and recombined, you cannot restore the row order.
This can be the case even if the index has unique but
irregular/unordered values. This contextmanager resets the unordered
indices of any dataframe passed to it; on exit it restores
the original index.
A regular index is of the form::
RangeIndex(start=0, stop=n, step=1)
Parameters
----------
dfs : tuple
Dataframes
Yields
------
dfs : tuple
Dataframe
Examples
--------
Create dataframes with different indices
>>> df1 = pd.DataFrame([4, 3, 2, 1])
>>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0])
>>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13])
Within the contextmanager all frames have nice range indices
>>> with regular_index(df1, df2, df3):
... print(df1.index)
... print(df2.index)
... print(df3.index)
RangeIndex(start=0, stop=4, step=1)
RangeIndex(start=0, stop=3, step=1)
RangeIndex(start=0, stop=3, step=1)
Indices restored
>>> df1.index
RangeIndex(start=0, stop=4, step=1)
>>> df2.index
Int64Index([3, 0, 0], dtype='int64')
>>> df3.index
Int64Index([11, 12, 13], dtype='int64')
"""
original_index = [df.index for df in dfs]
have_bad_index = [not isinstance(df.index, pd.RangeIndex)
for df in dfs]
for df, bad in zip(dfs, have_bad_index):
if bad:
df.reset_index(drop=True, inplace=True)
try:
yield dfs
finally:
for df, bad, idx in zip(dfs, have_bad_index, original_index):
if bad and len(df.index) == len(idx):
df.index = idx | python | def regular_index(*dfs):
"""
Change & restore the indices of dataframes
A dataframe with duplicate index values can be hard to work with.
When split and recombined, you cannot restore the row order.
This can be the case even if the index has unique but
irregular/unordered values. This contextmanager resets the unordered
indices of any dataframe passed to it; on exit it restores
the original index.
A regular index is of the form::
RangeIndex(start=0, stop=n, step=1)
Parameters
----------
dfs : tuple
Dataframes
Yields
------
dfs : tuple
Dataframe
Examples
--------
Create dataframes with different indices
>>> df1 = pd.DataFrame([4, 3, 2, 1])
>>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0])
>>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13])
Within the contextmanager all frames have nice range indices
>>> with regular_index(df1, df2, df3):
... print(df1.index)
... print(df2.index)
... print(df3.index)
RangeIndex(start=0, stop=4, step=1)
RangeIndex(start=0, stop=3, step=1)
RangeIndex(start=0, stop=3, step=1)
Indices restored
>>> df1.index
RangeIndex(start=0, stop=4, step=1)
>>> df2.index
Int64Index([3, 0, 0], dtype='int64')
>>> df3.index
Int64Index([11, 12, 13], dtype='int64')
"""
original_index = [df.index for df in dfs]
have_bad_index = [not isinstance(df.index, pd.RangeIndex)
for df in dfs]
for df, bad in zip(dfs, have_bad_index):
if bad:
df.reset_index(drop=True, inplace=True)
try:
yield dfs
finally:
for df, bad, idx in zip(dfs, have_bad_index, original_index):
if bad and len(df.index) == len(idx):
df.index = idx | [
"def",
"regular_index",
"(",
"*",
"dfs",
")",
":",
"original_index",
"=",
"[",
"df",
".",
"index",
"for",
"df",
"in",
"dfs",
"]",
"have_bad_index",
"=",
"[",
"not",
"isinstance",
"(",
"df",
".",
"index",
",",
"pd",
".",
"RangeIndex",
")",
"for",
"df",
"in",
"dfs",
"]",
"for",
"df",
",",
"bad",
"in",
"zip",
"(",
"dfs",
",",
"have_bad_index",
")",
":",
"if",
"bad",
":",
"df",
".",
"reset_index",
"(",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"try",
":",
"yield",
"dfs",
"finally",
":",
"for",
"df",
",",
"bad",
",",
"idx",
"in",
"zip",
"(",
"dfs",
",",
"have_bad_index",
",",
"original_index",
")",
":",
"if",
"bad",
"and",
"len",
"(",
"df",
".",
"index",
")",
"==",
"len",
"(",
"idx",
")",
":",
"df",
".",
"index",
"=",
"idx"
] | Change & restore the indices of dataframes
A dataframe with duplicate index values can be hard to work with.
When split and recombined, you cannot restore the row order.
This can be the case even if the index has unique but
irregular/unordered values. This contextmanager resets the unordered
indices of any dataframe passed to it; on exit it restores
the original index.
A regular index is of the form::
RangeIndex(start=0, stop=n, step=1)
Parameters
----------
dfs : tuple
Dataframes
Yields
------
dfs : tuple
Dataframe
Examples
--------
Create dataframes with different indices
>>> df1 = pd.DataFrame([4, 3, 2, 1])
>>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0])
>>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13])
Within the contextmanager all frames have nice range indices
>>> with regular_index(df1, df2, df3):
... print(df1.index)
... print(df2.index)
... print(df3.index)
RangeIndex(start=0, stop=4, step=1)
RangeIndex(start=0, stop=3, step=1)
RangeIndex(start=0, stop=3, step=1)
Indices restored
>>> df1.index
RangeIndex(start=0, stop=4, step=1)
>>> df2.index
Int64Index([3, 0, 0], dtype='int64')
>>> df3.index
Int64Index([11, 12, 13], dtype='int64') | [
"Change",
"&",
"restore",
"the",
"indices",
"of",
"dataframes"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L147-L212 |
has2k1/plydata | plydata/utils.py | unique | def unique(lst):
"""
Return unique elements
:class:`pandas.unique` and :class:`numpy.unique` cast
mixed type lists to the same type. They are faster, but
sometimes we want to maintain the type.
Parameters
----------
lst : list-like
List of items
Returns
-------
out : list
Unique items in the order that they appear in the
input.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> lst = ['one', 'two', 123, 'three']
>>> pd.unique(lst)
array(['one', 'two', '123', 'three'], dtype=object)
>>> np.unique(lst)
array(['123', 'one', 'three', 'two'],
dtype='<U5')
>>> unique(lst)
['one', 'two', 123, 'three']
pandas and numpy cast 123 to a string, and numpy does not
even maintain the order.
"""
seen = set()
def make_seen(x):
seen.add(x)
return x
return [make_seen(x) for x in lst if x not in seen] | python | def unique(lst):
"""
Return unique elements
:class:`pandas.unique` and :class:`numpy.unique` cast
mixed type lists to the same type. They are faster, but
sometimes we want to maintain the type.
Parameters
----------
lst : list-like
List of items
Returns
-------
out : list
Unique items in the order that they appear in the
input.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> lst = ['one', 'two', 123, 'three']
>>> pd.unique(lst)
array(['one', 'two', '123', 'three'], dtype=object)
>>> np.unique(lst)
array(['123', 'one', 'three', 'two'],
dtype='<U5')
>>> unique(lst)
['one', 'two', 123, 'three']
pandas and numpy cast 123 to a string, and numpy does not
even maintain the order.
"""
seen = set()
def make_seen(x):
seen.add(x)
return x
return [make_seen(x) for x in lst if x not in seen] | [
"def",
"unique",
"(",
"lst",
")",
":",
"seen",
"=",
"set",
"(",
")",
"def",
"make_seen",
"(",
"x",
")",
":",
"seen",
".",
"add",
"(",
"x",
")",
"return",
"x",
"return",
"[",
"make_seen",
"(",
"x",
")",
"for",
"x",
"in",
"lst",
"if",
"x",
"not",
"in",
"seen",
"]"
] | Return unique elements
:class:`pandas.unique` and :class:`numpy.unique` cast
mixed type lists to the same type. They are faster, but
sometimes we want to maintain the type.
Parameters
----------
lst : list-like
List of items
Returns
-------
out : list
Unique items in the order that they appear in the
input.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> lst = ['one', 'two', 123, 'three']
>>> pd.unique(lst)
array(['one', 'two', '123', 'three'], dtype=object)
>>> np.unique(lst)
array(['123', 'one', 'three', 'two'],
dtype='<U5')
>>> unique(lst)
['one', 'two', 123, 'three']
pandas and numpy cast 123 to a string, and numpy does not
even maintain the order. | [
"Return",
"unique",
"elements"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L215-L256 |
has2k1/plydata | plydata/dataframe/one_table.py | _nth | def _nth(arr, n):
"""
Return the nth value of array
If it is missing return NaN
"""
try:
return arr.iloc[n]
except (KeyError, IndexError):
return np.nan | python | def _nth(arr, n):
"""
Return the nth value of array
If it is missing return NaN
"""
try:
return arr.iloc[n]
except (KeyError, IndexError):
return np.nan | [
"def",
"_nth",
"(",
"arr",
",",
"n",
")",
":",
"try",
":",
"return",
"arr",
".",
"iloc",
"[",
"n",
"]",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"return",
"np",
".",
"nan"
] | Return the nth value of array
If it is missing return NaN | [
"Return",
"the",
"nth",
"value",
"of",
"array"
] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/one_table.py#L217-L226 |
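`_nth` is private, so the sketch below re-declares it verbatim rather than importing it; the pandas data is illustrative:

```python
import numpy as np
import pandas as pd

def _nth(arr, n):
    try:
        return arr.iloc[n]
    except (KeyError, IndexError):
        return np.nan

s = pd.Series([10, 20, 30])
print(_nth(s, 1))     # 20
print(_nth(s, -1))    # 30 -- negative positions work through .iloc
print(_nth(s, 99))    # nan -- out-of-range falls back to NaN
```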
tkarabela/pysubs2 | pysubs2/time.py | make_time | def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified") | python | def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000
"""
if frames is None and fps is None:
return times_to_ms(h, m, s, ms)
elif frames is not None and fps is not None:
return frames_to_ms(frames, fps)
else:
raise ValueError("Both fps and frames must be specified") | [
"def",
"make_time",
"(",
"h",
"=",
"0",
",",
"m",
"=",
"0",
",",
"s",
"=",
"0",
",",
"ms",
"=",
"0",
",",
"frames",
"=",
"None",
",",
"fps",
"=",
"None",
")",
":",
"if",
"frames",
"is",
"None",
"and",
"fps",
"is",
"None",
":",
"return",
"times_to_ms",
"(",
"h",
",",
"m",
",",
"s",
",",
"ms",
")",
"elif",
"frames",
"is",
"not",
"None",
"and",
"fps",
"is",
"not",
"None",
":",
"return",
"frames_to_ms",
"(",
"frames",
",",
"fps",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Both fps and frames must be specified\"",
")"
] | Convert time to milliseconds.
See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,
:func:`pysubs2.time.frames_to_ms()` is called instead.
Raises:
ValueError: Invalid fps, or one of frames/fps is missing.
Example:
>>> make_time(s=1.5)
1500
>>> make_time(frames=50, fps=25)
2000 | [
"Convert",
"time",
"to",
"milliseconds",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L12-L34 |
tkarabela/pysubs2 | pysubs2/time.py | timestamp_to_ms | def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms | python | def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
h, m, s, frac = map(int, groups)
ms = frac * 10**(3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms | [
"def",
"timestamp_to_ms",
"(",
"groups",
")",
":",
"h",
",",
"m",
",",
"s",
",",
"frac",
"=",
"map",
"(",
"int",
",",
"groups",
")",
"ms",
"=",
"frac",
"*",
"10",
"**",
"(",
"3",
"-",
"len",
"(",
"groups",
"[",
"-",
"1",
"]",
")",
")",
"ms",
"+=",
"s",
"*",
"1000",
"ms",
"+=",
"m",
"*",
"60000",
"ms",
"+=",
"h",
"*",
"3600000",
"return",
"ms"
] | Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420 | [
"Convert",
"groups",
"from",
":",
"data",
":",
"pysubs2",
".",
"time",
".",
"TIMESTAMP",
"match",
"to",
"milliseconds",
".",
"Example",
":",
">>>",
"timestamp_to_ms",
"(",
"TIMESTAMP",
".",
"match",
"(",
"0",
":",
"00",
":",
"00",
".",
"42",
")",
".",
"groups",
"()",
")",
"420"
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L36-L50 |
tkarabela/pysubs2 | pysubs2/time.py | times_to_ms | def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms)) | python | def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int).
"""
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return int(round(ms)) | [
"def",
"times_to_ms",
"(",
"h",
"=",
"0",
",",
"m",
"=",
"0",
",",
"s",
"=",
"0",
",",
"ms",
"=",
"0",
")",
":",
"ms",
"+=",
"s",
"*",
"1000",
"ms",
"+=",
"m",
"*",
"60000",
"ms",
"+=",
"h",
"*",
"3600000",
"return",
"int",
"(",
"round",
"(",
"ms",
")",
")"
] | Convert hours, minutes, seconds to milliseconds.
Arguments may be positive or negative, int or float,
need not be normalized (``s=120`` is okay).
Returns:
Number of milliseconds (rounded to int). | [
"Convert",
"hours",
"minutes",
"seconds",
"to",
"milliseconds",
".",
"Arguments",
"may",
"be",
"positive",
"or",
"negative",
"int",
"or",
"float",
"need",
"not",
"be",
"normalized",
"(",
"s",
"=",
"120",
"is",
"okay",
")",
".",
"Returns",
":",
"Number",
"of",
"milliseconds",
"(",
"rounded",
"to",
"int",
")",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L52-L66 |
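A usage sketch for `times_to_ms`, assuming it is importable from `pysubs2.time` as the record's path indicates:

```python
from pysubs2.time import times_to_ms

print(times_to_ms(s=120))        # 120000 -- arguments need not be normalized
print(times_to_ms(h=1, m=-30))   # 1800000 -- negative components are allowed
print(times_to_ms(s=1.5))        # 1500 -- floats are rounded to int
```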
tkarabela/pysubs2 | pysubs2/time.py | frames_to_ms | def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps))) | python | def frames_to_ms(frames, fps):
"""
Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round(frames * (1000 / fps))) | [
"def",
"frames_to_ms",
"(",
"frames",
",",
"fps",
")",
":",
"if",
"fps",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Framerate must be positive number (%f).\"",
"%",
"fps",
")",
"return",
"int",
"(",
"round",
"(",
"frames",
"*",
"(",
"1000",
"/",
"fps",
")",
")",
")"
] | Convert frame-based duration to milliseconds.
Arguments:
frames: Number of frames (should be int).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of milliseconds (rounded to int).
Raises:
ValueError: fps was negative or zero. | [
"Convert",
"frame",
"-",
"based",
"duration",
"to",
"milliseconds",
".",
"Arguments",
":",
"frames",
":",
"Number",
"of",
"frames",
"(",
"should",
"be",
"int",
")",
".",
"fps",
":",
"Framerate",
"(",
"must",
"be",
"a",
"positive",
"number",
"eg",
".",
"23",
".",
"976",
")",
".",
"Returns",
":",
"Number",
"of",
"milliseconds",
"(",
"rounded",
"to",
"int",
")",
".",
"Raises",
":",
"ValueError",
":",
"fps",
"was",
"negative",
"or",
"zero",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L68-L86 |
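A usage sketch for `frames_to_ms` under the same import assumption:

```python
from pysubs2.time import frames_to_ms

print(frames_to_ms(frames=50, fps=25))     # 2000
print(frames_to_ms(frames=1, fps=23.976))  # 42 -- rounded to the nearest ms
try:
    frames_to_ms(frames=1, fps=0)
except ValueError as e:
    print(e)                               # framerate must be positive
```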
tkarabela/pysubs2 | pysubs2/time.py | ms_to_frames | def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps)) | python | def ms_to_frames(ms, fps):
"""
Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero.
"""
if fps <= 0:
raise ValueError("Framerate must be positive number (%f)." % fps)
return int(round((ms / 1000) * fps)) | [
"def",
"ms_to_frames",
"(",
"ms",
",",
"fps",
")",
":",
"if",
"fps",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Framerate must be positive number (%f).\"",
"%",
"fps",
")",
"return",
"int",
"(",
"round",
"(",
"(",
"ms",
"/",
"1000",
")",
"*",
"fps",
")",
")"
] | Convert milliseconds to number of frames.
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
fps: Framerate (must be a positive number, eg. 23.976).
Returns:
Number of frames (int).
Raises:
ValueError: fps was negative or zero. | [
"Convert",
"milliseconds",
"to",
"number",
"of",
"frames",
".",
"Arguments",
":",
"ms",
":",
"Number",
"of",
"milliseconds",
"(",
"may",
"be",
"int",
"float",
"or",
"other",
"numeric",
"class",
")",
".",
"fps",
":",
"Framerate",
"(",
"must",
"be",
"a",
"positive",
"number",
"eg",
".",
"23",
".",
"976",
")",
".",
"Returns",
":",
"Number",
"of",
"frames",
"(",
"int",
")",
".",
"Raises",
":",
"ValueError",
":",
"fps",
"was",
"negative",
"or",
"zero",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L88-L106 |
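`ms_to_frames` is the inverse of `frames_to_ms`; a short sketch showing that the round trip is exact at whole-frame times:

```python
from pysubs2.time import frames_to_ms, ms_to_frames

print(ms_to_frames(2000, fps=25))              # 50
print(ms_to_frames(frames_to_ms(50, 25), 25))  # 50 -- exact round trip
```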
tkarabela/pysubs2 | pysubs2/time.py | ms_to_times | def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms) | python | def ms_to_times(ms):
"""
Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
"""
ms = int(round(ms))
h, ms = divmod(ms, 3600000)
m, ms = divmod(ms, 60000)
s, ms = divmod(ms, 1000)
return Times(h, m, s, ms) | [
"def",
"ms_to_times",
"(",
"ms",
")",
":",
"ms",
"=",
"int",
"(",
"round",
"(",
"ms",
")",
")",
"h",
",",
"ms",
"=",
"divmod",
"(",
"ms",
",",
"3600000",
")",
"m",
",",
"ms",
"=",
"divmod",
"(",
"ms",
",",
"60000",
")",
"s",
",",
"ms",
"=",
"divmod",
"(",
"ms",
",",
"1000",
")",
"return",
"Times",
"(",
"h",
",",
"m",
",",
"s",
",",
"ms",
")"
] | Convert milliseconds to normalized tuple (h, m, s, ms).
Arguments:
ms: Number of milliseconds (may be int, float or other numeric class).
Should be non-negative.
Returns:
Named tuple (h, m, s, ms) of ints.
Invariants: ``ms in range(1000) and s in range(60) and m in range(60)`` | [
"Convert",
"milliseconds",
"to",
"normalized",
"tuple",
"(",
"h",
"m",
"s",
"ms",
")",
".",
"Arguments",
":",
"ms",
":",
"Number",
"of",
"milliseconds",
"(",
"may",
"be",
"int",
"float",
"or",
"other",
"numeric",
"class",
")",
".",
"Should",
"be",
"non",
"-",
"negative",
".",
"Returns",
":",
"Named",
"tuple",
"(",
"h",
"m",
"s",
"ms",
")",
"of",
"ints",
".",
"Invariants",
":",
"ms",
"in",
"range",
"(",
"1000",
")",
"and",
"s",
"in",
"range",
"(",
"60",
")",
"and",
"m",
"in",
"range",
"(",
"60",
")"
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L108-L125 |
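A usage sketch for `ms_to_times`; `Times` behaves as a named tuple, with field names assumed to be h, m, s, ms:

```python
from pysubs2.time import ms_to_times

t = ms_to_times(3723456)
print(t)            # Times(h=1, m=2, s=3, ms=456)
h, m, s, ms = t     # unpacks like a plain tuple
print(h, m, s, ms)  # 1 2 3 456
```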
tkarabela/pysubs2 | pysubs2/time.py | ms_to_str | def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s) | python | def ms_to_str(ms, fractions=False):
"""
Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str
"""
sgn = "-" if ms < 0 else ""
h, m, s, ms = ms_to_times(abs(ms))
if fractions:
return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
else:
return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s) | [
"def",
"ms_to_str",
"(",
"ms",
",",
"fractions",
"=",
"False",
")",
":",
"sgn",
"=",
"\"-\"",
"if",
"ms",
"<",
"0",
"else",
"\"\"",
"h",
",",
"m",
",",
"s",
",",
"ms",
"=",
"ms_to_times",
"(",
"abs",
"(",
"ms",
")",
")",
"if",
"fractions",
":",
"return",
"sgn",
"+",
"\"{:01d}:{:02d}:{:02d}.{:03d}\"",
".",
"format",
"(",
"h",
",",
"m",
",",
"s",
",",
"ms",
")",
"else",
":",
"return",
"sgn",
"+",
"\"{:01d}:{:02d}:{:02d}\"",
".",
"format",
"(",
"h",
",",
"m",
",",
"s",
")"
] | Prettyprint milliseconds to [-]H:MM:SS[.mmm]
Handles huge and/or negative times. Non-negative times with ``fractions=True``
are matched by :data:`pysubs2.time.TIMESTAMP`.
Arguments:
ms: Number of milliseconds (int, float or other numeric class).
fractions: Whether to print up to millisecond precision.
Returns:
str | [
"Prettyprint",
"milliseconds",
"to",
"[",
"-",
"]",
"H",
":",
"MM",
":",
"SS",
"[",
".",
"mmm",
"]",
"Handles",
"huge",
"and",
"/",
"or",
"negative",
"times",
".",
"Non",
"-",
"negative",
"times",
"with",
"fractions",
"=",
"True",
"are",
"matched",
"by",
":",
"data",
":",
"pysubs2",
".",
"time",
".",
"TIMESTAMP",
".",
"Arguments",
":",
"ms",
":",
"Number",
"of",
"milliseconds",
"(",
"int",
"float",
"or",
"other",
"numeric",
"class",
")",
".",
"fractions",
":",
"Whether",
"to",
"print",
"up",
"to",
"millisecond",
"precision",
".",
"Returns",
":",
"str"
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/time.py#L127-L147 |
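A usage sketch for `ms_to_str`:

```python
from pysubs2.time import ms_to_str

print(ms_to_str(3723456))                  # 1:02:03
print(ms_to_str(3723456, fractions=True))  # 1:02:03.456
print(ms_to_str(-500, fractions=True))     # -0:00:00.500 -- sign is preserved
```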
tkarabela/pysubs2 | pysubs2/substation.py | ms_to_timestamp | def ms_to_timestamp(ms):
"""Convert ms to 'H:MM:SS.cc'"""
# XXX throw on overflow/underflow?
if ms < 0: ms = 0
if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME
h, m, s, ms = ms_to_times(ms)
return "%01d:%02d:%02d.%02d" % (h, m, s, ms//10) | python | def ms_to_timestamp(ms):
"""Convert ms to 'H:MM:SS.cc'"""
# XXX throw on overflow/underflow?
if ms < 0: ms = 0
if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME
h, m, s, ms = ms_to_times(ms)
return "%01d:%02d:%02d.%02d" % (h, m, s, ms//10) | [
"def",
"ms_to_timestamp",
"(",
"ms",
")",
":",
"# XXX throw on overflow/underflow?",
"if",
"ms",
"<",
"0",
":",
"ms",
"=",
"0",
"if",
"ms",
">",
"MAX_REPRESENTABLE_TIME",
":",
"ms",
"=",
"MAX_REPRESENTABLE_TIME",
"h",
",",
"m",
",",
"s",
",",
"ms",
"=",
"ms_to_times",
"(",
"ms",
")",
"return",
"\"%01d:%02d:%02d.%02d\"",
"%",
"(",
"h",
",",
"m",
",",
"s",
",",
"ms",
"//",
"10",
")"
] | Convert ms to 'H:MM:SS.cc | [
"Convert",
"ms",
"to",
"H",
":",
"MM",
":",
"SS",
".",
"cc"
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/substation.py#L49-L55 |
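A usage sketch for `ms_to_timestamp` (SubStation centisecond timestamps); per the code above, out-of-range input is clamped rather than raising:

```python
from pysubs2.substation import ms_to_timestamp

print(ms_to_timestamp(3723456))  # 1:02:03.45 -- ms truncated to centiseconds
print(ms_to_timestamp(-100))     # 0:00:00.00 -- negative times clamp to zero
```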
tkarabela/pysubs2 | pysubs2/substation.py | parse_tags | def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
"""
Split text into fragments with computed SSAStyles.
Returns list of tuples (fragment, style), where fragment is a part of text
between two brace-delimited override sequences, and style is the computed
styling of the fragment, ie. the original style modified by all override
sequences before the fragment.
Newline and non-breakable space overrides are left as-is.
Supported override tags:
- i, b, u, s
- r (with or without style name)
"""
fragments = SSAEvent.OVERRIDE_SEQUENCE.split(text)
if len(fragments) == 1:
return [(text, style)]
def apply_overrides(all_overrides):
s = style.copy()
for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides):
if tag == r"\r":
s = style.copy() # reset to original line style
elif tag.startswith(r"\r"):
name = tag[2:]
if name in styles:
s = styles[name].copy() # reset to named style
else:
if "i" in tag: s.italic = "1" in tag
elif "b" in tag: s.bold = "1" in tag
elif "u" in tag: s.underline = "1" in tag
elif "s" in tag: s.strikeout = "1" in tag
return s
overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text)
overrides_prefix_sum = ["".join(overrides[:i]) for i in range(len(overrides) + 1)]
computed_styles = map(apply_overrides, overrides_prefix_sum)
return list(zip(fragments, computed_styles)) | python | def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
"""
Split text into fragments with computed SSAStyles.
Returns list of tuples (fragment, style), where fragment is a part of text
between two brace-delimited override sequences, and style is the computed
styling of the fragment, ie. the original style modified by all override
sequences before the fragment.
Newline and non-breakable space overrides are left as-is.
Supported override tags:
- i, b, u, s
- r (with or without style name)
"""
fragments = SSAEvent.OVERRIDE_SEQUENCE.split(text)
if len(fragments) == 1:
return [(text, style)]
def apply_overrides(all_overrides):
s = style.copy()
for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides):
if tag == r"\r":
s = style.copy() # reset to original line style
elif tag.startswith(r"\r"):
name = tag[2:]
if name in styles:
s = styles[name].copy() # reset to named style
else:
if "i" in tag: s.italic = "1" in tag
elif "b" in tag: s.bold = "1" in tag
elif "u" in tag: s.underline = "1" in tag
elif "s" in tag: s.strikeout = "1" in tag
return s
overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text)
overrides_prefix_sum = ["".join(overrides[:i]) for i in range(len(overrides) + 1)]
computed_styles = map(apply_overrides, overrides_prefix_sum)
return list(zip(fragments, computed_styles)) | [
"def",
"parse_tags",
"(",
"text",
",",
"style",
"=",
"SSAStyle",
".",
"DEFAULT_STYLE",
",",
"styles",
"=",
"{",
"}",
")",
":",
"fragments",
"=",
"SSAEvent",
".",
"OVERRIDE_SEQUENCE",
".",
"split",
"(",
"text",
")",
"if",
"len",
"(",
"fragments",
")",
"==",
"1",
":",
"return",
"[",
"(",
"text",
",",
"style",
")",
"]",
"def",
"apply_overrides",
"(",
"all_overrides",
")",
":",
"s",
"=",
"style",
".",
"copy",
"(",
")",
"for",
"tag",
"in",
"re",
".",
"findall",
"(",
"r\"\\\\[ibus][10]|\\\\r[a-zA-Z_0-9 ]*\"",
",",
"all_overrides",
")",
":",
"if",
"tag",
"==",
"r\"\\r\"",
":",
"s",
"=",
"style",
".",
"copy",
"(",
")",
"# reset to original line style",
"elif",
"tag",
".",
"startswith",
"(",
"r\"\\r\"",
")",
":",
"name",
"=",
"tag",
"[",
"2",
":",
"]",
"if",
"name",
"in",
"styles",
":",
"s",
"=",
"styles",
"[",
"name",
"]",
".",
"copy",
"(",
")",
"# reset to named style",
"else",
":",
"if",
"\"i\"",
"in",
"tag",
":",
"s",
".",
"italic",
"=",
"\"1\"",
"in",
"tag",
"elif",
"\"b\"",
"in",
"tag",
":",
"s",
".",
"bold",
"=",
"\"1\"",
"in",
"tag",
"elif",
"\"u\"",
"in",
"tag",
":",
"s",
".",
"underline",
"=",
"\"1\"",
"in",
"tag",
"elif",
"\"s\"",
"in",
"tag",
":",
"s",
".",
"strikeout",
"=",
"\"1\"",
"in",
"tag",
"return",
"s",
"overrides",
"=",
"SSAEvent",
".",
"OVERRIDE_SEQUENCE",
".",
"findall",
"(",
"text",
")",
"overrides_prefix_sum",
"=",
"[",
"\"\"",
".",
"join",
"(",
"overrides",
"[",
":",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"overrides",
")",
"+",
"1",
")",
"]",
"computed_styles",
"=",
"map",
"(",
"apply_overrides",
",",
"overrides_prefix_sum",
")",
"return",
"list",
"(",
"zip",
"(",
"fragments",
",",
"computed_styles",
")",
")"
] | Split text into fragments with computed SSAStyles.
Returns list of tuples (fragment, style), where fragment is a part of text
between two brace-delimited override sequences, and style is the computed
styling of the fragment, ie. the original style modified by all override
sequences before the fragment.
Newline and non-breakable space overrides are left as-is.
Supported override tags:
- i, b, u, s
- r (with or without style name) | [
"Split",
"text",
"into",
"fragments",
"with",
"computed",
"SSAStyles",
".",
"Returns",
"list",
"of",
"tuples",
"(",
"fragment",
"style",
")",
"where",
"fragment",
"is",
"a",
"part",
"of",
"text",
"between",
"two",
"brace",
"-",
"delimited",
"override",
"sequences",
"and",
"style",
"is",
"the",
"computed",
"styling",
"of",
"the",
"fragment",
"ie",
".",
"the",
"original",
"style",
"modified",
"by",
"all",
"override",
"sequences",
"before",
"the",
"fragment",
".",
"Newline",
"and",
"non",
"-",
"breakable",
"space",
"overrides",
"are",
"left",
"as",
"-",
"is",
".",
"Supported",
"override",
"tags",
":",
"-",
"i",
"b",
"u",
"s",
"-",
"r",
"(",
"with",
"or",
"without",
"style",
"name",
")"
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/substation.py#L89-L130 |
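A usage sketch for `parse_tags`, assuming the package's default style has italic off; fragments come back paired with their computed styles:

```python
from pysubs2.substation import parse_tags

line = r"plain {\i1}italic{\i0} plain again"
for fragment, style in parse_tags(line):
    print(repr(fragment), style.italic)
# 'plain ' False
# 'italic' True
# ' plain again' False
```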
tkarabela/pysubs2 | pysubs2/ssaevent.py | SSAEvent.plaintext | def plaintext(self):
"""
Subtitle text as multi-line string with no tags (read/write property).
Writing to this property replaces :attr:`SSAEvent.text` with given plain
text. Newlines are converted to ``\\N`` tags.
"""
text = self.text
text = self.OVERRIDE_SEQUENCE.sub("", text)
text = text.replace(r"\h", " ")
text = text.replace(r"\n", "\n")
text = text.replace(r"\N", "\n")
return text | python | def plaintext(self):
"""
Subtitle text as multi-line string with no tags (read/write property).
Writing to this property replaces :attr:`SSAEvent.text` with given plain
text. Newlines are converted to ``\\N`` tags.
"""
text = self.text
text = self.OVERRIDE_SEQUENCE.sub("", text)
text = text.replace(r"\h", " ")
text = text.replace(r"\n", "\n")
text = text.replace(r"\N", "\n")
return text | [
"def",
"plaintext",
"(",
"self",
")",
":",
"text",
"=",
"self",
".",
"text",
"text",
"=",
"self",
".",
"OVERRIDE_SEQUENCE",
".",
"sub",
"(",
"\"\"",
",",
"text",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"r\"\\h\"",
",",
"\" \"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"r\"\\n\"",
",",
"\"\\n\"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"r\"\\N\"",
",",
"\"\\n\"",
")",
"return",
"text"
] | Subtitle text as multi-line string with no tags (read/write property).
Writing to this property replaces :attr:`SSAEvent.text` with given plain
text. Newlines are converted to ``\\N`` tags. | [
"Subtitle",
"text",
"as",
"multi",
"-",
"line",
"string",
"with",
"no",
"tags",
"(",
"read",
"/",
"write",
"property",
")",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssaevent.py#L87-L99 |
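A usage sketch for the `plaintext` property (read and write), assuming the public `SSAEvent` constructor accepts field keywords as in the pysubs2 package:

```python
from pysubs2 import SSAEvent

ev = SSAEvent(start=0, end=2000, text=r"First line\N{\i1}second{\i0} line")
print(ev.plaintext)        # tags stripped, \N becomes a newline:
                           # First line
                           # second line
ev.plaintext = "one\ntwo"  # writing converts newlines back to \N
print(ev.text)             # one\Ntwo
```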
tkarabela/pysubs2 | pysubs2/ssaevent.py | SSAEvent.shift | def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift start and end times.
See :meth:`SSAFile.shift()` for full description.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
self.start += delta
self.end += delta | python | def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
"""
Shift start and end times.
See :meth:`SSAFile.shift()` for full description.
"""
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
self.start += delta
self.end += delta | [
"def",
"shift",
"(",
"self",
",",
"h",
"=",
"0",
",",
"m",
"=",
"0",
",",
"s",
"=",
"0",
",",
"ms",
"=",
"0",
",",
"frames",
"=",
"None",
",",
"fps",
"=",
"None",
")",
":",
"delta",
"=",
"make_time",
"(",
"h",
"=",
"h",
",",
"m",
"=",
"m",
",",
"s",
"=",
"s",
",",
"ms",
"=",
"ms",
",",
"frames",
"=",
"frames",
",",
"fps",
"=",
"fps",
")",
"self",
".",
"start",
"+=",
"delta",
"self",
".",
"end",
"+=",
"delta"
] | Shift start and end times.
See :meth:`SSAFile.shift()` for full description. | [
"Shift",
"start",
"and",
"end",
"times",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssaevent.py#L105-L114 |
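A usage sketch for `shift`, under the same `SSAEvent` constructor assumption:

```python
from pysubs2 import SSAEvent

ev = SSAEvent(start=0, end=1000)
ev.shift(s=2)                # two seconds later
print(ev.start, ev.end)      # 2000 3000
ev.shift(frames=25, fps=25)  # frame-based shift: +1000 ms
print(ev.start, ev.end)      # 3000 4000
```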
tkarabela/pysubs2 | pysubs2/ssaevent.py | SSAEvent.equals | def equals(self, other):
"""Field-based equality for SSAEvents."""
if isinstance(other, SSAEvent):
return self.as_dict() == other.as_dict()
else:
raise TypeError("Cannot compare to non-SSAEvent object") | python | def equals(self, other):
"""Field-based equality for SSAEvents."""
if isinstance(other, SSAEvent):
return self.as_dict() == other.as_dict()
else:
raise TypeError("Cannot compare to non-SSAEvent object") | [
"def",
"equals",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"SSAEvent",
")",
":",
"return",
"self",
".",
"as_dict",
"(",
")",
"==",
"other",
".",
"as_dict",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot compare to non-SSAEvent object\"",
")"
] | Field-based equality for SSAEvents. | [
"Field",
"-",
"based",
"equality",
"for",
"SSAEvents",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssaevent.py#L123-L128 |
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.load | def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs) | python | def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8, you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
"""
with open(path, encoding=encoding) as fp:
return cls.from_file(fp, format_, fps=fps, **kwargs) | [
"def",
"load",
"(",
"cls",
",",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"format_",
"=",
"None",
",",
"fps",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"path",
",",
"encoding",
"=",
"encoding",
")",
"as",
"fp",
":",
"return",
"cls",
".",
"from_file",
"(",
"fp",
",",
"format_",
",",
"fps",
"=",
"fps",
",",
"*",
"*",
"kwargs",
")"
] | Load subtitle file from given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of input file.
Defaults to UTF-8; you may need to change this.
format_ (str): Optional, forces use of specific parser
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file contents. This argument should
be rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. Framerate might
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
Returns:
SSAFile
Raises:
IOError
UnicodeDecodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.FormatAutodetectionError
Note:
pysubs2 may autodetect subtitle format and/or framerate. These
values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
attributes.
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976) | [
"Load",
"subtitle",
"file",
"from",
"given",
"path",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L52-L92 |
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.from_string | def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs) | python | def from_string(cls, string, format_=None, fps=None, **kwargs):
"""
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
"""
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs) | [
"def",
"from_string",
"(",
"cls",
",",
"string",
",",
"format_",
"=",
"None",
",",
"fps",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"fp",
"=",
"io",
".",
"StringIO",
"(",
"string",
")",
"return",
"cls",
".",
"from_file",
"(",
"fp",
",",
"format_",
",",
"fps",
"=",
"fps",
",",
"*",
"*",
"kwargs",
")"
] | Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text) | [
"Load",
"subtitle",
"file",
"from",
"string",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L95-L118 |
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.from_file | def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs | python | def from_file(cls, fp, format_=None, fps=None, **kwargs):
"""
Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile
"""
if format_ is None:
# Autodetect subtitle format, then read again using correct parser.
# The file might be a pipe and we need to read it twice,
# so just buffer everything.
text = fp.read()
fragment = text[:10000]
format_ = autodetect_format(fragment)
fp = io.StringIO(text)
impl = get_format_class(format_)
subs = cls() # an empty subtitle file
subs.format = format_
subs.fps = fps
impl.from_file(subs, fp, format_, fps=fps, **kwargs)
return subs | [
"def",
"from_file",
"(",
"cls",
",",
"fp",
",",
"format_",
"=",
"None",
",",
"fps",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"format_",
"is",
"None",
":",
"# Autodetect subtitle format, then read again using correct parser.",
"# The file might be a pipe and we need to read it twice,",
"# so just buffer everything.",
"text",
"=",
"fp",
".",
"read",
"(",
")",
"fragment",
"=",
"text",
"[",
":",
"10000",
"]",
"format_",
"=",
"autodetect_format",
"(",
"fragment",
")",
"fp",
"=",
"io",
".",
"StringIO",
"(",
"text",
")",
"impl",
"=",
"get_format_class",
"(",
"format_",
")",
"subs",
"=",
"cls",
"(",
")",
"# an empty subtitle file",
"subs",
".",
"format",
"=",
"format_",
"subs",
".",
"fps",
"=",
"fps",
"impl",
".",
"from_file",
"(",
"subs",
",",
"fp",
",",
"format_",
",",
"fps",
"=",
"fps",
",",
"*",
"*",
"kwargs",
")",
"return",
"subs"
] | Read subtitle file from file object.
See :meth:`SSAFile.load()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.load()`
or :meth:`SSAFile.from_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
Returns:
SSAFile | [
"Read",
"subtitle",
"file",
"from",
"file",
"object",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L121-L153 |
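
A hedged sketch of the low-level reader using an in-memory buffer, which satisfies the text-mode file object requirement:

import io
from pysubs2 import SSAFile

SRT_TEXT = """\
1
00:00:00,000 --> 00:00:02,000
Hello from a StringIO buffer.
"""

fp = io.StringIO(SRT_TEXT)
subs = SSAFile.from_file(fp)     # format autodetected from the buffered text
print(subs.format, len(subs))    # expected: srt 1
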
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.save | def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish save it with
different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs) | python | def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
"""
Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish to save it with
a different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError
"""
if format_ is None:
ext = os.path.splitext(path)[1].lower()
format_ = get_format_identifier(ext)
with open(path, "w", encoding=encoding) as fp:
self.to_file(fp, format_, fps=fps, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"format_",
"=",
"None",
",",
"fps",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"format_",
"is",
"None",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"format_",
"=",
"get_format_identifier",
"(",
"ext",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
",",
"encoding",
"=",
"encoding",
")",
"as",
"fp",
":",
"self",
".",
"to_file",
"(",
"fp",
",",
"format_",
",",
"fps",
"=",
"fps",
",",
"*",
"*",
"kwargs",
")"
] | Save subtitle file to given path.
Arguments:
path (str): Path to subtitle file.
encoding (str): Character encoding of output file.
Defaults to UTF-8, which should be fine for most purposes.
format_ (str): Optional, specifies desired subtitle format
(eg. `"srt"`, `"ass"`). Otherwise, format is detected
automatically from file extension. Thus, this argument
is rarely needed.
fps (float): Framerate for frame-based formats (MicroDVD),
for other formats this argument is ignored. When omitted,
:attr:`SSAFile.fps` value is used (ie. the framerate used
for loading the file, if any). When the :class:`SSAFile`
wasn't loaded from MicroDVD, or if you wish to save it with
a different framerate, use this argument. See also
:meth:`SSAFile.transform_framerate()` for fixing bad
frame-based to time-based conversions.
kwargs: Extra options for the writer.
Raises:
IOError
UnicodeEncodeError
pysubs2.exceptions.UnknownFPSError
pysubs2.exceptions.UnknownFormatIdentifierError
pysubs2.exceptions.UnknownFileExtensionError | [
"Save",
"subtitle",
"file",
"to",
"given",
"path",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L155-L190 |
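
A short sketch of the saving variants described above (all paths are placeholders):

import pysubs2

subs = pysubs2.load("movie.srt")       # placeholder input file
subs.save("movie.ass")                 # format inferred from the .ass extension
subs.save("movie.sub", fps=23.976)     # MicroDVD requires a framerate
subs.save("movie.txt", format_="srt")  # explicit format_ overrides the extension
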
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.to_string | def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue() | python | def to_string(self, format_, fps=None, **kwargs):
"""
Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str
"""
fp = io.StringIO()
self.to_file(fp, format_, fps=fps, **kwargs)
return fp.getvalue() | [
"def",
"to_string",
"(",
"self",
",",
"format_",
",",
"fps",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"fp",
"=",
"io",
".",
"StringIO",
"(",
")",
"self",
".",
"to_file",
"(",
"fp",
",",
"format_",
",",
"fps",
"=",
"fps",
",",
"*",
"*",
"kwargs",
")",
"return",
"fp",
".",
"getvalue",
"(",
")"
] | Get subtitle file as a string.
See :meth:`SSAFile.save()` for full description.
Returns:
str | [
"Get",
"subtitle",
"file",
"as",
"a",
"string",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L192-L204 |
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.to_file | def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs) | python | def to_file(self, fp, format_, fps=None, **kwargs):
"""
Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary).
"""
impl = get_format_class(format_)
impl.to_file(self, fp, format_, fps=fps, **kwargs) | [
"def",
"to_file",
"(",
"self",
",",
"fp",
",",
"format_",
",",
"fps",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"impl",
"=",
"get_format_class",
"(",
"format_",
")",
"impl",
".",
"to_file",
"(",
"self",
",",
"fp",
",",
"format_",
",",
"fps",
"=",
"fps",
",",
"*",
"*",
"kwargs",
")"
] | Write subtitle file to file object.
See :meth:`SSAFile.save()` for full description.
Note:
This is a low-level method. Usually, one of :meth:`SSAFile.save()`
or :meth:`SSAFile.to_string()` is preferable.
Arguments:
fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
Note that the file must be opened in text mode (as opposed to binary). | [
"Write",
"subtitle",
"file",
"to",
"file",
"object",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L206-L222 |
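
A sketch of the low-level writer streaming SubRip output to stdout, which is an acceptable text-mode file object:

import sys
from pysubs2 import SSAFile

subs = SSAFile.from_string("1\n00:00:00,000 --> 00:00:01,000\nHi\n")
subs.to_file(sys.stdout, "srt")   # unlike save(), the format identifier is mandatory
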
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.transform_framerate | def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio)) | python | def transform_framerate(self, in_fps, out_fps):
"""
Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given.
"""
if in_fps <= 0 or out_fps <= 0:
raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps))
ratio = in_fps / out_fps
for line in self:
line.start = int(round(line.start * ratio))
line.end = int(round(line.end * ratio)) | [
"def",
"transform_framerate",
"(",
"self",
",",
"in_fps",
",",
"out_fps",
")",
":",
"if",
"in_fps",
"<=",
"0",
"or",
"out_fps",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Framerates must be positive, cannot transform %f -> %f\"",
"%",
"(",
"in_fps",
",",
"out_fps",
")",
")",
"ratio",
"=",
"in_fps",
"/",
"out_fps",
"for",
"line",
"in",
"self",
":",
"line",
".",
"start",
"=",
"int",
"(",
"round",
"(",
"line",
".",
"start",
"*",
"ratio",
")",
")",
"line",
".",
"end",
"=",
"int",
"(",
"round",
"(",
"line",
".",
"end",
"*",
"ratio",
")",
")"
] | Rescale all timestamps by ratio of in_fps/out_fps.
Can be used to fix files converted from frame-based to time-based
with wrongly assumed framerate.
Arguments:
in_fps (float)
out_fps (float)
Raises:
ValueError: Non-positive framerate given. | [
"Rescale",
"all",
"timestamps",
"by",
"ratio",
"of",
"in_fps",
"/",
"out_fps",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L250-L271 |
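
A sketch of repairing a conversion made with the wrong framerate: the timestamps below are assumed to have been computed at 25 fps while the video actually runs at 23.976 fps, so every time is rescaled by 25/23.976:

import pysubs2

subs = pysubs2.load("wrongly-converted.srt")          # placeholder file
subs.transform_framerate(in_fps=25, out_fps=23.976)   # start/end *= 25/23.976
subs.save("fixed.srt")
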
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.rename_style | def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name | python | def rename_style(self, old_name, new_name):
"""
Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken.
"""
if old_name not in self.styles:
raise KeyError("Style %r not found" % old_name)
if new_name in self.styles:
raise ValueError("There is already a style called %r" % new_name)
if not is_valid_field_content(new_name):
raise ValueError("%r is not a valid name" % new_name)
self.styles[new_name] = self.styles[old_name]
del self.styles[old_name]
for line in self:
# XXX also handle \r override tag
if line.style == old_name:
line.style = new_name | [
"def",
"rename_style",
"(",
"self",
",",
"old_name",
",",
"new_name",
")",
":",
"if",
"old_name",
"not",
"in",
"self",
".",
"styles",
":",
"raise",
"KeyError",
"(",
"\"Style %r not found\"",
"%",
"old_name",
")",
"if",
"new_name",
"in",
"self",
".",
"styles",
":",
"raise",
"ValueError",
"(",
"\"There is already a style called %r\"",
"%",
"new_name",
")",
"if",
"not",
"is_valid_field_content",
"(",
"new_name",
")",
":",
"raise",
"ValueError",
"(",
"\"%r is not a valid name\"",
"%",
"new_name",
")",
"self",
".",
"styles",
"[",
"new_name",
"]",
"=",
"self",
".",
"styles",
"[",
"old_name",
"]",
"del",
"self",
".",
"styles",
"[",
"old_name",
"]",
"for",
"line",
"in",
"self",
":",
"# XXX also handle \\r override tag",
"if",
"line",
".",
"style",
"==",
"old_name",
":",
"line",
".",
"style",
"=",
"new_name"
] | Rename a style, including references to it.
Arguments:
old_name (str): Style to be renamed.
new_name (str): New name for the style (must be unused).
Raises:
KeyError: No style named old_name.
ValueError: new_name is not a legal name (cannot use commas)
or new_name is taken. | [
"Rename",
"a",
"style",
"including",
"references",
"to",
"it",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L277-L304 |
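
A usage sketch; the style names are placeholders and the new name must not be taken yet:

import pysubs2

subs = pysubs2.load("styled.ass")          # placeholder ASS file
subs.rename_style("Default", "Dialogue")   # also rewrites events using the style
subs.save("styled-renamed.ass")
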
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.import_styles | def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style | python | def import_styles(self, subs, overwrite=True):
"""
Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True).
"""
if not isinstance(subs, SSAFile):
raise TypeError("Must supply an SSAFile.")
for name, style in subs.styles.items():
if name not in self.styles or overwrite:
self.styles[name] = style | [
"def",
"import_styles",
"(",
"self",
",",
"subs",
",",
"overwrite",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"subs",
",",
"SSAFile",
")",
":",
"raise",
"TypeError",
"(",
"\"Must supply an SSAFile.\"",
")",
"for",
"name",
",",
"style",
"in",
"subs",
".",
"styles",
".",
"items",
"(",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"styles",
"or",
"overwrite",
":",
"self",
".",
"styles",
"[",
"name",
"]",
"=",
"style"
] | Merge in styles from other SSAFile.
Arguments:
subs (SSAFile): Subtitle file imported from.
overwrite (bool): On name conflict, use style from the other file
(default: True). | [
"Merge",
"in",
"styles",
"from",
"other",
"SSAFile",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L306-L321 |
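
A sketch of copying styles between two files (paths are placeholders):

import pysubs2

target = pysubs2.load("episode-02.ass")   # placeholder files
donor = pysubs2.load("episode-01.ass")
target.import_styles(donor)               # overwrite=False would keep local styles on a name clash
target.save("episode-02-styled.ass")
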
tkarabela/pysubs2 | pysubs2/ssafile.py | SSAFile.equals | def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object") | python | def equals(self, other):
"""
Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level.
"""
if isinstance(other, SSAFile):
for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
sv, ov = self.info.get(key), other.info.get(key)
if sv is None:
logging.debug("%r missing in self.info", key)
return False
elif ov is None:
logging.debug("%r missing in other.info", key)
return False
elif sv != ov:
logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
return False
for key in set(chain(self.styles.keys(), other.styles.keys())):
sv, ov = self.styles.get(key), other.styles.get(key)
if sv is None:
logging.debug("%r missing in self.styles", key)
return False
elif ov is None:
logging.debug("%r missing in other.styles", key)
return False
elif sv != ov:
for k in sv.FIELDS:
if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict())
return False
if len(self) != len(other):
logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other))
return False
for i, (se, oe) in enumerate(zip(self.events, other.events)):
if not se.equals(oe):
for k in se.FIELDS:
if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k)
logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict())
return False
return True
else:
raise TypeError("Cannot compare to non-SSAFile object") | [
"def",
"equals",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"SSAFile",
")",
":",
"for",
"key",
"in",
"set",
"(",
"chain",
"(",
"self",
".",
"info",
".",
"keys",
"(",
")",
",",
"other",
".",
"info",
".",
"keys",
"(",
")",
")",
")",
"-",
"{",
"\"ScriptType\"",
"}",
":",
"sv",
",",
"ov",
"=",
"self",
".",
"info",
".",
"get",
"(",
"key",
")",
",",
"other",
".",
"info",
".",
"get",
"(",
"key",
")",
"if",
"sv",
"is",
"None",
":",
"logging",
".",
"debug",
"(",
"\"%r missing in self.info\"",
",",
"key",
")",
"return",
"False",
"elif",
"ov",
"is",
"None",
":",
"logging",
".",
"debug",
"(",
"\"%r missing in other.info\"",
",",
"key",
")",
"return",
"False",
"elif",
"sv",
"!=",
"ov",
":",
"logging",
".",
"debug",
"(",
"\"info %r differs (self=%r, other=%r)\"",
",",
"key",
",",
"sv",
",",
"ov",
")",
"return",
"False",
"for",
"key",
"in",
"set",
"(",
"chain",
"(",
"self",
".",
"styles",
".",
"keys",
"(",
")",
",",
"other",
".",
"styles",
".",
"keys",
"(",
")",
")",
")",
":",
"sv",
",",
"ov",
"=",
"self",
".",
"styles",
".",
"get",
"(",
"key",
")",
",",
"other",
".",
"styles",
".",
"get",
"(",
"key",
")",
"if",
"sv",
"is",
"None",
":",
"logging",
".",
"debug",
"(",
"\"%r missing in self.styles\"",
",",
"key",
")",
"return",
"False",
"elif",
"ov",
"is",
"None",
":",
"logging",
".",
"debug",
"(",
"\"%r missing in other.styles\"",
",",
"key",
")",
"return",
"False",
"elif",
"sv",
"!=",
"ov",
":",
"for",
"k",
"in",
"sv",
".",
"FIELDS",
":",
"if",
"getattr",
"(",
"sv",
",",
"k",
")",
"!=",
"getattr",
"(",
"ov",
",",
"k",
")",
":",
"logging",
".",
"debug",
"(",
"\"difference in field %r\"",
",",
"k",
")",
"logging",
".",
"debug",
"(",
"\"style %r differs (self=%r, other=%r)\"",
",",
"key",
",",
"sv",
".",
"as_dict",
"(",
")",
",",
"ov",
".",
"as_dict",
"(",
")",
")",
"return",
"False",
"if",
"len",
"(",
"self",
")",
"!=",
"len",
"(",
"other",
")",
":",
"logging",
".",
"debug",
"(",
"\"different # of subtitles (self=%d, other=%d)\"",
",",
"len",
"(",
"self",
")",
",",
"len",
"(",
"other",
")",
")",
"return",
"False",
"for",
"i",
",",
"(",
"se",
",",
"oe",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"events",
",",
"other",
".",
"events",
")",
")",
":",
"if",
"not",
"se",
".",
"equals",
"(",
"oe",
")",
":",
"for",
"k",
"in",
"se",
".",
"FIELDS",
":",
"if",
"getattr",
"(",
"se",
",",
"k",
")",
"!=",
"getattr",
"(",
"oe",
",",
"k",
")",
":",
"logging",
".",
"debug",
"(",
"\"difference in field %r\"",
",",
"k",
")",
"logging",
".",
"debug",
"(",
"\"event %d differs (self=%r, other=%r)\"",
",",
"i",
",",
"se",
".",
"as_dict",
"(",
")",
",",
"oe",
".",
"as_dict",
"(",
")",
")",
"return",
"False",
"return",
"True",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot compare to non-SSAFile object\"",
")"
] | Equality of two SSAFiles.
Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`.
Order of entries in OrderedDicts does not matter. "ScriptType" key in info is
considered an implementation detail and thus ignored.
Useful mostly in unit tests. Differences are logged at DEBUG level. | [
"Equality",
"of",
"two",
"SSAFiles",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/ssafile.py#L327-L379 |
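
A test-style sketch; with DEBUG logging enabled, the implementation above reports the first differing field:

import logging
from pysubs2 import SSAFile

logging.basicConfig(level=logging.DEBUG)

a = SSAFile.from_string("1\n00:00:00,000 --> 00:00:01,000\nHi\n")
b = SSAFile.from_string("1\n00:00:00,000 --> 00:00:01,500\nHi\n")
assert not a.equals(b)   # the 'end' field differs and is logged at DEBUG level
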
tkarabela/pysubs2 | pysubs2/formats.py | get_file_extension | def get_file_extension(format_):
"""Format identifier -> file extension"""
if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS:
raise UnknownFormatIdentifierError(format_)
for ext, f in FILE_EXTENSION_TO_FORMAT_IDENTIFIER.items():
if f == format_:
return ext
raise RuntimeError("No file extension for format %r" % format_) | python | def get_file_extension(format_):
"""Format identifier -> file extension"""
if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS:
raise UnknownFormatIdentifierError(format_)
for ext, f in FILE_EXTENSION_TO_FORMAT_IDENTIFIER.items():
if f == format_:
return ext
raise RuntimeError("No file extension for format %r" % format_) | [
"def",
"get_file_extension",
"(",
"format_",
")",
":",
"if",
"format_",
"not",
"in",
"FORMAT_IDENTIFIER_TO_FORMAT_CLASS",
":",
"raise",
"UnknownFormatIdentifierError",
"(",
"format_",
")",
"for",
"ext",
",",
"f",
"in",
"FILE_EXTENSION_TO_FORMAT_IDENTIFIER",
".",
"items",
"(",
")",
":",
"if",
"f",
"==",
"format_",
":",
"return",
"ext",
"raise",
"RuntimeError",
"(",
"\"No file extension for format %r\"",
"%",
"format_",
")"
] | Format identifier -> file extension | [
"Format",
"identifier",
"-",
">",
"file",
"extension"
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/formats.py#L42-L51 |
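
A tiny sketch, treating the function as a pysubs2 internal; the returned extension presumably includes the leading dot, since the mapping is keyed by os.path.splitext() output elsewhere in the module:

from pysubs2.formats import get_file_extension

print(get_file_extension("srt"))   # expected: .srt
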
tkarabela/pysubs2 | pysubs2/formats.py | autodetect_format | def autodetect_format(content):
"""Return format identifier for given fragment or raise FormatAutodetectionError."""
formats = set()
for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values():
guess = impl.guess_format(content)
if guess is not None:
formats.add(guess)
if len(formats) == 1:
return formats.pop()
elif not formats:
raise FormatAutodetectionError("No suitable formats")
else:
raise FormatAutodetectionError("Multiple suitable formats (%r)" % formats) | python | def autodetect_format(content):
"""Return format identifier for given fragment or raise FormatAutodetectionError."""
formats = set()
for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values():
guess = impl.guess_format(content)
if guess is not None:
formats.add(guess)
if len(formats) == 1:
return formats.pop()
elif not formats:
raise FormatAutodetectionError("No suitable formats")
else:
raise FormatAutodetectionError("Multiple suitable formats (%r)" % formats) | [
"def",
"autodetect_format",
"(",
"content",
")",
":",
"formats",
"=",
"set",
"(",
")",
"for",
"impl",
"in",
"FORMAT_IDENTIFIER_TO_FORMAT_CLASS",
".",
"values",
"(",
")",
":",
"guess",
"=",
"impl",
".",
"guess_format",
"(",
"content",
")",
"if",
"guess",
"is",
"not",
"None",
":",
"formats",
".",
"add",
"(",
"guess",
")",
"if",
"len",
"(",
"formats",
")",
"==",
"1",
":",
"return",
"formats",
".",
"pop",
"(",
")",
"elif",
"not",
"formats",
":",
"raise",
"FormatAutodetectionError",
"(",
"\"No suitable formats\"",
")",
"else",
":",
"raise",
"FormatAutodetectionError",
"(",
"\"Multiple suitable formats (%r)\"",
"%",
"formats",
")"
] | Return format identifier for given fragment or raise FormatAutodetectionError. | [
"Return",
"format",
"identifier",
"for",
"given",
"fragment",
"or",
"raise",
"FormatAutodetectionError",
"."
] | train | https://github.com/tkarabela/pysubs2/blob/6439eb5159e6aa6b47e0f8e1d950e8bdd7c5341f/pysubs2/formats.py#L53-L66 |
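
A sketch of the voting-based detection above; exactly one registered format class must claim the fragment, otherwise FormatAutodetectionError is raised:

from pysubs2.formats import autodetect_format

fragment = "1\n00:00:00,000 --> 00:00:02,000\nHello\n"
print(autodetect_format(fragment))   # expected: srt
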
aio-libs/aiohttp-devtools | aiohttp_devtools/runserver/serve.py | modify_main_app | def modify_main_app(app, config: Config):
"""
Modify the app we're serving to make development easier, eg.
* modify responses to add the livereload snippet
* set ``static_root_url`` on the app
* setup the debug toolbar
"""
app._debug = True
dft_logger.debug('livereload enabled: %s', '✓' if config.livereload else '✖')
def get_host(request):
if config.infer_host:
return request.headers.get('host', 'localhost').split(':', 1)[0]
else:
return config.host
if config.livereload:
async def on_prepare(request, response):
if (not request.path.startswith('/_debugtoolbar') and
'text/html' in response.content_type and
getattr(response, 'body', False)):
lr_snippet = LIVE_RELOAD_HOST_SNIPPET.format(get_host(request), config.aux_port)
dft_logger.debug('appending live reload snippet "%s" to body', lr_snippet)
response.body += lr_snippet.encode()
app.on_response_prepare.append(on_prepare)
static_path = config.static_url.strip('/')
if config.infer_host and config.static_path is not None:
# we set the app key even in middleware to make the switch to production easier and for backwards compat.
@web.middleware
async def static_middleware(request, handler):
static_url = 'http://{}:{}/{}'.format(get_host(request), config.aux_port, static_path)
dft_logger.debug('setting app static_root_url to "%s"', static_url)
request.app['static_root_url'].change(static_url)
return await handler(request)
app.middlewares.insert(0, static_middleware)
if config.static_path is not None:
static_url = 'http://{}:{}/{}'.format(config.host, config.aux_port, static_path)
dft_logger.debug('setting app static_root_url to "%s"', static_url)
app['static_root_url'] = MutableValue(static_url)
if config.debug_toolbar and aiohttp_debugtoolbar:
aiohttp_debugtoolbar.setup(app, intercept_redirects=False) | python | def modify_main_app(app, config: Config):
"""
Modify the app we're serving to make development easier, eg.
* modify responses to add the livereload snippet
* set ``static_root_url`` on the app
* setup the debug toolbar
"""
app._debug = True
dft_logger.debug('livereload enabled: %s', '✓' if config.livereload else '✖')
def get_host(request):
if config.infer_host:
return request.headers.get('host', 'localhost').split(':', 1)[0]
else:
return config.host
if config.livereload:
async def on_prepare(request, response):
if (not request.path.startswith('/_debugtoolbar') and
'text/html' in response.content_type and
getattr(response, 'body', False)):
lr_snippet = LIVE_RELOAD_HOST_SNIPPET.format(get_host(request), config.aux_port)
dft_logger.debug('appending live reload snippet "%s" to body', lr_snippet)
response.body += lr_snippet.encode()
app.on_response_prepare.append(on_prepare)
static_path = config.static_url.strip('/')
if config.infer_host and config.static_path is not None:
# we set the app key even in middleware to make the switch to production easier and for backwards compat.
@web.middleware
async def static_middleware(request, handler):
static_url = 'http://{}:{}/{}'.format(get_host(request), config.aux_port, static_path)
dft_logger.debug('setting app static_root_url to "%s"', static_url)
request.app['static_root_url'].change(static_url)
return await handler(request)
app.middlewares.insert(0, static_middleware)
if config.static_path is not None:
static_url = 'http://{}:{}/{}'.format(config.host, config.aux_port, static_path)
dft_logger.debug('setting app static_root_url to "%s"', static_url)
app['static_root_url'] = MutableValue(static_url)
if config.debug_toolbar and aiohttp_debugtoolbar:
aiohttp_debugtoolbar.setup(app, intercept_redirects=False) | [
"def",
"modify_main_app",
"(",
"app",
",",
"config",
":",
"Config",
")",
":",
"app",
".",
"_debug",
"=",
"True",
"dft_logger",
".",
"debug",
"(",
"'livereload enabled: %s'",
",",
"'✓' i",
" c",
"nfig.l",
"i",
"vereload e",
"se '",
"')",
"",
"def",
"get_host",
"(",
"request",
")",
":",
"if",
"config",
".",
"infer_host",
":",
"return",
"request",
".",
"headers",
".",
"get",
"(",
"'host'",
",",
"'localhost'",
")",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"0",
"]",
"else",
":",
"return",
"config",
".",
"host",
"if",
"config",
".",
"livereload",
":",
"async",
"def",
"on_prepare",
"(",
"request",
",",
"response",
")",
":",
"if",
"(",
"not",
"request",
".",
"path",
".",
"startswith",
"(",
"'/_debugtoolbar'",
")",
"and",
"'text/html'",
"in",
"response",
".",
"content_type",
"and",
"getattr",
"(",
"response",
",",
"'body'",
",",
"False",
")",
")",
":",
"lr_snippet",
"=",
"LIVE_RELOAD_HOST_SNIPPET",
".",
"format",
"(",
"get_host",
"(",
"request",
")",
",",
"config",
".",
"aux_port",
")",
"dft_logger",
".",
"debug",
"(",
"'appending live reload snippet \"%s\" to body'",
",",
"lr_snippet",
")",
"response",
".",
"body",
"+=",
"lr_snippet",
".",
"encode",
"(",
")",
"app",
".",
"on_response_prepare",
".",
"append",
"(",
"on_prepare",
")",
"static_path",
"=",
"config",
".",
"static_url",
".",
"strip",
"(",
"'/'",
")",
"if",
"config",
".",
"infer_host",
"and",
"config",
".",
"static_path",
"is",
"not",
"None",
":",
"# we set the app key even in middleware to make the switch to production easier and for backwards compat.",
"@",
"web",
".",
"middleware",
"async",
"def",
"static_middleware",
"(",
"request",
",",
"handler",
")",
":",
"static_url",
"=",
"'http://{}:{}/{}'",
".",
"format",
"(",
"get_host",
"(",
"request",
")",
",",
"config",
".",
"aux_port",
",",
"static_path",
")",
"dft_logger",
".",
"debug",
"(",
"'settings app static_root_url to \"%s\"'",
",",
"static_url",
")",
"request",
".",
"app",
"[",
"'static_root_url'",
"]",
".",
"change",
"(",
"static_url",
")",
"return",
"await",
"handler",
"(",
"request",
")",
"app",
".",
"middlewares",
".",
"insert",
"(",
"0",
",",
"static_middleware",
")",
"if",
"config",
".",
"static_path",
"is",
"not",
"None",
":",
"static_url",
"=",
"'http://{}:{}/{}'",
".",
"format",
"(",
"config",
".",
"host",
",",
"config",
".",
"aux_port",
",",
"static_path",
")",
"dft_logger",
".",
"debug",
"(",
"'settings app static_root_url to \"%s\"'",
",",
"static_url",
")",
"app",
"[",
"'static_root_url'",
"]",
"=",
"MutableValue",
"(",
"static_url",
")",
"if",
"config",
".",
"debug_toolbar",
"and",
"aiohttp_debugtoolbar",
":",
"aiohttp_debugtoolbar",
".",
"setup",
"(",
"app",
",",
"intercept_redirects",
"=",
"False",
")"
] | Modify the app we're serving to make development easier, eg.
* modify responses to add the livereload snippet
* set ``static_root_url`` on the app
* setup the debug toolbar | [
"Modify",
"the",
"app",
"we",
"re",
"serving",
"to",
"make",
"development",
"easier",
"eg",
".",
"*",
"modify",
"responses",
"to",
"add",
"the",
"livereload",
"snippet",
"*",
"set",
"static_root_url",
"on",
"the",
"app",
"*",
"setup",
"the",
"debug",
"toolbar"
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/runserver/serve.py#L36-L80 |
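
The snippet-injection pattern above generalizes to any aiohttp app; a hedged standalone sketch follows (the script URL is made up, not aiohttp-devtools' real livereload endpoint):

from aiohttp import web

SNIPPET = '<script src="http://localhost:8001/livereload.js"></script>'  # made-up URL

async def on_prepare(request, response):
    # append to HTML responses that carry a body, mirroring modify_main_app above
    if 'text/html' in response.content_type and getattr(response, 'body', False):
        response.body += SNIPPET.encode()

async def index(request):
    return web.Response(text='<h1>hi</h1>', content_type='text/html')

app = web.Application()
app.router.add_get('/', index)
app.on_response_prepare.append(on_prepare)
# web.run_app(app)  # uncomment to serve
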
aio-libs/aiohttp-devtools | aiohttp_devtools/runserver/serve.py | src_reload | async def src_reload(app, path: str = None):
"""
prompt each connected browser to reload by sending websocket message.
:param path: if supplied this must be a path relative to app['static_path'],
eg. reload of a single file is only supported for static resources.
:return: number of sources reloaded
"""
cli_count = len(app[WS])
if cli_count == 0:
return 0
is_html = None
if path:
path = str(Path(app['static_url']) / Path(path).relative_to(app['static_path']))
is_html = mimetypes.guess_type(path)[0] == 'text/html'
reloads = 0
aux_logger.debug('prompting source reload for %d clients', cli_count)
for ws, url in app[WS]:
if path and is_html and path not in {url, url + '.html', url.rstrip('/') + '/index.html'}:
aux_logger.debug('skipping reload for client at %s', url)
continue
aux_logger.debug('reload client at %s', url)
data = {
'command': 'reload',
'path': path or url,
'liveCSS': True,
'liveImg': True,
}
try:
await ws.send_str(json.dumps(data))
except RuntimeError as e:
# eg. "RuntimeError: websocket connection is closing"
aux_logger.error('Error broadcasting change to %s, RuntimeError: %s', path or url, e)
else:
reloads += 1
if reloads:
s = '' if reloads == 1 else 's'
aux_logger.info('prompted reload of %s on %d client%s', path or 'page', reloads, s)
return reloads | python | async def src_reload(app, path: str = None):
"""
prompt each connected browser to reload by sending websocket message.
:param path: if supplied this must be a path relative to app['static_path'],
eg. reload of a single file is only supported for static resources.
:return: number of sources reloaded
"""
cli_count = len(app[WS])
if cli_count == 0:
return 0
is_html = None
if path:
path = str(Path(app['static_url']) / Path(path).relative_to(app['static_path']))
is_html = mimetypes.guess_type(path)[0] == 'text/html'
reloads = 0
aux_logger.debug('prompting source reload for %d clients', cli_count)
for ws, url in app[WS]:
if path and is_html and path not in {url, url + '.html', url.rstrip('/') + '/index.html'}:
aux_logger.debug('skipping reload for client at %s', url)
continue
aux_logger.debug('reload client at %s', url)
data = {
'command': 'reload',
'path': path or url,
'liveCSS': True,
'liveImg': True,
}
try:
await ws.send_str(json.dumps(data))
except RuntimeError as e:
# eg. "RuntimeError: websocket connection is closing"
aux_logger.error('Error broadcasting change to %s, RuntimeError: %s', path or url, e)
else:
reloads += 1
if reloads:
s = '' if reloads == 1 else 's'
aux_logger.info('prompted reload of %s on %d client%s', path or 'page', reloads, s)
return reloads | [
"async",
"def",
"src_reload",
"(",
"app",
",",
"path",
":",
"str",
"=",
"None",
")",
":",
"cli_count",
"=",
"len",
"(",
"app",
"[",
"WS",
"]",
")",
"if",
"cli_count",
"==",
"0",
":",
"return",
"0",
"is_html",
"=",
"None",
"if",
"path",
":",
"path",
"=",
"str",
"(",
"Path",
"(",
"app",
"[",
"'static_url'",
"]",
")",
"/",
"Path",
"(",
"path",
")",
".",
"relative_to",
"(",
"app",
"[",
"'static_path'",
"]",
")",
")",
"is_html",
"=",
"mimetypes",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
"==",
"'text/html'",
"reloads",
"=",
"0",
"aux_logger",
".",
"debug",
"(",
"'prompting source reload for %d clients'",
",",
"cli_count",
")",
"for",
"ws",
",",
"url",
"in",
"app",
"[",
"WS",
"]",
":",
"if",
"path",
"and",
"is_html",
"and",
"path",
"not",
"in",
"{",
"url",
",",
"url",
"+",
"'.html'",
",",
"url",
".",
"rstrip",
"(",
"'/'",
")",
"+",
"'/index.html'",
"}",
":",
"aux_logger",
".",
"debug",
"(",
"'skipping reload for client at %s'",
",",
"url",
")",
"continue",
"aux_logger",
".",
"debug",
"(",
"'reload client at %s'",
",",
"url",
")",
"data",
"=",
"{",
"'command'",
":",
"'reload'",
",",
"'path'",
":",
"path",
"or",
"url",
",",
"'liveCSS'",
":",
"True",
",",
"'liveImg'",
":",
"True",
",",
"}",
"try",
":",
"await",
"ws",
".",
"send_str",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"# eg. \"RuntimeError: websocket connection is closing\"",
"aux_logger",
".",
"error",
"(",
"'Error broadcasting change to %s, RuntimeError: %s'",
",",
"path",
"or",
"url",
",",
"e",
")",
"else",
":",
"reloads",
"+=",
"1",
"if",
"reloads",
":",
"s",
"=",
"''",
"if",
"reloads",
"==",
"1",
"else",
"'s'",
"aux_logger",
".",
"info",
"(",
"'prompted reload of %s on %d client%s'",
",",
"path",
"or",
"'page'",
",",
"reloads",
",",
"s",
")",
"return",
"reloads"
] | prompt each connected browser to reload by sending websocket message.
:param path: if supplied this must be a path relative to app['static_path'],
eg. reload of a single file is only supported for static resources.
:return: number of sources reloaded | [
"prompt",
"each",
"connected",
"browser",
"to",
"reload",
"by",
"sending",
"websocket",
"message",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/runserver/serve.py#L145-L186 |
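
A hedged sketch of driving src_reload from a change queue; only the call signature comes from the function above, the watcher loop is invented, and app is assumed to be the aux application that tracks websocket clients:

import asyncio
from aiohttp_devtools.runserver.serve import src_reload  # module path per the URL above

async def broadcast_changes(app, queue: asyncio.Queue):
    while True:
        changed_path = await queue.get()            # a changed file path, or None
        reloads = await src_reload(app, changed_path)
        print('reload prompted on', reloads, 'client(s)')
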
aio-libs/aiohttp-devtools | aiohttp_devtools/runserver/serve.py | CustomStaticResource.modify_request | def modify_request(self, request):
"""
Apply common path conventions eg. / > /index.html, /foobar > /foobar.html
"""
filename = URL.build(path=request.match_info['filename'], encoded=True).path
raw_path = self._directory.joinpath(filename)
try:
filepath = raw_path.resolve()
if not filepath.exists():
# simulate resolve(strict=True) from Python 3.6, which is not available on 3.5
raise FileNotFoundError()
except FileNotFoundError:
try:
html_file = raw_path.with_name(raw_path.name + '.html').resolve().relative_to(self._directory)
except (FileNotFoundError, ValueError):
pass
else:
request.match_info['filename'] = str(html_file)
else:
if filepath.is_dir():
index_file = filepath / 'index.html'
if index_file.exists():
try:
request.match_info['filename'] = str(index_file.relative_to(self._directory))
except ValueError:
# path is not relative to self._directory
pass | python | def modify_request(self, request):
"""
Apply common path conventions eg. / > /index.html, /foobar > /foobar.html
"""
filename = URL.build(path=request.match_info['filename'], encoded=True).path
raw_path = self._directory.joinpath(filename)
try:
filepath = raw_path.resolve()
if not filepath.exists():
# simulate resolve(strict=True) from Python 3.6, which is not available on 3.5
raise FileNotFoundError()
except FileNotFoundError:
try:
html_file = raw_path.with_name(raw_path.name + '.html').resolve().relative_to(self._directory)
except (FileNotFoundError, ValueError):
pass
else:
request.match_info['filename'] = str(html_file)
else:
if filepath.is_dir():
index_file = filepath / 'index.html'
if index_file.exists():
try:
request.match_info['filename'] = str(index_file.relative_to(self._directory))
except ValueError:
# path is not relative to self._directory
pass | [
"def",
"modify_request",
"(",
"self",
",",
"request",
")",
":",
"filename",
"=",
"URL",
".",
"build",
"(",
"path",
"=",
"request",
".",
"match_info",
"[",
"'filename'",
"]",
",",
"encoded",
"=",
"True",
")",
".",
"path",
"raw_path",
"=",
"self",
".",
"_directory",
".",
"joinpath",
"(",
"filename",
")",
"try",
":",
"filepath",
"=",
"raw_path",
".",
"resolve",
"(",
")",
"if",
"not",
"filepath",
".",
"exists",
"(",
")",
":",
"# simulate strict=True for python 3.6 which is not permitted with 3.5",
"raise",
"FileNotFoundError",
"(",
")",
"except",
"FileNotFoundError",
":",
"try",
":",
"html_file",
"=",
"raw_path",
".",
"with_name",
"(",
"raw_path",
".",
"name",
"+",
"'.html'",
")",
".",
"resolve",
"(",
")",
".",
"relative_to",
"(",
"self",
".",
"_directory",
")",
"except",
"(",
"FileNotFoundError",
",",
"ValueError",
")",
":",
"pass",
"else",
":",
"request",
".",
"match_info",
"[",
"'filename'",
"]",
"=",
"str",
"(",
"html_file",
")",
"else",
":",
"if",
"filepath",
".",
"is_dir",
"(",
")",
":",
"index_file",
"=",
"filepath",
"/",
"'index.html'",
"if",
"index_file",
".",
"exists",
"(",
")",
":",
"try",
":",
"request",
".",
"match_info",
"[",
"'filename'",
"]",
"=",
"str",
"(",
"index_file",
".",
"relative_to",
"(",
"self",
".",
"_directory",
")",
")",
"except",
"ValueError",
":",
"# path is not not relative to self._directory",
"pass"
] | Apply common path conventions eg. / > /index.html, /foobar > /foobar.html | [
"Apply",
"common",
"path",
"conventions",
"eg",
".",
"/",
">",
"/",
"index",
".",
"html",
"/",
"foobar",
">",
"/",
"foobar",
".",
"html"
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/runserver/serve.py#L288-L314 |
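
An illustrative restatement of the fallback rules above, independent of aiohttp (the directory layout is hypothetical):

from pathlib import Path

def apply_conventions(directory: Path, filename: str) -> str:
    """Mimic the /foo -> /foo.html and /dir -> /dir/index.html resolution."""
    raw = directory / filename
    if not raw.exists():
        html = raw.with_name(raw.name + '.html')
        if html.exists():
            return str(html.relative_to(directory))
    elif raw.is_dir() and (raw / 'index.html').exists():
        return str((raw / 'index.html').relative_to(directory))
    return filename
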
aio-libs/aiohttp-devtools | aiohttp_devtools/start/template/app/settings.py | Settings.substitute_environ | def substitute_environ(self):
"""
Substitute environment variables into settings.
"""
for attr_name in dir(self):
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
orig_value = getattr(self, attr_name)
is_required = isinstance(orig_value, Required)
orig_type = orig_value.v_type if is_required else type(orig_value)
env_var_name = self._ENV_PREFIX + attr_name
env_var = os.getenv(env_var_name, None)
if env_var is not None:
if issubclass(orig_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(orig_type, int):
env_var = int(env_var)
elif issubclass(orig_type, Path):
env_var = Path(env_var)
elif issubclass(orig_type, bytes):
env_var = env_var.encode()
# could do floats here and lists etc via json
setattr(self, attr_name, env_var)
elif is_required and attr_name not in self._custom_settings:
raise RuntimeError('The required environment variable "{0}" is currently not set, '
'you\'ll need to run `source activate.settings.sh` '
'or you can set that single environment variable with '
'`export {0}="<value>"`'.format(env_var_name)) | python | def substitute_environ(self):
"""
Substitute environment variables into settings.
"""
for attr_name in dir(self):
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
orig_value = getattr(self, attr_name)
is_required = isinstance(orig_value, Required)
orig_type = orig_value.v_type if is_required else type(orig_value)
env_var_name = self._ENV_PREFIX + attr_name
env_var = os.getenv(env_var_name, None)
if env_var is not None:
if issubclass(orig_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(orig_type, int):
env_var = int(env_var)
elif issubclass(orig_type, Path):
env_var = Path(env_var)
elif issubclass(orig_type, bytes):
env_var = env_var.encode()
# could do floats here and lists etc via json
setattr(self, attr_name, env_var)
elif is_required and attr_name not in self._custom_settings:
raise RuntimeError('The required environment variable "{0}" is currently not set, '
'you\'ll need to run `source activate.settings.sh` '
'or you can set that single environment variable with '
'`export {0}="<value>"`'.format(env_var_name)) | [
"def",
"substitute_environ",
"(",
"self",
")",
":",
"for",
"attr_name",
"in",
"dir",
"(",
"self",
")",
":",
"if",
"attr_name",
".",
"startswith",
"(",
"'_'",
")",
"or",
"attr_name",
".",
"upper",
"(",
")",
"!=",
"attr_name",
":",
"continue",
"orig_value",
"=",
"getattr",
"(",
"self",
",",
"attr_name",
")",
"is_required",
"=",
"isinstance",
"(",
"orig_value",
",",
"Required",
")",
"orig_type",
"=",
"orig_value",
".",
"v_type",
"if",
"is_required",
"else",
"type",
"(",
"orig_value",
")",
"env_var_name",
"=",
"self",
".",
"_ENV_PREFIX",
"+",
"attr_name",
"env_var",
"=",
"os",
".",
"getenv",
"(",
"env_var_name",
",",
"None",
")",
"if",
"env_var",
"is",
"not",
"None",
":",
"if",
"issubclass",
"(",
"orig_type",
",",
"bool",
")",
":",
"env_var",
"=",
"env_var",
".",
"upper",
"(",
")",
"in",
"(",
"'1'",
",",
"'TRUE'",
")",
"elif",
"issubclass",
"(",
"orig_type",
",",
"int",
")",
":",
"env_var",
"=",
"int",
"(",
"env_var",
")",
"elif",
"issubclass",
"(",
"orig_type",
",",
"Path",
")",
":",
"env_var",
"=",
"Path",
"(",
"env_var",
")",
"elif",
"issubclass",
"(",
"orig_type",
",",
"bytes",
")",
":",
"env_var",
"=",
"env_var",
".",
"encode",
"(",
")",
"# could do floats here and lists etc via json",
"setattr",
"(",
"self",
",",
"attr_name",
",",
"env_var",
")",
"elif",
"is_required",
"and",
"attr_name",
"not",
"in",
"self",
".",
"_custom_settings",
":",
"raise",
"RuntimeError",
"(",
"'The required environment variable \"{0}\" is currently not set, '",
"'you\\'ll need to run `source activate.settings.sh` '",
"'or you can set that single environment variable with '",
"'`export {0}=\"<value>\"`'",
".",
"format",
"(",
"env_var_name",
")",
")"
] | Substitute environment variables into settings. | [
"Substitute",
"environment",
"variables",
"into",
"settings",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/settings.py#L48-L76 |
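
A self-contained mini-version of the same convention; the APP_ prefix is an assumption here, the real template defines its own _ENV_PREFIX:

import os

class MiniSettings:
    _ENV_PREFIX = 'APP_'   # assumed prefix, not taken from the template
    DEBUG = False          # bool: env value must be '1' or 'TRUE'
    PORT = 8000            # int: env value cast with int()

    def substitute_environ(self):
        for name in dir(self):
            if name.startswith('_') or name.upper() != name:
                continue
            env = os.getenv(self._ENV_PREFIX + name)
            if env is None:
                continue
            current = getattr(self, name)
            if isinstance(current, bool):    # bool is a subclass of int, so test it first
                env = env.upper() in ('1', 'TRUE')
            elif isinstance(current, int):
                env = int(env)
            setattr(self, name, env)

os.environ['APP_PORT'] = '9000'
settings = MiniSettings()
settings.substitute_environ()
assert settings.PORT == 9000 and settings.DEBUG is False
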
aio-libs/aiohttp-devtools | aiohttp_devtools/start/template/app/management.py | prepare_database | def prepare_database(delete_existing: bool) -> bool:
"""
(Re)create a fresh database and run migrations.
:param delete_existing: whether or not to drop an existing database if it exists
:return: whether or not a database has been (re)created
"""
settings = Settings()
conn = psycopg2.connect(
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT,
user=settings.DB_USER,
)
conn.autocommit = True
cur = conn.cursor()
db_name = settings.DB_NAME
cur.execute('SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)', (db_name,))
already_exists = bool(cur.fetchone()[0])
if already_exists:
if not delete_existing:
print('database "{}" already exists, skipping'.format(db_name))
return False
else:
print('dropping database "{}" as it already exists...'.format(db_name))
cur.execute('DROP DATABASE {}'.format(db_name))
else:
print('database "{}" does not yet exist'.format(db_name))
print('creating database "{}"...'.format(db_name))
cur.execute('CREATE DATABASE {}'.format(db_name))
cur.close()
conn.close()
# {% if database.is_pg_sqlalchemy %}
engine = create_engine(pg_dsn(settings))
print('creating tables from model definition...')
Base.metadata.create_all(engine)
engine.dispose()
# {% else %}
# TODO
# {% endif %}
return True | python | def prepare_database(delete_existing: bool) -> bool:
"""
(Re)create a fresh database and run migrations.
:param delete_existing: whether or not to drop an existing database if it exists
:return: whether or not a database has been (re)created
"""
settings = Settings()
conn = psycopg2.connect(
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT,
user=settings.DB_USER,
)
conn.autocommit = True
cur = conn.cursor()
db_name = settings.DB_NAME
cur.execute('SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)', (db_name,))
already_exists = bool(cur.fetchone()[0])
if already_exists:
if not delete_existing:
print('database "{}" already exists, skipping'.format(db_name))
return False
else:
print('dropping database "{}" as it already exists...'.format(db_name))
cur.execute('DROP DATABASE {}'.format(db_name))
else:
print('database "{}" does not yet exist'.format(db_name))
print('creating database "{}"...'.format(db_name))
cur.execute('CREATE DATABASE {}'.format(db_name))
cur.close()
conn.close()
# {% if database.is_pg_sqlalchemy %}
engine = create_engine(pg_dsn(settings))
print('creating tables from model definition...')
Base.metadata.create_all(engine)
engine.dispose()
# {% else %}
# TODO
# {% endif %}
return True | [
"def",
"prepare_database",
"(",
"delete_existing",
":",
"bool",
")",
"->",
"bool",
":",
"settings",
"=",
"Settings",
"(",
")",
"conn",
"=",
"psycopg2",
".",
"connect",
"(",
"password",
"=",
"settings",
".",
"DB_PASSWORD",
",",
"host",
"=",
"settings",
".",
"DB_HOST",
",",
"port",
"=",
"settings",
".",
"DB_PORT",
",",
"user",
"=",
"settings",
".",
"DB_USER",
",",
")",
"conn",
".",
"autocommit",
"=",
"True",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"db_name",
"=",
"settings",
".",
"DB_NAME",
"cur",
".",
"execute",
"(",
"'SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)'",
",",
"(",
"db_name",
",",
")",
")",
"already_exists",
"=",
"bool",
"(",
"cur",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
")",
"if",
"already_exists",
":",
"if",
"not",
"delete_existing",
":",
"print",
"(",
"'database \"{}\" already exists, skipping'",
".",
"format",
"(",
"db_name",
")",
")",
"return",
"False",
"else",
":",
"print",
"(",
"'dropping database \"{}\" as it already exists...'",
".",
"format",
"(",
"db_name",
")",
")",
"cur",
".",
"execute",
"(",
"'DROP DATABASE {}'",
".",
"format",
"(",
"db_name",
")",
")",
"else",
":",
"print",
"(",
"'database \"{}\" does not yet exist'",
".",
"format",
"(",
"db_name",
")",
")",
"print",
"(",
"'creating database \"{}\"...'",
".",
"format",
"(",
"db_name",
")",
")",
"cur",
".",
"execute",
"(",
"'CREATE DATABASE {}'",
".",
"format",
"(",
"db_name",
")",
")",
"cur",
".",
"close",
"(",
")",
"conn",
".",
"close",
"(",
")",
"# {% if database.is_pg_sqlalchemy %}",
"engine",
"=",
"create_engine",
"(",
"pg_dsn",
"(",
"settings",
")",
")",
"print",
"(",
"'creating tables from model definition...'",
")",
"Base",
".",
"metadata",
".",
"create_all",
"(",
"engine",
")",
"engine",
".",
"dispose",
"(",
")",
"# {% else %}",
"# TODO",
"# {% endif %}",
"return",
"True"
] | (Re)create a fresh database and run migrations.
:param delete_existing: whether or not to drop an existing database if it exists
:return: whether or not a database has been (re)created | [
"(",
"Re",
")",
"create",
"a",
"fresh",
"database",
"and",
"run",
"migrations",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/management.py#L11-L54 |
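
A hedged usage sketch; the import path follows the template layout in the URL above, and a reachable Postgres server with the configured credentials is assumed:

from app.management import prepare_database   # module path assumed from the template layout

# create the database and tables only if they do not exist yet
created = prepare_database(delete_existing=False)
print('database created' if created else 'database already existed, skipped')
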
aio-libs/aiohttp-devtools | aiohttp_devtools/start/template/app/views.py | index | async def index(request):
"""
This is the view handler for the "/" url.
**Note: returning html without a template engine like jinja2 is ugly, no way around that.**
:param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request
:return: aiohttp.web.Response object
"""
# {% if database.is_none and example.is_message_board %}
# app.router allows us to generate urls based on their names,
# see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources
message_url = request.app.router['messages'].url_for()
ctx = dict(
title=request.app['name'],
styles_css_url=request.app['static_root_url'] + '/styles.css',
content="""\
<p>Success! you've set up a basic aiohttp app.</p>
<p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p>
<b>
<a href="{message_url}">View and add messages</a>
</b>""".format(message_url=message_url)
)
# {% else %}
ctx = dict(
title=request.app['name'],
styles_css_url=request.app['static_root_url'] + '/styles.css',
content="<p>Success! you've setup a basic aiohttp app.</p>",
)
# {% endif %}
# with the base web.Response type we have to manually set the content type, otherwise text/plain will be used.
return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html') | python | async def index(request):
"""
This is the view handler for the "/" url.
**Note: returning html without a template engine like jinja2 is ugly, no way around that.**
:param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request
:return: aiohttp.web.Response object
"""
# {% if database.is_none and example.is_message_board %}
# app.router allows us to generate urls based on their names,
# see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources
message_url = request.app.router['messages'].url_for()
ctx = dict(
title=request.app['name'],
styles_css_url=request.app['static_root_url'] + '/styles.css',
content="""\
<p>Success! you've set up a basic aiohttp app.</p>
<p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p>
<b>
<a href="{message_url}">View and add messages</a>
</b>""".format(message_url=message_url)
)
# {% else %}
ctx = dict(
title=request.app['name'],
styles_css_url=request.app['static_root_url'] + '/styles.css',
content="<p>Success! you've setup a basic aiohttp app.</p>",
)
# {% endif %}
# with the base web.Response type we have to manually set the content type, otherwise text/plain will be used.
return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html') | [
"async",
"def",
"index",
"(",
"request",
")",
":",
"# {% if database.is_none and example.is_message_board %}",
"# app.router allows us to generate urls based on their names,",
"# see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources",
"message_url",
"=",
"request",
".",
"app",
".",
"router",
"[",
"'messages'",
"]",
".",
"url_for",
"(",
")",
"ctx",
"=",
"dict",
"(",
"title",
"=",
"request",
".",
"app",
"[",
"'name'",
"]",
",",
"styles_css_url",
"=",
"request",
".",
"app",
"[",
"'static_root_url'",
"]",
"+",
"'/styles.css'",
",",
"content",
"=",
"\"\"\"\\\n <p>Success! you've setup a basic aiohttp app.</p>\n <p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p>\n <b>\n <a href=\"{message_url}\">View and add messages</a>\n </b>\"\"\"",
".",
"format",
"(",
"message_url",
"=",
"message_url",
")",
")",
"# {% else %}",
"ctx",
"=",
"dict",
"(",
"title",
"=",
"request",
".",
"app",
"[",
"'name'",
"]",
",",
"styles_css_url",
"=",
"request",
".",
"app",
"[",
"'static_root_url'",
"]",
"+",
"'/styles.css'",
",",
"content",
"=",
"\"<p>Success! you've setup a basic aiohttp app.</p>\"",
",",
")",
"# {% endif %}",
"# with the base web.Response type we have to manually set the content type, otherwise text/plain will be used.",
"return",
"web",
".",
"Response",
"(",
"text",
"=",
"BASE_PAGE",
".",
"format",
"(",
"*",
"*",
"ctx",
")",
",",
"content_type",
"=",
"'text/html'",
")"
] | This is the view handler for the "/" url.
**Note: returning html without a template engine like jinja2 is ugly, no way around that.**
:param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request
:return: aiohttp.web.Response object | [
"This",
"is",
"the",
"view",
"handler",
"for",
"the",
"/",
"url",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/views.py#L60-L91 |
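A minimal sketch of wiring a handler like index into an aiohttp app; the app keys mirror what the handler reads, but the values, the route name, and the app.views import path (inferred from the template layout) are assumptions:

from aiohttp import web
from app.views import index  # import path is an assumption

app = web.Application()
app['name'] = 'demo'                # read by the handler via request.app['name']
app['static_root_url'] = '/static'  # read to build the stylesheet URL
app.router.add_get('/', index, name='index')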
aio-libs/aiohttp-devtools | aiohttp_devtools/start/template/app/views.py | message_data | async def message_data(request):
"""
As an example of aiohttp providing a non-html response, we load the actual messages for the "messages" view above
via ajax using this endpoint to get data. see static/message_display.js for details of rendering.
"""
messages = []
# {% if database.is_none %}
if request.app['settings'].MESSAGE_FILE.exists():
# read the message file, process it and populate the "messages" list
with request.app['settings'].MESSAGE_FILE.open() as msg_file:
for line in msg_file:
if not line:
# ignore blank lines e.g. end of file
continue
# split the line into its constituent parts, see process_form above
username, ts, message = line.split('|', 2)
# parse the datetime string and render it in a more readable format.
ts = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f'))
messages.append({'username': username, 'timestamp': ts, 'message': message})
messages.reverse()
# {% elif database.is_pg_sqlalchemy %}
async with request.app['pg_engine'].acquire() as conn:
async for row in conn.execute(sa_messages.select().order_by(sa_messages.c.timestamp.desc())):
ts = '{:%Y-%m-%d %H:%M:%S}'.format(row.timestamp)
messages.append({'username': row.username, 'timestamp': ts, 'message': row.message})
# {% endif %}
return json_response(messages) | python | async def message_data(request):
"""
As an example of aiohttp providing a non-html response, we load the actual messages for the "messages" view above
via ajax using this endpoint to get data. see static/message_display.js for details of rendering.
"""
messages = []
# {% if database.is_none %}
if request.app['settings'].MESSAGE_FILE.exists():
# read the message file, process it and populate the "messages" list
with request.app['settings'].MESSAGE_FILE.open() as msg_file:
for line in msg_file:
if not line:
# ignore blank lines e.g. end of file
continue
# split the line into its constituent parts, see process_form above
username, ts, message = line.split('|', 2)
# parse the datetime string and render it in a more readable format.
ts = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f'))
messages.append({'username': username, 'timestamp': ts, 'message': message})
messages.reverse()
# {% elif database.is_pg_sqlalchemy %}
async with request.app['pg_engine'].acquire() as conn:
async for row in conn.execute(sa_messages.select().order_by(sa_messages.c.timestamp.desc())):
ts = '{:%Y-%m-%d %H:%M:%S}'.format(row.timestamp)
messages.append({'username': row.username, 'timestamp': ts, 'message': row.message})
# {% endif %}
return json_response(messages) | [
"async",
"def",
"message_data",
"(",
"request",
")",
":",
"messages",
"=",
"[",
"]",
"# {% if database.is_none %}",
"if",
"request",
".",
"app",
"[",
"'settings'",
"]",
".",
"MESSAGE_FILE",
".",
"exists",
"(",
")",
":",
"# read the message file, process it and populate the \"messages\" list",
"with",
"request",
".",
"app",
"[",
"'settings'",
"]",
".",
"MESSAGE_FILE",
".",
"open",
"(",
")",
"as",
"msg_file",
":",
"for",
"line",
"in",
"msg_file",
":",
"if",
"not",
"line",
":",
"# ignore blank lines eg. end of file",
"continue",
"# split the line into it constituent parts, see process_form above",
"username",
",",
"ts",
",",
"message",
"=",
"line",
".",
"split",
"(",
"'|'",
",",
"2",
")",
"# parse the datetime string and render it in a more readable format.",
"ts",
"=",
"'{:%Y-%m-%d %H:%M:%S}'",
".",
"format",
"(",
"datetime",
".",
"strptime",
"(",
"ts",
",",
"'%Y-%m-%dT%H:%M:%S.%f'",
")",
")",
"messages",
".",
"append",
"(",
"{",
"'username'",
":",
"username",
",",
"'timestamp'",
":",
"ts",
",",
"'message'",
":",
"message",
"}",
")",
"messages",
".",
"reverse",
"(",
")",
"# {% elif database.is_pg_sqlalchemy %}",
"async",
"with",
"request",
".",
"app",
"[",
"'pg_engine'",
"]",
".",
"acquire",
"(",
")",
"as",
"conn",
":",
"async",
"for",
"row",
"in",
"conn",
".",
"execute",
"(",
"sa_messages",
".",
"select",
"(",
")",
".",
"order_by",
"(",
"sa_messages",
".",
"c",
".",
"timestamp",
".",
"desc",
"(",
")",
")",
")",
":",
"ts",
"=",
"'{:%Y-%m-%d %H:%M:%S}'",
".",
"format",
"(",
"row",
".",
"timestamp",
")",
"messages",
".",
"append",
"(",
"{",
"'username'",
":",
"row",
".",
"username",
",",
"'timestamp'",
":",
"ts",
",",
"'message'",
":",
"row",
".",
"message",
"}",
")",
"# {% endif %}",
"return",
"json_response",
"(",
"messages",
")"
] | As an example of aiohttp providing a non-html response, we load the actual messages for the "messages" view above
via ajax using this endpoint to get data. see static/message_display.js for details of rendering. | [
"As",
"an",
"example",
"of",
"aiohttp",
"providing",
"a",
"non",
"-",
"html",
"response",
"we",
"load",
"the",
"actual",
"messages",
"for",
"the",
"messages",
"view",
"above",
"via",
"ajax",
"using",
"this",
"endpoint",
"to",
"get",
"data",
".",
"see",
"static",
"/",
"message_display",
".",
"js",
"for",
"details",
"of",
"rendering",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/views.py#L193-L220 |
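A minimal sketch of the message-file line format the handler above parses; the sample line is made up, but the split('|', 2) call and the strptime format string come straight from the record:

from datetime import datetime

# one message per line: username|ISO-timestamp|free text (the message itself may contain '|')
line = 'anna|2017-01-02T10:30:00.000000|hello world'
username, ts, message = line.split('|', 2)
ts = datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f')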
aio-libs/aiohttp-devtools | aiohttp_devtools/start/template/app/main.py | pg_dsn | def pg_dsn(settings: Settings) -> str:
"""
:param settings: settings including connection settings
:return: DSN url suitable for sqlalchemy and aiopg.
"""
return str(URL(
database=settings.DB_NAME,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT,
username=settings.DB_USER,
drivername='postgres',
)) | python | def pg_dsn(settings: Settings) -> str:
"""
:param settings: settings including connection settings
:return: DSN url suitable for sqlalchemy and aiopg.
"""
return str(URL(
database=settings.DB_NAME,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT,
username=settings.DB_USER,
drivername='postgres',
)) | [
"def",
"pg_dsn",
"(",
"settings",
":",
"Settings",
")",
"->",
"str",
":",
"return",
"str",
"(",
"URL",
"(",
"database",
"=",
"settings",
".",
"DB_NAME",
",",
"password",
"=",
"settings",
".",
"DB_PASSWORD",
",",
"host",
"=",
"settings",
".",
"DB_HOST",
",",
"port",
"=",
"settings",
".",
"DB_PORT",
",",
"username",
"=",
"settings",
".",
"DB_USER",
",",
"drivername",
"=",
"'postgres'",
",",
")",
")"
] | :param settings: settings including connection settings
:return: DSN url suitable for sqlalchemy and aiopg. | [
":",
"param",
"settings",
":",
"settings",
"including",
"connection",
"settings",
":",
"return",
":",
"DSN",
"url",
"suitable",
"for",
"sqlalchemy",
"and",
"aiopg",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/main.py#L32-L44 |
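A minimal sketch of the DSN string pg_dsn builds, assuming SQLAlchemy's pre-1.4 URL constructor (the sqlalchemy.engine.url import path and all credential values are assumptions, not taken from the record):

from sqlalchemy.engine.url import URL

dsn = str(URL(drivername='postgres', username='postgres', password='secret',
              host='localhost', port=5432, database='mydb'))
# dsn == 'postgres://postgres:secret@localhost:5432/mydb'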
aio-libs/aiohttp-devtools | aiohttp_devtools/cli.py | serve | def serve(path, livereload, port, verbose):
"""
Serve static files from a directory.
"""
setup_logging(verbose)
run_app(*serve_static(static_path=path, livereload=livereload, port=port)) | python | def serve(path, livereload, port, verbose):
"""
Serve static files from a directory.
"""
setup_logging(verbose)
run_app(*serve_static(static_path=path, livereload=livereload, port=port)) | [
"def",
"serve",
"(",
"path",
",",
"livereload",
",",
"port",
",",
"verbose",
")",
":",
"setup_logging",
"(",
"verbose",
")",
"run_app",
"(",
"*",
"serve_static",
"(",
"static_path",
"=",
"path",
",",
"livereload",
"=",
"livereload",
",",
"port",
"=",
"port",
")",
")"
] | Serve static files from a directory. | [
"Serve",
"static",
"files",
"from",
"a",
"directory",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/cli.py#L38-L43 |
aio-libs/aiohttp-devtools | aiohttp_devtools/cli.py | runserver | def runserver(**config):
"""
Run a development server for an aiohttp app.
Takes one argument "app-path" which should be a path to either a directory containing a recognized default file
("app.py" or "main.py") or to a specific file. Defaults to the environment variable "AIO_APP_PATH" or ".".
The app path is run directly, see the "--app-factory" option for details on how an app is loaded from a python
module.
"""
active_config = {k: v for k, v in config.items() if v is not None}
setup_logging(config['verbose'])
try:
run_app(*_runserver(**active_config))
except AiohttpDevException as e:
if config['verbose']:
tb = click.style(traceback.format_exc().strip('\n'), fg='white', dim=True)
main_logger.warning('AiohttpDevException traceback:\n%s', tb)
main_logger.error('Error: %s', e)
sys.exit(2) | python | def runserver(**config):
"""
Run a development server for an aiohttp app.
Takes one argument "app-path" which should be a path to either a directory containing a recognized default file
("app.py" or "main.py") or to a specific file. Defaults to the environment variable "AIO_APP_PATH" or ".".
The app path is run directly, see the "--app-factory" option for details on how an app is loaded from a python
module.
"""
active_config = {k: v for k, v in config.items() if v is not None}
setup_logging(config['verbose'])
try:
run_app(*_runserver(**active_config))
except AiohttpDevException as e:
if config['verbose']:
tb = click.style(traceback.format_exc().strip('\n'), fg='white', dim=True)
main_logger.warning('AiohttpDevException traceback:\n%s', tb)
main_logger.error('Error: %s', e)
sys.exit(2) | [
"def",
"runserver",
"(",
"*",
"*",
"config",
")",
":",
"active_config",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"config",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"setup_logging",
"(",
"config",
"[",
"'verbose'",
"]",
")",
"try",
":",
"run_app",
"(",
"*",
"_runserver",
"(",
"*",
"*",
"active_config",
")",
")",
"except",
"AiohttpDevException",
"as",
"e",
":",
"if",
"config",
"[",
"'verbose'",
"]",
":",
"tb",
"=",
"click",
".",
"style",
"(",
"traceback",
".",
"format_exc",
"(",
")",
".",
"strip",
"(",
"'\\n'",
")",
",",
"fg",
"=",
"'white'",
",",
"dim",
"=",
"True",
")",
"main_logger",
".",
"warning",
"(",
"'AiohttpDevException traceback:\\n%s'",
",",
"tb",
")",
"main_logger",
".",
"error",
"(",
"'Error: %s'",
",",
"e",
")",
"sys",
".",
"exit",
"(",
"2",
")"
] | Run a development server for an aiohttp app.
Takes one argument "app-path" which should be a path to either a directory containing a recognized default file
("app.py" or "main.py") or to a specific file. Defaults to the environment variable "AIO_APP_PATH" or ".".
The app path is run directly, see the "--app-factory" option for details on how an app is loaded from a python
module. | [
"Run",
"a",
"development",
"server",
"for",
"an",
"aiohttp",
"apps",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/cli.py#L73-L92 |
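A minimal sketch of exercising the runserver command without starting a server, using click's test runner (the aiohttp_devtools.cli module path comes from this record's func_path_in_repository):

from click.testing import CliRunner
from aiohttp_devtools.cli import runserver

result = CliRunner().invoke(runserver, ['--help'])
print(result.output)  # shows the app-path argument and --app-factory option described above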
aio-libs/aiohttp-devtools | aiohttp_devtools/cli.py | start | def start(*, path, name, verbose, **kwargs):
"""
Create a new aiohttp app.
"""
setup_logging(verbose)
try:
check_dir_clean(Path(path))
if name is None:
name = Path(path).name
for kwarg_name, choice_enum in DECISIONS:
docs = dedent(choice_enum.__doc__).split('\n')
title, *help_text = filter(bool, docs)
click.secho('\n' + title, fg='green')
if kwargs[kwarg_name] is None:
click.secho('\n'.join(help_text), dim=True)
choices = _display_enum_choices(choice_enum)
kwargs[kwarg_name] = click.prompt(
'choose which {} to use {}'.format(kwarg_name, choices),
type=EnumChoice(choice_enum),
show_default=False,
default=enum_default(choice_enum),
)
click.echo('using: {}'.format(click.style(kwargs[kwarg_name], bold=True)))
continue
StartProject(path=path, name=name, **kwargs)
except AiohttpDevException as e:
main_logger.error('Error: %s', e)
sys.exit(2) | python | def start(*, path, name, verbose, **kwargs):
"""
Create a new aiohttp app.
"""
setup_logging(verbose)
try:
check_dir_clean(Path(path))
if name is None:
name = Path(path).name
for kwarg_name, choice_enum in DECISIONS:
docs = dedent(choice_enum.__doc__).split('\n')
title, *help_text = filter(bool, docs)
click.secho('\n' + title, fg='green')
if kwargs[kwarg_name] is None:
click.secho('\n'.join(help_text), dim=True)
choices = _display_enum_choices(choice_enum)
kwargs[kwarg_name] = click.prompt(
'choose which {} to use {}'.format(kwarg_name, choices),
type=EnumChoice(choice_enum),
show_default=False,
default=enum_default(choice_enum),
)
click.echo('using: {}'.format(click.style(kwargs[kwarg_name], bold=True)))
continue
StartProject(path=path, name=name, **kwargs)
except AiohttpDevException as e:
main_logger.error('Error: %s', e)
sys.exit(2) | [
"def",
"start",
"(",
"*",
",",
"path",
",",
"name",
",",
"verbose",
",",
"*",
"*",
"kwargs",
")",
":",
"setup_logging",
"(",
"verbose",
")",
"try",
":",
"check_dir_clean",
"(",
"Path",
"(",
"path",
")",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"Path",
"(",
"path",
")",
".",
"name",
"for",
"kwarg_name",
",",
"choice_enum",
"in",
"DECISIONS",
":",
"docs",
"=",
"dedent",
"(",
"choice_enum",
".",
"__doc__",
")",
".",
"split",
"(",
"'\\n'",
")",
"title",
",",
"",
"*",
"help_text",
"=",
"filter",
"(",
"bool",
",",
"docs",
")",
"click",
".",
"secho",
"(",
"'\\n'",
"+",
"title",
",",
"fg",
"=",
"'green'",
")",
"if",
"kwargs",
"[",
"kwarg_name",
"]",
"is",
"None",
":",
"click",
".",
"secho",
"(",
"'\\n'",
".",
"join",
"(",
"help_text",
")",
",",
"dim",
"=",
"True",
")",
"choices",
"=",
"_display_enum_choices",
"(",
"choice_enum",
")",
"kwargs",
"[",
"kwarg_name",
"]",
"=",
"click",
".",
"prompt",
"(",
"'choose which {} to use {}'",
".",
"format",
"(",
"kwarg_name",
",",
"choices",
")",
",",
"type",
"=",
"EnumChoice",
"(",
"choice_enum",
")",
",",
"show_default",
"=",
"False",
",",
"default",
"=",
"enum_default",
"(",
"choice_enum",
")",
",",
")",
"click",
".",
"echo",
"(",
"'using: {}'",
".",
"format",
"(",
"click",
".",
"style",
"(",
"kwargs",
"[",
"kwarg_name",
"]",
",",
"bold",
"=",
"True",
")",
")",
")",
"continue",
"StartProject",
"(",
"path",
"=",
"path",
",",
"name",
"=",
"name",
",",
"*",
"*",
"kwargs",
")",
"except",
"AiohttpDevException",
"as",
"e",
":",
"main_logger",
".",
"error",
"(",
"'Error: %s'",
",",
"e",
")",
"sys",
".",
"exit",
"(",
"2",
")"
] | Create a new aiohttp app. | [
"Create",
"a",
"new",
"aiohttp",
"app",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/cli.py#L125-L154 |
aio-libs/aiohttp-devtools | aiohttp_devtools/runserver/config.py | Config.import_app_factory | def import_app_factory(self):
"""
Import attribute/class from a python module. Raise AdevConfigError if the import failed.
:return: the app factory attribute; ``watch_path`` is set to the module's directory as a side effect
"""
rel_py_file = self.py_file.relative_to(self.python_path)
module_path = '.'.join(rel_py_file.with_suffix('').parts)
sys.path.append(str(self.python_path))
try:
module = import_module(module_path)
except ImportError as e:
raise AdevConfigError('error importing "{}" '
'from "{}": {}'.format(module_path, self.python_path, e)) from e
logger.debug('successfully loaded "%s" from "%s"', module_path, self.python_path)
if self.app_factory_name is None:
try:
self.app_factory_name = next(an for an in APP_FACTORY_NAMES if hasattr(module, an))
except StopIteration as e:
raise AdevConfigError('No name supplied and no default app factory '
'found in {s.py_file.name}'.format(s=self)) from e
else:
logger.debug('found default attribute "%s" in module "%s"',
self.app_factory_name, module)
try:
attr = getattr(module, self.app_factory_name)
except AttributeError as e:
raise AdevConfigError('Module "{s.py_file.name}" '
'does not define a "{s.app_factory_name}" attribute/class'.format(s=self)) from e
self.watch_path = self.watch_path or Path(module.__file__).parent
return attr | python | def import_app_factory(self):
"""
Import attribute/class from a python module. Raise AdevConfigError if the import failed.
:return: the app factory attribute; ``watch_path`` is set to the module's directory as a side effect
"""
rel_py_file = self.py_file.relative_to(self.python_path)
module_path = '.'.join(rel_py_file.with_suffix('').parts)
sys.path.append(str(self.python_path))
try:
module = import_module(module_path)
except ImportError as e:
raise AdevConfigError('error importing "{}" '
'from "{}": {}'.format(module_path, self.python_path, e)) from e
logger.debug('successfully loaded "%s" from "%s"', module_path, self.python_path)
if self.app_factory_name is None:
try:
self.app_factory_name = next(an for an in APP_FACTORY_NAMES if hasattr(module, an))
except StopIteration as e:
raise AdevConfigError('No name supplied and no default app factory '
'found in {s.py_file.name}'.format(s=self)) from e
else:
logger.debug('found default attribute "%s" in module "%s"',
self.app_factory_name, module)
try:
attr = getattr(module, self.app_factory_name)
except AttributeError as e:
raise AdevConfigError('Module "{s.py_file.name}" '
'does not define a "{s.app_factory_name}" attribute/class'.format(s=self)) from e
self.watch_path = self.watch_path or Path(module.__file__).parent
return attr | [
"def",
"import_app_factory",
"(",
"self",
")",
":",
"rel_py_file",
"=",
"self",
".",
"py_file",
".",
"relative_to",
"(",
"self",
".",
"python_path",
")",
"module_path",
"=",
"'.'",
".",
"join",
"(",
"rel_py_file",
".",
"with_suffix",
"(",
"''",
")",
".",
"parts",
")",
"sys",
".",
"path",
".",
"append",
"(",
"str",
"(",
"self",
".",
"python_path",
")",
")",
"try",
":",
"module",
"=",
"import_module",
"(",
"module_path",
")",
"except",
"ImportError",
"as",
"e",
":",
"raise",
"AdevConfigError",
"(",
"'error importing \"{}\" '",
"'from \"{}\": {}'",
".",
"format",
"(",
"module_path",
",",
"self",
".",
"python_path",
",",
"e",
")",
")",
"from",
"e",
"logger",
".",
"debug",
"(",
"'successfully loaded \"%s\" from \"%s\"'",
",",
"module_path",
",",
"self",
".",
"python_path",
")",
"if",
"self",
".",
"app_factory_name",
"is",
"None",
":",
"try",
":",
"self",
".",
"app_factory_name",
"=",
"next",
"(",
"an",
"for",
"an",
"in",
"APP_FACTORY_NAMES",
"if",
"hasattr",
"(",
"module",
",",
"an",
")",
")",
"except",
"StopIteration",
"as",
"e",
":",
"raise",
"AdevConfigError",
"(",
"'No name supplied and no default app factory '",
"'found in {s.py_file.name}'",
".",
"format",
"(",
"s",
"=",
"self",
")",
")",
"from",
"e",
"else",
":",
"logger",
".",
"debug",
"(",
"'found default attribute \"%s\" in module \"%s\"'",
",",
"self",
".",
"app_factory_name",
",",
"module",
")",
"try",
":",
"attr",
"=",
"getattr",
"(",
"module",
",",
"self",
".",
"app_factory_name",
")",
"except",
"AttributeError",
"as",
"e",
":",
"raise",
"AdevConfigError",
"(",
"'Module \"{s.py_file.name}\" '",
"'does not define a \"{s.app_factory_name}\" attribute/class'",
".",
"format",
"(",
"s",
"=",
"self",
")",
")",
"from",
"e",
"self",
".",
"watch_path",
"=",
"self",
".",
"watch_path",
"or",
"Path",
"(",
"module",
".",
"__file__",
")",
".",
"parent",
"return",
"attr"
] | Import attribute/class from a python module. Raise AdevConfigError if the import failed.
:return: the app factory attribute; ``watch_path`` is set to the module's directory as a side effect | [
"Import",
"attribute",
"/",
"class",
"from",
"from",
"a",
"python",
"module",
".",
"Raise",
"AdevConfigError",
"if",
"the",
"import",
"failed",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/runserver/config.py#L123-L158 |
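A minimal sketch of a module import_app_factory can load; whether 'create_app' appears in APP_FACTORY_NAMES is an assumption, since that tuple is not shown in this record:

# app.py
from aiohttp import web

def create_app():
    return web.Application()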
aio-libs/aiohttp-devtools | aiohttp_devtools/runserver/main.py | runserver | def runserver(**config_kwargs):
"""
Prepare app ready to run development server.
:param config_kwargs: see config.Config for more details
:return: tuple (auxiliary app, auxiliary app port, event loop, access logger class)
"""
# force a full reload in sub processes so they load an updated version of code; this must be called only once
set_start_method('spawn')
config = Config(**config_kwargs)
config.import_app_factory()
loop = asyncio.get_event_loop()
loop.run_until_complete(check_port_open(config.main_port, loop))
aux_app = create_auxiliary_app(
static_path=config.static_path_str,
static_url=config.static_url,
livereload=config.livereload,
)
main_manager = AppTask(config, loop)
aux_app.on_startup.append(main_manager.start)
aux_app.on_shutdown.append(main_manager.close)
if config.static_path:
static_manager = LiveReloadTask(config.static_path, loop)
logger.debug('starting livereload to watch %s', config.static_path_str)
aux_app.on_startup.append(static_manager.start)
aux_app.on_shutdown.append(static_manager.close)
url = 'http://{0.host}:{0.aux_port}'.format(config)
logger.info('Starting aux server at %s ◆', url)
if config.static_path:
rel_path = config.static_path.relative_to(os.getcwd())
logger.info('serving static files from ./%s/ at %s%s', rel_path, url, config.static_url)
return aux_app, config.aux_port, loop, AuxAccessLogger | python | def runserver(**config_kwargs):
"""
Prepare app ready to run development server.
:param config_kwargs: see config.Config for more details
:return: tuple (auxiliary app, auxiliary app port, event loop, access logger class)
"""
# force a full reload in sub processes so they load an updated version of code; this must be called only once
set_start_method('spawn')
config = Config(**config_kwargs)
config.import_app_factory()
loop = asyncio.get_event_loop()
loop.run_until_complete(check_port_open(config.main_port, loop))
aux_app = create_auxiliary_app(
static_path=config.static_path_str,
static_url=config.static_url,
livereload=config.livereload,
)
main_manager = AppTask(config, loop)
aux_app.on_startup.append(main_manager.start)
aux_app.on_shutdown.append(main_manager.close)
if config.static_path:
static_manager = LiveReloadTask(config.static_path, loop)
logger.debug('starting livereload to watch %s', config.static_path_str)
aux_app.on_startup.append(static_manager.start)
aux_app.on_shutdown.append(static_manager.close)
url = 'http://{0.host}:{0.aux_port}'.format(config)
logger.info('Starting aux server at %s ◆', url)
if config.static_path:
rel_path = config.static_path.relative_to(os.getcwd())
logger.info('serving static files from ./%s/ at %s%s', rel_path, url, config.static_url)
return aux_app, config.aux_port, loop, AuxAccessLogger | [
"def",
"runserver",
"(",
"*",
"*",
"config_kwargs",
")",
":",
"# force a full reload in sub processes so they load an updated version of code, this must be called only once",
"set_start_method",
"(",
"'spawn'",
")",
"config",
"=",
"Config",
"(",
"*",
"*",
"config_kwargs",
")",
"config",
".",
"import_app_factory",
"(",
")",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"loop",
".",
"run_until_complete",
"(",
"check_port_open",
"(",
"config",
".",
"main_port",
",",
"loop",
")",
")",
"aux_app",
"=",
"create_auxiliary_app",
"(",
"static_path",
"=",
"config",
".",
"static_path_str",
",",
"static_url",
"=",
"config",
".",
"static_url",
",",
"livereload",
"=",
"config",
".",
"livereload",
",",
")",
"main_manager",
"=",
"AppTask",
"(",
"config",
",",
"loop",
")",
"aux_app",
".",
"on_startup",
".",
"append",
"(",
"main_manager",
".",
"start",
")",
"aux_app",
".",
"on_shutdown",
".",
"append",
"(",
"main_manager",
".",
"close",
")",
"if",
"config",
".",
"static_path",
":",
"static_manager",
"=",
"LiveReloadTask",
"(",
"config",
".",
"static_path",
",",
"loop",
")",
"logger",
".",
"debug",
"(",
"'starting livereload to watch %s'",
",",
"config",
".",
"static_path_str",
")",
"aux_app",
".",
"on_startup",
".",
"append",
"(",
"static_manager",
".",
"start",
")",
"aux_app",
".",
"on_shutdown",
".",
"append",
"(",
"static_manager",
".",
"close",
")",
"url",
"=",
"'http://{0.host}:{0.aux_port}'",
".",
"format",
"(",
"config",
")",
"logger",
".",
"info",
"(",
"'Starting aux server at %s ◆', ",
"u",
"l)",
"",
"if",
"config",
".",
"static_path",
":",
"rel_path",
"=",
"config",
".",
"static_path",
".",
"relative_to",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'serving static files from ./%s/ at %s%s'",
",",
"rel_path",
",",
"url",
",",
"config",
".",
"static_url",
")",
"return",
"aux_app",
",",
"config",
".",
"aux_port",
",",
"loop",
",",
"AuxAccessLogger"
] | Prepare app ready to run development server.
:param config_kwargs: see config.Config for more details
:return: tuple (auxiliary app, auxiliary app port, event loop, access logger class) | [
"Prepare",
"app",
"ready",
"to",
"run",
"development",
"server",
"."
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/runserver/main.py#L34-L73 |
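A minimal sketch of consuming runserver()'s four-value return, mirroring the CLI's run_app(*_runserver(**config)) call pattern; the run_app import path and the app_path keyword are assumptions about the package's internals:

from aiohttp_devtools.runserver import run_app  # import path is an assumption
from aiohttp_devtools.runserver.main import runserver

run_app(*runserver(app_path='app/main.py'))  # 'app_path' kwarg assumed from Config's usage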
aio-libs/aiohttp-devtools | aiohttp_devtools/logs.py | log_config | def log_config(verbose: bool) -> dict:
"""
Set up default config for dictConfig.
:param verbose: level: DEBUG if True, INFO if False
:return: dict suitable for ``logging.config.dictConfig``
"""
log_level = 'DEBUG' if verbose else 'INFO'
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%H:%M:%S',
'class': 'aiohttp_devtools.logs.DefaultFormatter',
},
'no_ts': {
'format': '%(message)s',
'class': 'aiohttp_devtools.logs.DefaultFormatter',
},
'aiohttp': {
'format': '%(message)s',
'class': 'aiohttp_devtools.logs.AccessFormatter',
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'default'
},
'no_ts': {
'level': log_level,
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'no_ts'
},
'aiohttp_access': {
'level': log_level,
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'aiohttp'
},
'aiohttp_server': {
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'aiohttp'
},
},
'loggers': {
rs_dft_logger.name: {
'handlers': ['default'],
'level': log_level,
},
rs_aux_logger.name: {
'handlers': ['default'],
'level': log_level,
},
tools_logger.name: {
'handlers': ['default'],
'level': log_level,
},
main_logger.name: {
'handlers': ['no_ts'],
'level': log_level,
},
'aiohttp.access': {
'handlers': ['aiohttp_access'],
'level': log_level,
'propagate': False,
},
'aiohttp.server': {
'handlers': ['aiohttp_server'],
'level': log_level,
},
},
} | python | def log_config(verbose: bool) -> dict:
"""
Set up default config for dictConfig.
:param verbose: level: DEBUG if True, INFO if False
:return: dict suitable for ``logging.config.dictConfig``
"""
log_level = 'DEBUG' if verbose else 'INFO'
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%H:%M:%S',
'class': 'aiohttp_devtools.logs.DefaultFormatter',
},
'no_ts': {
'format': '%(message)s',
'class': 'aiohttp_devtools.logs.DefaultFormatter',
},
'aiohttp': {
'format': '%(message)s',
'class': 'aiohttp_devtools.logs.AccessFormatter',
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'default'
},
'no_ts': {
'level': log_level,
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'no_ts'
},
'aiohttp_access': {
'level': log_level,
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'aiohttp'
},
'aiohttp_server': {
'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
'formatter': 'aiohttp'
},
},
'loggers': {
rs_dft_logger.name: {
'handlers': ['default'],
'level': log_level,
},
rs_aux_logger.name: {
'handlers': ['default'],
'level': log_level,
},
tools_logger.name: {
'handlers': ['default'],
'level': log_level,
},
main_logger.name: {
'handlers': ['no_ts'],
'level': log_level,
},
'aiohttp.access': {
'handlers': ['aiohttp_access'],
'level': log_level,
'propagate': False,
},
'aiohttp.server': {
'handlers': ['aiohttp_server'],
'level': log_level,
},
},
} | [
"def",
"log_config",
"(",
"verbose",
":",
"bool",
")",
"->",
"dict",
":",
"log_level",
"=",
"'DEBUG'",
"if",
"verbose",
"else",
"'INFO'",
"return",
"{",
"'version'",
":",
"1",
",",
"'disable_existing_loggers'",
":",
"False",
",",
"'formatters'",
":",
"{",
"'default'",
":",
"{",
"'format'",
":",
"'[%(asctime)s] %(message)s'",
",",
"'datefmt'",
":",
"'%H:%M:%S'",
",",
"'class'",
":",
"'aiohttp_devtools.logs.DefaultFormatter'",
",",
"}",
",",
"'no_ts'",
":",
"{",
"'format'",
":",
"'%(message)s'",
",",
"'class'",
":",
"'aiohttp_devtools.logs.DefaultFormatter'",
",",
"}",
",",
"'aiohttp'",
":",
"{",
"'format'",
":",
"'%(message)s'",
",",
"'class'",
":",
"'aiohttp_devtools.logs.AccessFormatter'",
",",
"}",
",",
"}",
",",
"'handlers'",
":",
"{",
"'default'",
":",
"{",
"'level'",
":",
"log_level",
",",
"'class'",
":",
"'aiohttp_devtools.logs.HighlightStreamHandler'",
",",
"'formatter'",
":",
"'default'",
"}",
",",
"'no_ts'",
":",
"{",
"'level'",
":",
"log_level",
",",
"'class'",
":",
"'aiohttp_devtools.logs.HighlightStreamHandler'",
",",
"'formatter'",
":",
"'no_ts'",
"}",
",",
"'aiohttp_access'",
":",
"{",
"'level'",
":",
"log_level",
",",
"'class'",
":",
"'aiohttp_devtools.logs.HighlightStreamHandler'",
",",
"'formatter'",
":",
"'aiohttp'",
"}",
",",
"'aiohttp_server'",
":",
"{",
"'class'",
":",
"'aiohttp_devtools.logs.HighlightStreamHandler'",
",",
"'formatter'",
":",
"'aiohttp'",
"}",
",",
"}",
",",
"'loggers'",
":",
"{",
"rs_dft_logger",
".",
"name",
":",
"{",
"'handlers'",
":",
"[",
"'default'",
"]",
",",
"'level'",
":",
"log_level",
",",
"}",
",",
"rs_aux_logger",
".",
"name",
":",
"{",
"'handlers'",
":",
"[",
"'default'",
"]",
",",
"'level'",
":",
"log_level",
",",
"}",
",",
"tools_logger",
".",
"name",
":",
"{",
"'handlers'",
":",
"[",
"'default'",
"]",
",",
"'level'",
":",
"log_level",
",",
"}",
",",
"main_logger",
".",
"name",
":",
"{",
"'handlers'",
":",
"[",
"'no_ts'",
"]",
",",
"'level'",
":",
"log_level",
",",
"}",
",",
"'aiohttp.access'",
":",
"{",
"'handlers'",
":",
"[",
"'aiohttp_access'",
"]",
",",
"'level'",
":",
"log_level",
",",
"'propagate'",
":",
"False",
",",
"}",
",",
"'aiohttp.server'",
":",
"{",
"'handlers'",
":",
"[",
"'aiohttp_server'",
"]",
",",
"'level'",
":",
"log_level",
",",
"}",
",",
"}",
",",
"}"
] | Set up default config for dictConfig.
:param verbose: level: DEBUG if True, INFO if False
:return: dict suitable for ``logging.config.dictConfig`` | [
"Setup",
"default",
"config",
".",
"for",
"dictConfig",
".",
":",
"param",
"verbose",
":",
"level",
":",
"DEBUG",
"if",
"True",
"INFO",
"if",
"False",
":",
"return",
":",
"dict",
"suitable",
"for",
"logging",
".",
"config",
".",
"dictConfig"
] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/logs.py#L93-L166 |
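A minimal sketch of feeding the returned dict to the stdlib logging machinery; the formatter and handler classes it references resolve only when aiohttp_devtools is importable:

import logging.config

from aiohttp_devtools.logs import log_config

logging.config.dictConfig(log_config(verbose=True))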
loads/molotov | molotov/api.py | scenario | def scenario(weight=1, delay=0.0, name=None):
"""Decorator to register a function as a Molotov test.
Options:
- **weight** used by Molotov when the scenarii are randomly picked.
The functions with the highest values are more likely to be picked.
Integer, defaults to 1. This value is ignored when the
*scenario_picker* decorator is used.
- **delay** once the scenario is done, the worker will sleep
*delay* seconds. Float, defaults to 0.
The general --delay argument you can pass to Molotov
will be summed with this delay.
- **name** name of the scenario. If not provided, will use the
function __name__ attribute.
The decorated function receives an :class:`aiohttp.ClientSession` instance.
"""
def _scenario(func, *args, **kw):
_check_coroutine(func)
if weight > 0:
sname = name or func.__name__
data = {'name': sname,
'weight': weight, 'delay': delay,
'func': func, 'args': args, 'kw': kw}
_SCENARIO[sname] = data
@functools.wraps(func)
def __scenario(*args, **kw):
return func(*args, **kw)
return __scenario
return _scenario | python | def scenario(weight=1, delay=0.0, name=None):
"""Decorator to register a function as a Molotov test.
Options:
- **weight** used by Molotov when the scenarii are randomly picked.
The functions with the highest values are more likely to be picked.
Integer, defaults to 1. This value is ignored when the
*scenario_picker* decorator is used.
- **delay** once the scenario is done, the worker will sleep
*delay* seconds. Float, defaults to 0.
The general --delay argument you can pass to Molotov
will be summed with this delay.
- **name** name of the scenario. If not provided, will use the
function __name__ attribute.
The decorated function receives an :class:`aiohttp.ClientSession` instance.
"""
def _scenario(func, *args, **kw):
_check_coroutine(func)
if weight > 0:
sname = name or func.__name__
data = {'name': sname,
'weight': weight, 'delay': delay,
'func': func, 'args': args, 'kw': kw}
_SCENARIO[sname] = data
@functools.wraps(func)
def __scenario(*args, **kw):
return func(*args, **kw)
return __scenario
return _scenario | [
"def",
"scenario",
"(",
"weight",
"=",
"1",
",",
"delay",
"=",
"0.0",
",",
"name",
"=",
"None",
")",
":",
"def",
"_scenario",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"_check_coroutine",
"(",
"func",
")",
"if",
"weight",
">",
"0",
":",
"sname",
"=",
"name",
"or",
"func",
".",
"__name__",
"data",
"=",
"{",
"'name'",
":",
"sname",
",",
"'weight'",
":",
"weight",
",",
"'delay'",
":",
"delay",
",",
"'func'",
":",
"func",
",",
"'args'",
":",
"args",
",",
"'kw'",
":",
"kw",
"}",
"_SCENARIO",
"[",
"sname",
"]",
"=",
"data",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"__scenario",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"__scenario",
"return",
"_scenario"
] | Decorator to register a function as a Molotov test.
Options:
- **weight** used by Molotov when the scenarii are randomly picked.
The functions with the highest values are more likely to be picked.
Integer, defaults to 1. This value is ignored when the
*scenario_picker* decorator is used.
- **delay** once the scenario is done, the worker will sleep
*delay* seconds. Float, defaults to 0.
The general --delay argument you can pass to Molotov
will be summed with this delay.
- **name** name of the scenario. If not provided, will use the
function __name__ attribute.
The decorated function receives an :class:`aiohttp.ClientSession` instance. | [
"Decorator",
"to",
"register",
"a",
"function",
"as",
"a",
"Molotov",
"test",
"."
] | train | https://github.com/loads/molotov/blob/bd2c94e7f250e1fbb21940f02c68b4437655bc11/molotov/api.py#L24-L56 |
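A minimal usage sketch of the scenario decorator documented above; the target URL is made up and the top-level molotov import path is an assumption:

import molotov

@molotov.scenario(weight=2, delay=0.5, name='hit_root')
async def hit_root(session):
    # session is the aiohttp.ClientSession instance mentioned in the docstring
    async with session.get('http://localhost:8080/') as resp:
        assert resp.status == 200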