column            type      details
repo              string    lengths 7-55
path              string    lengths 4-127
func_name         string    lengths 1-88
original_string   string    lengths 75-19.8k
language          string    1 distinct value
code              string    lengths 75-19.8k
code_tokens       sequence
docstring         string    lengths 3-17.3k
docstring_tokens  sequence
sha               string    lengths 40-40
url               string    lengths 87-242
partition         string    1 distinct value
wtolson/pysis
pysis/cubefile.py
CubeFile.get_image_array
def get_image_array(self): """Create an array for use in making an image. Creates a linear stretch of the image and scales it to between `0` and `255`. `Null`, `Lis` and `Lrs` pixels are set to `0`. `His` and `Hrs` pixels are set to `255`. Usage:: from pysis import CubeFile from PIL import Image # Read in the image and create the image data image = CubeFile.open('test.cub') data = image.get_image_array() # Save the first band to a new file Image.fromarray(data[0]).save('test.png') :returns: A uint8 array of pixel values. """ specials_mask = self.specials_mask() data = self.data.copy() data[specials_mask] -= data[specials_mask].min() data[specials_mask] *= 255 / data[specials_mask].max() data[data == self.specials['His']] = 255 data[data == self.specials['Hrs']] = 255 return data.astype(numpy.uint8)
python
def get_image_array(self): """Create an array for use in making an image. Creates a linear stretch of the image and scales it to between `0` and `255`. `Null`, `Lis` and `Lrs` pixels are set to `0`. `His` and `Hrs` pixels are set to `255`. Usage:: from pysis import CubeFile from PIL import Image # Read in the image and create the image data image = CubeFile.open('test.cub') data = image.get_image_array() # Save the first band to a new file Image.fromarray(data[0]).save('test.png') :returns: A uint8 array of pixel values. """ specials_mask = self.specials_mask() data = self.data.copy() data[specials_mask] -= data[specials_mask].min() data[specials_mask] *= 255 / data[specials_mask].max() data[data == self.specials['His']] = 255 data[data == self.specials['Hrs']] = 255 return data.astype(numpy.uint8)
[ "def", "get_image_array", "(", "self", ")", ":", "specials_mask", "=", "self", ".", "specials_mask", "(", ")", "data", "=", "self", ".", "data", ".", "copy", "(", ")", "data", "[", "specials_mask", "]", "-=", "data", "[", "specials_mask", "]", ".", "min", "(", ")", "data", "[", "specials_mask", "]", "*=", "255", "/", "data", "[", "specials_mask", "]", ".", "max", "(", ")", "data", "[", "data", "==", "self", ".", "specials", "[", "'His'", "]", "]", "=", "255", "data", "[", "data", "==", "self", ".", "specials", "[", "'Hrs'", "]", "]", "=", "255", "return", "data", ".", "astype", "(", "numpy", ".", "uint8", ")" ]
Create an array for use in making an image. Creates a linear stretch of the image and scales it to between `0` and `255`. `Null`, `Lis` and `Lrs` pixels are set to `0`. `His` and `Hrs` pixels are set to `255`. Usage:: from pysis import CubeFile from PIL import Image # Read in the image and create the image data image = CubeFile.open('test.cub') data = image.get_image_array() # Save the first band to a new file Image.fromarray(data[0]).save('test.png') :returns: A uint8 array of pixel values.
[ "Create", "an", "array", "for", "use", "in", "making", "an", "image", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/cubefile.py#L132-L163
train
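The usage block above saves only the first band; a short extension of the same sketch (output filenames are illustrative) writes every band:

```python
from pysis import CubeFile
from PIL import Image

image = CubeFile.open('test.cub')
data = image.get_image_array()

# Each element of `data` is one band, already stretched to uint8.
for i, band in enumerate(data):
    Image.fromarray(band).save('test_band%02d.png' % i)
```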
wtolson/pysis
pysis/env.py
check_isis_version
def check_isis_version(major, minor=0, patch=0): """Checks that the current isis version is equal to or above the supplied version.""" if ISIS_VERSION and (major, minor, patch) <= ISIS_VERSION_TUPLE: return msg = 'Version %s.%s.%s of isis required (%s found).' raise VersionError(msg % (major, minor, patch, ISIS_VERSION))
python
def check_isis_version(major, minor=0, patch=0): """Checks that the current isis version is equal to or above the supplied version.""" if ISIS_VERSION and (major, minor, patch) <= ISIS_VERSION_TUPLE: return msg = 'Version %s.%s.%s of isis required (%s found).' raise VersionError(msg % (major, minor, patch, ISIS_VERSION))
[ "def", "check_isis_version", "(", "major", ",", "minor", "=", "0", ",", "patch", "=", "0", ")", ":", "if", "ISIS_VERSION", "and", "(", "major", ",", "minor", ",", "patch", ")", "<=", "ISIS_VERSION_TUPLE", ":", "return", "msg", "=", "'Version %s.%s.%s of isis required (%s found).'", "raise", "VersionError", "(", "msg", "%", "(", "major", ",", "minor", ",", "patch", ",", "ISIS_VERSION", ")", ")" ]
Checks that the current isis version is equal to or above the supplied version.
[ "Checks", "that", "the", "current", "isis", "version", "is", "equal", "to", "or", "above", "the", "supplied", "version", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/env.py#L67-L74
train
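A minimal usage sketch, assuming pysis has detected an ISIS install (the 3.4.0 floor is an arbitrary example); on an older or missing install the call raises `VersionError` with the found version:

```python
from pysis.env import check_isis_version

# Abort early if the detected isis install is older than 3.4.0.
check_isis_version(3, 4, 0)
```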
wtolson/pysis
pysis/env.py
require_isis_version
def require_isis_version(major, minor=0, patch=0): """Decorator that ensures a function is called with a minimum isis version. """ def decorator(fn): @wraps(fn) def wrapper(*args, **kwargs): check_isis_version(major, minor, patch) return fn(*args, **kwargs) return wrapper return decorator
python
def require_isis_version(major, minor=0, patch=0): """Decorator that ensures a function is called with a minimum isis version. """ def decorator(fn): @wraps(fn) def wrapper(*args, **kwargs): check_isis_version(major, minor, patch) return fn(*args, **kwargs) return wrapper return decorator
[ "def", "require_isis_version", "(", "major", ",", "minor", "=", "0", ",", "patch", "=", "0", ")", ":", "def", "decorator", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "check_isis_version", "(", "major", ",", "minor", ",", "patch", ")", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Decorator that ensures a function is called with a minimum isis version.
[ "Decorator", "that", "ensures", "a", "function", "is", "called", "with", "a", "minimum", "isis", "version", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/env.py#L77-L86
train
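The wrapper runs `check_isis_version` inside `wrapper`, so the version is checked each time the decorated function is called, not at import time. A sketch (`run_pipeline` is a hypothetical function):

```python
from pysis.env import require_isis_version

@require_isis_version(3, 4)
def run_pipeline(cube):
    # Body only runs when the installed isis version is >= 3.4.0;
    # otherwise VersionError is raised when run_pipeline is called.
    print('processing', cube)
```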
wtolson/pysis
pysis/util/file_manipulation.py
write_file_list
def write_file_list(filename, file_list=[], glob=None): """Write a list of files to a file. :param filename: the name of the file to write the list to :param file_list: a list of filenames to write to a file :param glob: if glob is specified, it will ignore file_list and instead create a list of files based on the pattern provided by glob (ex. *.cub) """ if glob: file_list = iglob(glob) with open(filename, 'w') as f: for line in file_list: f.write(line + '\n')
python
def write_file_list(filename, file_list=[], glob=None): """Write a list of files to a file. :param filename: the name of the file to write the list to :param file_list: a list of filenames to write to a file :param glob: if glob is specified, it will ignore file_list and instead create a list of files based on the pattern provided by glob (ex. *.cub) """ if glob: file_list = iglob(glob) with open(filename, 'w') as f: for line in file_list: f.write(line + '\n')
[ "def", "write_file_list", "(", "filename", ",", "file_list", "=", "[", "]", ",", "glob", "=", "None", ")", ":", "if", "glob", ":", "file_list", "=", "iglob", "(", "glob", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "for", "line", "in", "file_list", ":", "f", ".", "write", "(", "line", "+", "'\\n'", ")" ]
Write a list of files to a file. :param filename: the name of the file to write the list to :param file_list: a list of filenames to write to a file :param glob: if glob is specified, it will ignore file_list and instead create a list of files based on the pattern provided by glob (ex. *.cub)
[ "Write", "a", "list", "of", "files", "to", "a", "file", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/util/file_manipulation.py#L11-L26
train
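Two ways to call it, per the docstring (paths are illustrative); note that a truthy `glob` silently overrides `file_list`:

```python
from pysis.util.file_manipulation import write_file_list

# Explicit list of cube files.
write_file_list('cubes.lis', file_list=['a.cub', 'b.cub'])

# Let a glob pattern build the list; file_list is ignored here.
write_file_list('cubes.lis', glob='*.cub')
```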
wtolson/pysis
pysis/util/file_manipulation.py
file_variations
def file_variations(filename, extensions): """Create a variation of file names. Generate a list of variations on a filename by replacing the extension with the provided list. :param filename: The original file name to use as a base. :param extensions: A list of file extensions to generate new filenames. """ (label, ext) = splitext(filename) return [label + extension for extension in extensions]
python
def file_variations(filename, extensions): """Create a variation of file names. Generate a list of variations on a filename by replacing the extension with the provided list. :param filename: The original file name to use as a base. :param extensions: A list of file extensions to generate new filenames. """ (label, ext) = splitext(filename) return [label + extension for extension in extensions]
[ "def", "file_variations", "(", "filename", ",", "extensions", ")", ":", "(", "label", ",", "ext", ")", "=", "splitext", "(", "filename", ")", "return", "[", "label", "+", "extension", "for", "extension", "in", "extensions", "]" ]
Create a variation of file names. Generate a list of variations on a filename by replacing the extension with the provided list. :param filename: The original file name to use as a base. :param extensions: A list of file extensions to generate new filenames.
[ "Create", "a", "variation", "of", "file", "names", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/util/file_manipulation.py#L29-L40
train
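Because `splitext` keeps the extension's leading dot out of the base name, each entry in `extensions` must carry its own dot. A quick sketch:

```python
from pysis.util.file_manipulation import file_variations

# 'image.jpg' -> ['image.cub', 'image.tif', 'image.png']
cub_name, tif_name, png_name = file_variations(
    'image.jpg', ['.cub', '.tif', '.png'])
```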
wtolson/pysis
pysis/binning/abstract.py
AbstractBinnedKeys.insert
def insert(self, key, value, data={}): """Insert the `key` into a bin based on the given `value`. Optionally, a `data` dictionary may be provided to attach arbitrary data to the key. """ if value < self.min_value or value > self.max_value: raise BoundsError('item value out of bounds') item = self.Item(key, value, data) index = self.get_bin_index(value) self.bins[index].append(item)
python
def insert(self, key, value, data={}): """Insert the `key` into a bin based on the given `value`. Optionally, a `data` dictionary may be provided to attach arbitrary data to the key. """ if value < self.min_value or value > self.max_value: raise BoundsError('item value out of bounds') item = self.Item(key, value, data) index = self.get_bin_index(value) self.bins[index].append(item)
[ "def", "insert", "(", "self", ",", "key", ",", "value", ",", "data", "=", "{", "}", ")", ":", "if", "value", "<", "self", ".", "min_value", "or", "value", ">", "self", ".", "max_value", ":", "raise", "BoundsError", "(", "'item value out of bounds'", ")", "item", "=", "self", ".", "Item", "(", "key", ",", "value", ",", "data", ")", "index", "=", "self", ".", "get_bin_index", "(", "value", ")", "self", ".", "bins", "[", "index", "]", ".", "append", "(", "item", ")" ]
Insert the `key` into a bin based on the given `value`. Optionally, a `data` dictionary may be provided to attach arbitrary data to the key.
[ "Insert", "the", "key", "into", "a", "bin", "based", "on", "the", "given", "value", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/binning/abstract.py#L40-L52
train
wtolson/pysis
pysis/binning/abstract.py
AbstractBinnedKeys.iterkeys
def iterkeys(self): """An iterator over the keys of each bin.""" def _iterkeys(bin): for item in bin: yield item.key for bin in self.bins: yield _iterkeys(bin)
python
def iterkeys(self): """An iterator over the keys of each bin.""" def _iterkeys(bin): for item in bin: yield item.key for bin in self.bins: yield _iterkeys(bin)
[ "def", "iterkeys", "(", "self", ")", ":", "def", "_iterkeys", "(", "bin", ")", ":", "for", "item", "in", "bin", ":", "yield", "item", ".", "key", "for", "bin", "in", "self", ".", "bins", ":", "yield", "_iterkeys", "(", "bin", ")" ]
An iterator over the keys of each bin.
[ "An", "iterator", "over", "the", "keys", "of", "each", "bin", "." ]
7b907c8104bddfbb14c603de4d666c2101e1f999
https://github.com/wtolson/pysis/blob/7b907c8104bddfbb14c603de4d666c2101e1f999/pysis/binning/abstract.py#L54-L61
train
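`iterkeys` yields one generator per bin, so callers iterate twice: over bins, then over keys. A self-contained sketch using stand-in `Item` tuples shaped like those `insert` stores (an assumption made for illustration):

```python
from collections import namedtuple

Item = namedtuple('Item', 'key value data')

class Demo:
    # Two pre-filled bins, mimicking AbstractBinnedKeys.bins.
    bins = [[Item('a', 1, {}), Item('b', 2, {})], [Item('c', 9, {})]]

    def iterkeys(self):
        def _iterkeys(bin):
            for item in bin:
                yield item.key
        for bin in self.bins:
            yield _iterkeys(bin)

for index, keys in enumerate(Demo().iterkeys()):
    print(index, list(keys))  # 0 ['a', 'b'] then 1 ['c']
```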
signaturit/python-sdk
signaturit_sdk/resources/connection.py
Connection.file_request
def file_request(self): """ Request that retrieves a binary file """ response = requests.get( self.__base_url, headers=self.__headers, stream=True) return response.raw.read(), response.headers
python
def file_request(self): """ Request that retrieves a binary file """ response = requests.get( self.__base_url, headers=self.__headers, stream=True) return response.raw.read(), response.headers
[ "def", "file_request", "(", "self", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "__base_url", ",", "headers", "=", "self", ".", "__headers", ",", "stream", "=", "True", ")", "return", "response", ".", "raw", ".", "read", "(", ")", ",", "response", ".", "headers" ]
Request that retrieves a binary file
[ "Request", "that", "retrieves", "a", "binary", "file" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/resources/connection.py#L77-L86
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_signatures
def get_signatures(self, limit=100, offset=0, conditions={}): """ Get all signatures """ url = self.SIGNS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_signatures(self, limit=100, offset=0, conditions={}): """ Get all signatures """ url = self.SIGNS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_signatures", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SIGNS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all signatures
[ "Get", "all", "signatures" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L57-L72
train
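A usage sketch, assuming the constructor signature `SignaturitClient(token, production=False)` (the token and the 'status' filter key are placeholders):

```python
from signaturit_sdk.signaturit_client import SignaturitClient

client = SignaturitClient('YOUR_ACCESS_TOKEN', production=False)

# Fetch the second page of 50 signatures; 'status' is an
# illustrative server-side filter passed through `conditions`.
response = client.get_signatures(limit=50, offset=50,
                                 conditions={'status': 'completed'})
```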
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_signature
def get_signature(self, signature_id): """ Get a specific signature @return Signature data """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_ID_URL % signature_id) return connection.get_request()
python
def get_signature(self, signature_id): """ Get a specific signature @return Signature data """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_ID_URL % signature_id) return connection.get_request()
[ "def", "get_signature", "(", "self", ",", "signature_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "SIGNS_ID_URL", "%", "signature_id", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get a specific signature @return Signature data
[ "Get", "a", "specific", "signature" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L74-L82
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.count_signatures
def count_signatures(self, conditions={}): """ Count all signatures """ url = self.SIGNS_COUNT_URL + '?' for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def count_signatures(self, conditions={}): """ Count all signatures """ url = self.SIGNS_COUNT_URL + '?' for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "count_signatures", "(", "self", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SIGNS_COUNT_URL", "+", "'?'", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Count all signatures
[ "Count", "all", "signatures" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L84-L99
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.cancel_signature
def cancel_signature(self, signature_id): """ Cancel a specific signature @signature_id: Id of signature @return Signature data """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_CANCEL_URL % signature_id) return connection.patch_request()
python
def cancel_signature(self, signature_id): """ Cancel a specific signature @signature_id: Id of signature @return Signature data """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_CANCEL_URL % signature_id) return connection.patch_request()
[ "def", "cancel_signature", "(", "self", ",", "signature_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "SIGNS_CANCEL_URL", "%", "signature_id", ")", "return", "connection", ".", "patch_request", "(", ")" ]
Cancel a specific signature @signature_id: Id of signature @return Signature data
[ "Cancel", "a", "specific", "signature" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L175-L185
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.send_signature_reminder
def send_signature_reminder(self, signature_id): """ Send a reminder email @signature_id: Id of signature """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_SEND_REMINDER_URL % signature_id) return connection.post_request()
python
def send_signature_reminder(self, signature_id): """ Send a reminder email @signature_id: Id of signature """ connection = Connection(self.token) connection.set_url(self.production, self.SIGNS_SEND_REMINDER_URL % signature_id) return connection.post_request()
[ "def", "send_signature_reminder", "(", "self", ",", "signature_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "SIGNS_SEND_REMINDER_URL", "%", "signature_id", ")", "return", "connection", ".", "post_request", "(", ")" ]
Send a reminder email @signature_id: Id of signature
[ "Send", "a", "reminder", "email" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L187-L197
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_branding
def get_branding(self, branding_id): """ Get a specific branding @branding_id: Id of the branding to fetch @return Branding """ connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_ID_URL % branding_id) return connection.get_request()
python
def get_branding(self, branding_id): """ Get a specific branding @branding_id: Id of the branding to fetch @return Branding """ connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_ID_URL % branding_id) return connection.get_request()
[ "def", "get_branding", "(", "self", ",", "branding_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "BRANDINGS_ID_URL", "%", "branding_id", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get a specific branding @branding_id: Id of the branding to fetch @return Branding
[ "Get", "a", "specific", "branding" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L199-L209
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_brandings
def get_brandings(self): """ Get all account brandings @return List of brandings """ connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_URL) return connection.get_request()
python
def get_brandings(self): """ Get all account brandings @return List of brandings """ connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_URL) return connection.get_request()
[ "def", "get_brandings", "(", "self", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "BRANDINGS_URL", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all account brandings @return List of brandings
[ "Get", "all", "account", "brandings" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L211-L220
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.create_branding
def create_branding(self, params): """ Create a new branding @params: A dict of params (all params are optional) - layout: Default color for all application widgets (hex code) - text: Default text color for all application widgets (hex code) - application_texts: A dict with the new text values - sign_button: Text for sign button - send_button: Text for send button - decline_button: Text for decline button - decline_modal_title: Title for decline modal (when you click decline button) - decline_modal_body: Body for decline modal (when you click decline button) - photo: Photo message text, which tells the user that a photo is needed in the current process - multi_pages: Header of the document, which tells the user the number of pages to sign ex: { 'photo': 'Hey! Take a photo of yourself to validate the process!'} """ connection = Connection(self.token) connection.add_header('Content-Type', 'application/json') connection.set_url(self.production, self.BRANDINGS_URL) connection.add_params(params, json_format=True) return connection.post_request()
python
def create_branding(self, params): """ Create a new branding @params: A dict of params (all params are optional) - layout: Default color for all application widgets (hex code) - text: Default text color for all application widgets (hex code) - application_texts: A dict with the new text values - sign_button: Text for sign button - send_button: Text for send button - decline_button: Text for decline button - decline_modal_title: Title for decline modal (when you click decline button) - decline_modal_body: Body for decline modal (when you click decline button) - photo: Photo message text, which tells the user that a photo is needed in the current process - multi_pages: Header of the document, which tells the user the number of pages to sign ex: { 'photo': 'Hey! Take a photo of yourself to validate the process!'} """ connection = Connection(self.token) connection.add_header('Content-Type', 'application/json') connection.set_url(self.production, self.BRANDINGS_URL) connection.add_params(params, json_format=True) return connection.post_request()
[ "def", "create_branding", "(", "self", ",", "params", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "add_header", "(", "'Content-Type'", ",", "'application/json'", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "BRANDINGS_URL", ")", "connection", ".", "add_params", "(", "params", ",", "json_format", "=", "True", ")", "return", "connection", ".", "post_request", "(", ")" ]
Create a new branding @params: A dict of params (all params are optional) - layout: Default color for all application widgets (hex code) - text: Default text color for all application widgets (hex code) - application_texts: A dict with the new text values - sign_button: Text for sign button - send_button: Text for send button - decline_button: Text for decline button - decline_modal_title: Title for decline modal (when you click decline button) - decline_modal_body: Body for decline modal (when you click decline button) - photo: Photo message text, which tells the user that a photo is needed in the current process - multi_pages: Header of the document, which tells the user the number of pages to sign ex: { 'photo': 'Hey! Take a photo of yourself to validate the process!'}
[ "Create", "a", "new", "branding" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L222-L244
train
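A sketch of a `params` dict built only from keys named in the docstring above (values are placeholders; `client` as constructed in the earlier sketch):

```python
params = {
    'layout': '#ff8800',  # default widget color, hex code
    'text': '#333333',    # default widget text color, hex code
    'application_texts': {
        'sign_button': 'Sign now',
        'photo': 'Hey! Take a photo of yourself to validate the process!',
    },
}

# `client` is the SignaturitClient instance from the get_signatures sketch.
response = client.create_branding(params)
```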
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.update_branding
def update_branding(self, branding_id, params): """ Update an existing branding @branding_id: Id of the branding to update @params: Same params as method create_branding, see above @return: A dict with updated branding data """ connection = Connection(self.token) connection.add_header('Content-Type', 'application/json') connection.set_url(self.production, self.BRANDINGS_ID_URL % branding_id) connection.add_params(params) return connection.patch_request()
python
def update_branding(self, branding_id, params): """ Update an existing branding @branding_id: Id of the branding to update @params: Same params as method create_branding, see above @return: A dict with updated branding data """ connection = Connection(self.token) connection.add_header('Content-Type', 'application/json') connection.set_url(self.production, self.BRANDINGS_ID_URL % branding_id) connection.add_params(params) return connection.patch_request()
[ "def", "update_branding", "(", "self", ",", "branding_id", ",", "params", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "add_header", "(", "'Content-Type'", ",", "'application/json'", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "BRANDINGS_ID_URL", "%", "branding_id", ")", "connection", ".", "add_params", "(", "params", ")", "return", "connection", ".", "patch_request", "(", ")" ]
Update an existing branding @branding_id: Id of the branding to update @params: Same params as method create_branding, see above @return: A dict with updated branding data
[ "Update", "an", "existing", "branding" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L246-L259
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_templates
def get_templates(self, limit=100, offset=0): """ Get all account templates """ url = self.TEMPLATES_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_templates(self, limit=100, offset=0): """ Get all account templates """ url = self.TEMPLATES_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_templates", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "url", "=", "self", ".", "TEMPLATES_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all account templates
[ "Get", "all", "account", "templates" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L261-L271
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_emails
def get_emails(self, limit=100, offset=0, conditions={}): """ Get all certified emails """ url = self.EMAILS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_emails(self, limit=100, offset=0, conditions={}): """ Get all certified emails """ url = self.EMAILS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_emails", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "EMAILS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all certified emails
[ "Get", "all", "certified", "emails" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L273-L288
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.count_emails
def count_emails(self, conditions={}): """ Count all certified emails """ url = self.EMAILS_COUNT_URL + "?" for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def count_emails(self, conditions={}): """ Count all certified emails """ url = self.EMAILS_COUNT_URL + "?" for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "count_emails", "(", "self", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "EMAILS_COUNT_URL", "+", "\"?\"", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Count all certified emails
[ "Count", "all", "certified", "emails" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L290-L306
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_email
def get_email(self, email_id): """ Get a specific email """ connection = Connection(self.token) connection.set_url(self.production, self.EMAILS_ID_URL % email_id) return connection.get_request()
python
def get_email(self, email_id): """ Get a specific email """ connection = Connection(self.token) connection.set_url(self.production, self.EMAILS_ID_URL % email_id) return connection.get_request()
[ "def", "get_email", "(", "self", ",", "email_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "EMAILS_ID_URL", "%", "email_id", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get a specific email
[ "Get", "a", "specific", "email" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L308-L316
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.count_SMS
def count_SMS(self, conditions={}): """ Count all certified sms """ url = self.SMS_COUNT_URL + "?" for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def count_SMS(self, conditions={}): """ Count all certified sms """ url = self.SMS_COUNT_URL + "?" for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "count_SMS", "(", "self", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SMS_COUNT_URL", "+", "\"?\"", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Count all certified sms
[ "Count", "all", "certified", "sms" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L377-L393
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_SMS
def get_SMS(self, limit=100, offset=0, conditions={}): """ Get all certified sms """ url = self.SMS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_SMS(self, limit=100, offset=0, conditions={}): """ Get all certified sms """ url = self.SMS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_SMS", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SMS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all certified sms
[ "Get", "all", "certified", "sms" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L395-L410
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_single_SMS
def get_single_SMS(self, sms_id): """ Get a specific sms """ connection = Connection(self.token) connection.set_url(self.production, self.SMS_ID_URL % sms_id) return connection.get_request()
python
def get_single_SMS(self, sms_id): """ Get a specific sms """ connection = Connection(self.token) connection.set_url(self.production, self.SMS_ID_URL % sms_id) return connection.get_request()
[ "def", "get_single_SMS", "(", "self", ",", "sms_id", ")", ":", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "SMS_ID_URL", "%", "sms_id", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get a specific sms
[ "Get", "a", "specific", "sms" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L412-L420
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.create_SMS
def create_SMS(self, files, recipients, body, params={}): """ Create a new certified sms @files Files to send ex: ['/documents/internet_contract.pdf', ... ] @recipients A dictionary with the phone and name of the person who has to sign. Phone numbers must always include the country prefix. If you want to send to only one person: - [{"phone": "34123456", "name": "John"}] For multiple recipients, you need to submit a list of dicts: - [{"phone": "34123456", "name": "John"}, {"phone": "34654321", "name": "Bob"}] @body SMS body @params """ parameters = {} parser = Parser() documents = {} parser.fill_array(documents, files, 'files') recipients = recipients if isinstance(recipients, list) else [recipients] index = 0 for recipient in recipients: parser.fill_array(parameters, recipient, 'recipients[%i]' % index) index += 1 parser.fill_array(parameters, params, '') parameters['body'] = body connection = Connection(self.token) connection.set_url(self.production, self.SMS_URL) connection.add_params(parameters) connection.add_files(documents) return connection.post_request()
python
def create_SMS(self, files, recipients, body, params={}): """ Create a new certified sms @files Files to send ex: ['/documents/internet_contract.pdf', ... ] @recipients A dictionary with the phone and name of the person who has to sign. Phone numbers must always include the country prefix. If you want to send to only one person: - [{"phone": "34123456", "name": "John"}] For multiple recipients, you need to submit a list of dicts: - [{"phone": "34123456", "name": "John"}, {"phone": "34654321", "name": "Bob"}] @body SMS body @params """ parameters = {} parser = Parser() documents = {} parser.fill_array(documents, files, 'files') recipients = recipients if isinstance(recipients, list) else [recipients] index = 0 for recipient in recipients: parser.fill_array(parameters, recipient, 'recipients[%i]' % index) index += 1 parser.fill_array(parameters, params, '') parameters['body'] = body connection = Connection(self.token) connection.set_url(self.production, self.SMS_URL) connection.add_params(parameters) connection.add_files(documents) return connection.post_request()
[ "def", "create_SMS", "(", "self", ",", "files", ",", "recipients", ",", "body", ",", "params", "=", "{", "}", ")", ":", "parameters", "=", "{", "}", "parser", "=", "Parser", "(", ")", "documents", "=", "{", "}", "parser", ".", "fill_array", "(", "documents", ",", "files", ",", "'files'", ")", "recipients", "=", "recipients", "if", "isinstance", "(", "recipients", ",", "list", ")", "else", "[", "recipients", "]", "index", "=", "0", "for", "recipient", "in", "recipients", ":", "parser", ".", "fill_array", "(", "parameters", ",", "recipient", ",", "'recipients[%i]'", "%", "index", ")", "index", "+=", "1", "parser", ".", "fill_array", "(", "parameters", ",", "params", ",", "''", ")", "parameters", "[", "'body'", "]", "=", "body", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "self", ".", "SMS_URL", ")", "connection", ".", "add_params", "(", "parameters", ")", "connection", ".", "add_files", "(", "documents", ")", "return", "connection", ".", "post_request", "(", ")" ]
Create a new certified sms @files Files to send ex: ['/documents/internet_contract.pdf', ... ] @recipients A dictionary with the phone and name of the person who has to sign. Phone numbers must always include the country prefix. If you want to send to only one person: - [{"phone": "34123456", "name": "John"}] For multiple recipients, you need to submit a list of dicts: - [{"phone": "34123456", "name": "John"}, {"phone": "34654321", "name": "Bob"}] @body SMS body @params
[ "Create", "a", "new", "certified", "sms" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L434-L476
train
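A usage sketch mirroring the docstring (the file path and phone numbers are the docstring's own examples; `client` as above):

```python
recipients = [{'phone': '34123456', 'name': 'John'},
              {'phone': '34654321', 'name': 'Bob'}]

# One PDF, two recipients, and the SMS body text.
response = client.create_SMS(['/documents/internet_contract.pdf'],
                             recipients,
                             'Please sign the attached contract')
```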
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_users
def get_users(self, limit=100, offset=0): """ Get all users from your current team """ url = self.TEAM_USERS_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_users(self, limit=100, offset=0): """ Get all users from your current team """ url = self.TEAM_USERS_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_users", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "url", "=", "self", ".", "TEAM_USERS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all users from your current team
[ "Get", "all", "users", "from", "your", "current", "team" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L478-L487
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_seats
def get_seats(self, limit=100, offset=0): """ Get all seats from your current team """ url = self.TEAM_SEATS_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_seats(self, limit=100, offset=0): """ Get all seats from your current team """ url = self.TEAM_SEATS_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_seats", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "url", "=", "self", ".", "TEAM_SEATS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all seats from your current team
[ "Get", "all", "seats", "from", "your", "current", "team" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L489-L498
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_groups
def get_groups(self, limit=100, offset=0): """ Get all groups from your current team """ url = self.TEAM_GROUPS_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_groups(self, limit=100, offset=0): """ Get all groups from your current team """ url = self.TEAM_GROUPS_URL + "?limit=%s&offset=%s" % (limit, offset) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_groups", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "url", "=", "self", ".", "TEAM_GROUPS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all groups from your current team
[ "Get", "all", "groups", "from", "your", "current", "team" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L572-L581
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_subscriptions
def get_subscriptions(self, limit=100, offset=0, params={}): """ Get all subscriptions """ url = self.SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in params.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_subscriptions(self, limit=100, offset=0, params={}): """ Get all subscriptions """ url = self.SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in params.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_subscriptions", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "params", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SUBSCRIPTIONS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all subscriptions
[ "Get", "all", "subscriptions" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L695-L710
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.count_subscriptions
def count_subscriptions(self, params={}): """ Count all subscriptions """ url = self.SUBSCRIPTIONS_COUNT_URL + '?' for key, value in params.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def count_subscriptions(self, params={}): """ Count all subscriptions """ url = self.SUBSCRIPTIONS_COUNT_URL + '?' for key, value in params.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "count_subscriptions", "(", "self", ",", "params", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SUBSCRIPTIONS_COUNT_URL", "+", "'?'", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Count all subscriptions
[ "Count", "all", "subscriptions" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L712-L727
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_subscription
def get_subscription(self, subscription_id): """ Get single subscription """ url = self.SUBSCRIPTIONS_ID_URL % subscription_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_subscription(self, subscription_id): """ Get single subscription """ url = self.SUBSCRIPTIONS_ID_URL % subscription_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_subscription", "(", "self", ",", "subscription_id", ")", ":", "url", "=", "self", ".", "SUBSCRIPTIONS_ID_URL", "%", "subscription_id", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get single subscription
[ "Get", "single", "subscription" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L729-L738
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.delete_subscription
def delete_subscription(self, subscription_id): """ Delete single subscription """ url = self.SUBSCRIPTIONS_ID_URL % subscription_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.delete_request()
python
def delete_subscription(self, subscription_id): """ Delete single subscription """ url = self.SUBSCRIPTIONS_ID_URL % subscription_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.delete_request()
[ "def", "delete_subscription", "(", "self", ",", "subscription_id", ")", ":", "url", "=", "self", ".", "SUBSCRIPTIONS_ID_URL", "%", "subscription_id", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "delete_request", "(", ")" ]
Delete single subscription
[ "Delete", "single", "subscription" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L784-L793
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_contacts
def get_contacts(self, limit=100, offset=0, params={}): """ Get all account contacts """ url = self.CONTACTS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in params.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_contacts(self, limit=100, offset=0, params={}): """ Get all account contacts """ url = self.CONTACTS_URL + "?limit=%s&offset=%s" % (limit, offset) for key, value in params.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_contacts", "(", "self", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "params", "=", "{", "}", ")", ":", "url", "=", "self", ".", "CONTACTS_URL", "+", "\"?limit=%s&offset=%s\"", "%", "(", "limit", ",", "offset", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get all account contacts
[ "Get", "all", "account", "contacts" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L795-L810
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.get_contact
def get_contact(self, contact_id): """ Get single contact """ url = self.CONTACTS_ID_URL % contact_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
python
def get_contact(self, contact_id): """ Get single contact """ url = self.CONTACTS_ID_URL % contact_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "get_contact", "(", "self", ",", "contact_id", ")", ":", "url", "=", "self", ".", "CONTACTS_ID_URL", "%", "contact_id", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Get single contact
[ "Get", "single", "contact" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L812-L821
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
SignaturitClient.delete_contact
def delete_contact(self, contact_id): """ Delete single contact """ url = self.CONTACTS_ID_URL % contact_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.delete_request()
python
def delete_contact(self, contact_id): """ Delete single contact """ url = self.CONTACTS_ID_URL % contact_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.delete_request()
[ "def", "delete_contact", "(", "self", ",", "contact_id", ")", ":", "url", "=", "self", ".", "CONTACTS_ID_URL", "%", "contact_id", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "delete_request", "(", ")" ]
Delete single contact
[ "Delete", "single", "contact" ]
2419c6d9675d901244f807ae360dc58aa46109a9
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L864-L873
train
pygridtools/gridmap
examples/map_reduce.py
main
def main(): """ execute map example """ logging.captureWarnings(True) logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' + '%(message)s'), level=logging.INFO) args = [3, 5, 10, 20] # The default queue used by grid_map is all.q. You must specify # the `queue` keyword argument if that is not the name of your queue. intermediate_results = grid_map(computeFactorial, args, quiet=False, max_processes=4, queue='all.q') # Just print the items instead of really reducing. We could always sum them. print("reducing result") for i, ret in enumerate(intermediate_results): print("f({0}) = {1}".format(args[i], ret))
python
def main(): """ execute map example """ logging.captureWarnings(True) logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' + '%(message)s'), level=logging.INFO) args = [3, 5, 10, 20] # The default queue used by grid_map is all.q. You must specify # the `queue` keyword argument if that is not the name of your queue. intermediate_results = grid_map(computeFactorial, args, quiet=False, max_processes=4, queue='all.q') # Just print the items instead of really reducing. We could always sum them. print("reducing result") for i, ret in enumerate(intermediate_results): print("f({0}) = {1}".format(args[i], ret))
[ "def", "main", "(", ")", ":", "logging", ".", "captureWarnings", "(", "True", ")", "logging", ".", "basicConfig", "(", "format", "=", "(", "'%(asctime)s - %(name)s - %(levelname)s - '", "+", "'%(message)s'", ")", ",", "level", "=", "logging", ".", "INFO", ")", "args", "=", "[", "3", ",", "5", ",", "10", ",", "20", "]", "# The default queue used by grid_map is all.q. You must specify", "# the `queue` keyword argument if that is not the name of your queue.", "intermediate_results", "=", "grid_map", "(", "computeFactorial", ",", "args", ",", "quiet", "=", "False", ",", "max_processes", "=", "4", ",", "queue", "=", "'all.q'", ")", "# Just print the items instead of really reducing. We could always sum them.", "print", "(", "\"reducing result\"", ")", "for", "i", ",", "ret", "in", "enumerate", "(", "intermediate_results", ")", ":", "print", "(", "\"f({0}) = {1}\"", ".", "format", "(", "args", "[", "i", "]", ",", "ret", ")", ")" ]
execute map example
[ "execute", "map", "example" ]
be4fb1478ab8d19fa3acddecdf1a5d8bd3789127
https://github.com/pygridtools/gridmap/blob/be4fb1478ab8d19fa3acddecdf1a5d8bd3789127/examples/map_reduce.py#L62-L81
train
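The example above calls computeFactorial without showing it. A minimal sketch of what such a worker function could look like (the body here is an assumption based on the name, not the repository's actual code):

import math

def computeFactorial(n):
    # Hypothetical worker: grid_map pickles the function, runs one call per
    # input value on a cluster node, and collects results in input order.
    return math.factorial(n)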
pygridtools/gridmap
examples/manual.py
main
def main(): """ run a set of jobs on cluster """ logging.captureWarnings(True) logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' + '%(message)s'), level=logging.INFO) print("=====================================") print("======== Submit and Wait ========") print("=====================================") print("") functionJobs = make_jobs() print("sending function jobs to cluster") print("") job_outputs = process_jobs(functionJobs, max_processes=4) print("results from each job") for (i, result) in enumerate(job_outputs): print("Job {0}- result: {1}".format(i, result))
python
def main(): """ run a set of jobs on cluster """ logging.captureWarnings(True) logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' + '%(message)s'), level=logging.INFO) print("=====================================") print("======== Submit and Wait ========") print("=====================================") print("") functionJobs = make_jobs() print("sending function jobs to cluster") print("") job_outputs = process_jobs(functionJobs, max_processes=4) print("results from each job") for (i, result) in enumerate(job_outputs): print("Job {0}- result: {1}".format(i, result))
[ "def", "main", "(", ")", ":", "logging", ".", "captureWarnings", "(", "True", ")", "logging", ".", "basicConfig", "(", "format", "=", "(", "'%(asctime)s - %(name)s - %(levelname)s - '", "+", "'%(message)s'", ")", ",", "level", "=", "logging", ".", "INFO", ")", "print", "(", "\"=====================================\"", ")", "print", "(", "\"======== Submit and Wait ========\"", ")", "print", "(", "\"=====================================\"", ")", "print", "(", "\"\"", ")", "functionJobs", "=", "make_jobs", "(", ")", "print", "(", "\"sending function jobs to cluster\"", ")", "print", "(", "\"\"", ")", "job_outputs", "=", "process_jobs", "(", "functionJobs", ",", "max_processes", "=", "4", ")", "print", "(", "\"results from each job\"", ")", "for", "(", "i", ",", "result", ")", "in", "enumerate", "(", "job_outputs", ")", ":", "print", "(", "\"Job {0}- result: {1}\"", ".", "format", "(", "i", ",", "result", ")", ")" ]
run a set of jobs on cluster
[ "run", "a", "set", "of", "jobs", "on", "cluster" ]
be4fb1478ab8d19fa3acddecdf1a5d8bd3789127
https://github.com/pygridtools/gridmap/blob/be4fb1478ab8d19fa3acddecdf1a5d8bd3789127/examples/manual.py#L86-L109
train
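This example relies on a make_jobs() helper that is not part of the record. A hedged sketch using gridmap's Job wrapper (one Job per call, positional arguments passed as a list; the worker function here is invented for illustration):

from gridmap import Job

def triple(x):
    # Trivial worker used only for illustration.
    return 3 * x

def make_jobs():
    # One Job per input value; process_jobs() runs them and returns results.
    return [Job(triple, [i]) for i in range(4)]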
jupyterhub/nbgitpuller
nbgitpuller/pull.py
execute_cmd
def execute_cmd(cmd, **kwargs):
    """
    Call given command, yielding output line by line
    """
    yield '$ {}\n'.format(' '.join(cmd))
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
    proc = subprocess.Popen(cmd, **kwargs)

    # Capture output for logging.
    # Each line will be yielded as text.
    # This should behave the same as .readline(), but splits on `\r` OR `\n`,
    # not just `\n`.
    buf = []

    def flush():
        line = b''.join(buf).decode('utf8', 'replace')
        buf[:] = []
        return line

    c_last = ''
    try:
        for c in iter(partial(proc.stdout.read, 1), b''):
            if c_last == b'\r' and buf and c != b'\n':
                yield flush()
            buf.append(c)
            if c == b'\n':
                yield flush()
            c_last = c
    finally:
        ret = proc.wait()
        if ret != 0:
            raise subprocess.CalledProcessError(ret, cmd)
python
def execute_cmd(cmd, **kwargs):
    """
    Call given command, yielding output line by line
    """
    yield '$ {}\n'.format(' '.join(cmd))
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
    proc = subprocess.Popen(cmd, **kwargs)

    # Capture output for logging.
    # Each line will be yielded as text.
    # This should behave the same as .readline(), but splits on `\r` OR `\n`,
    # not just `\n`.
    buf = []

    def flush():
        line = b''.join(buf).decode('utf8', 'replace')
        buf[:] = []
        return line

    c_last = ''
    try:
        for c in iter(partial(proc.stdout.read, 1), b''):
            if c_last == b'\r' and buf and c != b'\n':
                yield flush()
            buf.append(c)
            if c == b'\n':
                yield flush()
            c_last = c
    finally:
        ret = proc.wait()
        if ret != 0:
            raise subprocess.CalledProcessError(ret, cmd)
[ "def", "execute_cmd", "(", "cmd", ",", "*", "*", "kwargs", ")", ":", "yield", "'$ {}\\n'", ".", "format", "(", "' '", ".", "join", "(", "cmd", ")", ")", "kwargs", "[", "'stdout'", "]", "=", "subprocess", ".", "PIPE", "kwargs", "[", "'stderr'", "]", "=", "subprocess", ".", "STDOUT", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "*", "*", "kwargs", ")", "# Capture output for logging.", "# Each line will be yielded as text.", "# This should behave the same as .readline(), but splits on `\\r` OR `\\n`,", "# not just `\\n`.", "buf", "=", "[", "]", "def", "flush", "(", ")", ":", "line", "=", "b''", ".", "join", "(", "buf", ")", ".", "decode", "(", "'utf8'", ",", "'replace'", ")", "buf", "[", ":", "]", "=", "[", "]", "return", "line", "c_last", "=", "''", "try", ":", "for", "c", "in", "iter", "(", "partial", "(", "proc", ".", "stdout", ".", "read", ",", "1", ")", ",", "b''", ")", ":", "if", "c_last", "==", "b'\\r'", "and", "buf", "and", "c", "!=", "b'\\n'", ":", "yield", "flush", "(", ")", "buf", ".", "append", "(", "c", ")", "if", "c", "==", "b'\\n'", ":", "yield", "flush", "(", ")", "c_last", "=", "c", "finally", ":", "ret", "=", "proc", ".", "wait", "(", ")", "if", "ret", "!=", "0", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "ret", ",", "cmd", ")" ]
Call given command, yielding output line by line
[ "Call", "given", "command", "yielding", "output", "line", "by", "line" ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L11-L43
train
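execute_cmd is a generator, so callers stream subprocess output as it arrives. A minimal usage sketch (assumes nbgitpuller is installed and git is on PATH):

from nbgitpuller.pull import execute_cmd

# The first yielded item is the echoed command line ('$ git --version'),
# followed by one item per output line.
for line in execute_cmd(['git', '--version']):
    print(line, end='')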
jupyterhub/nbgitpuller
nbgitpuller/pull.py
main
def main(): """ Synchronizes a github repository with a local repository. """ logging.basicConfig( format='[%(asctime)s] %(levelname)s -- %(message)s', level=logging.DEBUG) parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.') parser.add_argument('git_url', help='Url of the repo to sync') parser.add_argument('branch_name', default='master', help='Branch of repo to sync', nargs='?') parser.add_argument('repo_dir', default='.', help='Path to clone repo under', nargs='?') args = parser.parse_args() for line in GitPuller( args.git_url, args.branch_name, args.repo_dir ).pull(): print(line)
python
def main(): """ Synchronizes a github repository with a local repository. """ logging.basicConfig( format='[%(asctime)s] %(levelname)s -- %(message)s', level=logging.DEBUG) parser = argparse.ArgumentParser(description='Synchronizes a github repository with a local repository.') parser.add_argument('git_url', help='Url of the repo to sync') parser.add_argument('branch_name', default='master', help='Branch of repo to sync', nargs='?') parser.add_argument('repo_dir', default='.', help='Path to clone repo under', nargs='?') args = parser.parse_args() for line in GitPuller( args.git_url, args.branch_name, args.repo_dir ).pull(): print(line)
[ "def", "main", "(", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "'[%(asctime)s] %(levelname)s -- %(message)s'", ",", "level", "=", "logging", ".", "DEBUG", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Synchronizes a github repository with a local repository.'", ")", "parser", ".", "add_argument", "(", "'git_url'", ",", "help", "=", "'Url of the repo to sync'", ")", "parser", ".", "add_argument", "(", "'branch_name'", ",", "default", "=", "'master'", ",", "help", "=", "'Branch of repo to sync'", ",", "nargs", "=", "'?'", ")", "parser", ".", "add_argument", "(", "'repo_dir'", ",", "default", "=", "'.'", ",", "help", "=", "'Path to clone repo under'", ",", "nargs", "=", "'?'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "for", "line", "in", "GitPuller", "(", "args", ".", "git_url", ",", "args", ".", "branch_name", ",", "args", ".", "repo_dir", ")", ".", "pull", "(", ")", ":", "print", "(", "line", ")" ]
Synchronizes a github repository with a local repository.
[ "Synchronizes", "a", "github", "repository", "with", "a", "local", "repository", "." ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L220-L239
train
jupyterhub/nbgitpuller
nbgitpuller/pull.py
GitPuller.pull
def pull(self): """ Pull selected repo from a remote git repository, while preserving user changes """ if not os.path.exists(self.repo_dir): yield from self.initialize_repo() else: yield from self.update()
python
def pull(self): """ Pull selected repo from a remote git repository, while preserving user changes """ if not os.path.exists(self.repo_dir): yield from self.initialize_repo() else: yield from self.update()
[ "def", "pull", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "repo_dir", ")", ":", "yield", "from", "self", ".", "initialize_repo", "(", ")", "else", ":", "yield", "from", "self", ".", "update", "(", ")" ]
Pull selected repo from a remote git repository, while preserving user changes
[ "Pull", "selected", "repo", "from", "a", "remote", "git", "repository", "while", "preserving", "user", "changes" ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L73-L81
train
jupyterhub/nbgitpuller
nbgitpuller/pull.py
GitPuller.initialize_repo
def initialize_repo(self):
        """
        Clones repository & sets up usernames.
        """
        logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir))
        clone_args = ['git', 'clone']
        if self.depth and self.depth > 0:
            clone_args.extend(['--depth', str(self.depth)])
        clone_args.extend(['--branch', self.branch_name])
        clone_args.extend([self.git_url, self.repo_dir])
        yield from execute_cmd(clone_args)
        yield from execute_cmd(['git', 'config', 'user.email', 'nbgitpuller@example.com'], cwd=self.repo_dir)
        yield from execute_cmd(['git', 'config', 'user.name', 'nbgitpuller'], cwd=self.repo_dir)
        logging.info('Repo {} initialized'.format(self.repo_dir))
python
def initialize_repo(self):
        """
        Clones repository & sets up usernames.
        """
        logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir))
        clone_args = ['git', 'clone']
        if self.depth and self.depth > 0:
            clone_args.extend(['--depth', str(self.depth)])
        clone_args.extend(['--branch', self.branch_name])
        clone_args.extend([self.git_url, self.repo_dir])
        yield from execute_cmd(clone_args)
        yield from execute_cmd(['git', 'config', 'user.email', 'nbgitpuller@example.com'], cwd=self.repo_dir)
        yield from execute_cmd(['git', 'config', 'user.name', 'nbgitpuller'], cwd=self.repo_dir)
        logging.info('Repo {} initialized'.format(self.repo_dir))
[ "def", "initialize_repo", "(", "self", ")", ":", "logging", ".", "info", "(", "'Repo {} doesn\\'t exist. Cloning...'", ".", "format", "(", "self", ".", "repo_dir", ")", ")", "clone_args", "=", "[", "'git'", ",", "'clone'", "]", "if", "self", ".", "depth", "and", "self", ".", "depth", ">", "0", ":", "clone_args", ".", "extend", "(", "[", "'--depth'", ",", "str", "(", "self", ".", "depth", ")", "]", ")", "clone_args", ".", "extend", "(", "[", "'--branch'", ",", "self", ".", "branch_name", "]", ")", "clone_args", ".", "extend", "(", "[", "self", ".", "git_url", ",", "self", ".", "repo_dir", "]", ")", "yield", "from", "execute_cmd", "(", "clone_args", ")", "yield", "from", "execute_cmd", "(", "[", "'git'", ",", "'config'", ",", "'user.email'", ",", "'nbgitpuller@example.com'", "]", ",", "cwd", "=", "self", ".", "repo_dir", ")", "yield", "from", "execute_cmd", "(", "[", "'git'", ",", "'config'", ",", "'user.name'", ",", "'nbgitpuller'", "]", ",", "cwd", "=", "self", ".", "repo_dir", ")", "logging", ".", "info", "(", "'Repo {} initialized'", ".", "format", "(", "self", ".", "repo_dir", ")", ")" ]
Clones repository & sets up usernames.
[ "Clones", "repository", "&", "sets", "up", "usernames", "." ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L83-L97
train
jupyterhub/nbgitpuller
nbgitpuller/pull.py
GitPuller.repo_is_dirty
def repo_is_dirty(self):
        """
        Return true if repo is dirty
        """
        try:
            subprocess.check_call(['git', 'diff-files', '--quiet'], cwd=self.repo_dir)
            # Return code is 0
            return False
        except subprocess.CalledProcessError:
            return True
python
def repo_is_dirty(self):
        """
        Return true if repo is dirty
        """
        try:
            subprocess.check_call(['git', 'diff-files', '--quiet'], cwd=self.repo_dir)
            # Return code is 0
            return False
        except subprocess.CalledProcessError:
            return True
[ "def", "repo_is_dirty", "(", "self", ")", ":", "try", ":", "subprocess", ".", "check_call", "(", "[", "'git'", ",", "'diff-files'", ",", "'--quiet'", "]", ",", "cwd", "=", "self", ".", "repo_dir", ")", "# Return code is 0", "return", "False", "except", "subprocess", ".", "CalledProcessError", ":", "return", "True" ]
Return true if repo is dirty
[ "Return", "true", "if", "repo", "is", "dirty" ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L116-L125
train
jupyterhub/nbgitpuller
nbgitpuller/pull.py
GitPuller.find_upstream_changed
def find_upstream_changed(self, kind):
        """
        Return list of files that have been changed upstream belonging to a particular kind of change
        """
        output = subprocess.check_output([
            'git', 'log', '{}..origin/{}'.format(self.branch_name, self.branch_name),
            '--oneline', '--name-status'
        ], cwd=self.repo_dir).decode()
        files = []
        for line in output.split('\n'):
            if line.startswith(kind):
                files.append(os.path.join(self.repo_dir, line.split('\t', 1)[1]))

        return files
python
def find_upstream_changed(self, kind):
        """
        Return list of files that have been changed upstream belonging to a particular kind of change
        """
        output = subprocess.check_output([
            'git', 'log', '{}..origin/{}'.format(self.branch_name, self.branch_name),
            '--oneline', '--name-status'
        ], cwd=self.repo_dir).decode()
        files = []
        for line in output.split('\n'):
            if line.startswith(kind):
                files.append(os.path.join(self.repo_dir, line.split('\t', 1)[1]))

        return files
[ "def", "find_upstream_changed", "(", "self", ",", "kind", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'log'", ",", "'{}..origin/{}'", ".", "format", "(", "self", ".", "branch_name", ",", "self", ".", "branch_name", ")", ",", "'--oneline'", ",", "'--name-status'", "]", ",", "cwd", "=", "self", ".", "repo_dir", ")", ".", "decode", "(", ")", "files", "=", "[", "]", "for", "line", "in", "output", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "startswith", "(", "kind", ")", ":", "files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "self", ".", "repo_dir", ",", "line", ".", "split", "(", "'\\t'", ",", "1", ")", "[", "1", "]", ")", ")", "return", "files" ]
Return list of files that have been changed upstream belonging to a particular kind of change
[ "Return", "list", "of", "files", "that", "have", "been", "changed", "upstream", "belonging", "to", "a", "particular", "kind", "of", "change" ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L133-L146
train
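find_upstream_changed assumes `git log --oneline --name-status` output in which each changed file appears as a status letter, a tab, then the path. A self-contained illustration of the same parsing over made-up output:

sample = ("abc1234 Add a dataset\n"
          "A\tdata/new_table.csv\n"
          "M\tnotebooks/intro.ipynb\n")
# kind='A' keeps only lines for files added upstream.
added = [line.split('\t', 1)[1]
         for line in sample.split('\n')
         if line.startswith('A')]
print(added)  # ['data/new_table.csv']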
jupyterhub/nbgitpuller
nbgitpuller/pull.py
GitPuller.rename_local_untracked
def rename_local_untracked(self):
        """
        Rename local untracked files that would require pulls
        """
        # Find what files have been added!
        new_upstream_files = self.find_upstream_changed('A')
        for f in new_upstream_files:
            if os.path.exists(f):
                # If there's a file extension, put the timestamp before that
                ts = datetime.datetime.now().strftime('__%Y%m%d%H%M%S')
                path_head, path_tail = os.path.split(f)
                path_tail = ts.join(os.path.splitext(path_tail))
                new_file_name = os.path.join(path_head, path_tail)
                os.rename(f, new_file_name)
                yield 'Renamed {} to {} to avoid conflict with upstream'.format(f, new_file_name)
python
def rename_local_untracked(self):
        """
        Rename local untracked files that would require pulls
        """
        # Find what files have been added!
        new_upstream_files = self.find_upstream_changed('A')
        for f in new_upstream_files:
            if os.path.exists(f):
                # If there's a file extension, put the timestamp before that
                ts = datetime.datetime.now().strftime('__%Y%m%d%H%M%S')
                path_head, path_tail = os.path.split(f)
                path_tail = ts.join(os.path.splitext(path_tail))
                new_file_name = os.path.join(path_head, path_tail)
                os.rename(f, new_file_name)
                yield 'Renamed {} to {} to avoid conflict with upstream'.format(f, new_file_name)
[ "def", "rename_local_untracked", "(", "self", ")", ":", "# Find what files have been added!", "new_upstream_files", "=", "self", ".", "find_upstream_changed", "(", "'A'", ")", "for", "f", "in", "new_upstream_files", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "# If there's a file extension, put the timestamp before that", "ts", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'__%Y%m%d%H%M%S'", ")", "path_head", ",", "path_tail", "=", "os", ".", "path", ".", "split", "(", "f", ")", "path_tail", "=", "ts", ".", "join", "(", "os", ".", "path", ".", "splitext", "(", "path_tail", ")", ")", "new_file_name", "=", "os", ".", "path", ".", "join", "(", "path_head", ",", "path_tail", ")", "os", ".", "rename", "(", "f", ",", "new_file_name", ")", "yield", "'Renamed {} to {} to avoid conflict with upstream'", ".", "format", "(", "f", ",", "new_file_name", ")" ]
Rename local untracked files that would conflict with files added upstream
[ "Rename", "local", "untracked", "files", "that", "would", "conflict", "with", "files", "added", "upstream" ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L172-L186
train
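The rename scheme splices a timestamp in front of the file extension via ts.join(os.path.splitext(...)). A worked example with a made-up timestamp:

import os.path

ts = '__20240101120000'  # hypothetical strftime('__%Y%m%d%H%M%S') output
# splitext -> ('report', '.ipynb'); join puts ts between the two parts.
print(ts.join(os.path.splitext('report.ipynb')))
# -> report__20240101120000.ipynb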
jupyterhub/nbgitpuller
nbgitpuller/pull.py
GitPuller.update
def update(self):
        """
        Do the pulling if necessary
        """
        # Fetch remotes, so we know we're dealing with latest remote
        yield from self.update_remotes()

        # Rename local untracked files that might be overwritten by pull
        yield from self.rename_local_untracked()

        # Reset local files that have been deleted. We don't actually expect users to
        # delete something that's present upstream and expect to keep it. This prevents
        # unnecessary conflicts, and also allows users to click the link again to get
        # a fresh copy of a file they might have screwed up.
        yield from self.reset_deleted_files()

        # If there are local changes, make a commit so we can do merges when pulling
        # We also allow empty commits. On NFS (at least), sometimes repo_is_dirty returns a false
        # positive, returning True even when there are no local changes (git diff-files seems to return
        # bogus output?). While ideally that would not happen, allowing empty commits keeps us
        # resilient to that issue.
        if self.repo_is_dirty():
            yield from self.ensure_lock()
            yield from execute_cmd(['git', 'commit', '-am', 'WIP', '--allow-empty'], cwd=self.repo_dir)

        # Merge master into local!
        yield from self.ensure_lock()
        yield from execute_cmd(['git', 'merge', '-Xours', 'origin/{}'.format(self.branch_name)], cwd=self.repo_dir)
python
def update(self):
        """
        Do the pulling if necessary
        """
        # Fetch remotes, so we know we're dealing with latest remote
        yield from self.update_remotes()

        # Rename local untracked files that might be overwritten by pull
        yield from self.rename_local_untracked()

        # Reset local files that have been deleted. We don't actually expect users to
        # delete something that's present upstream and expect to keep it. This prevents
        # unnecessary conflicts, and also allows users to click the link again to get
        # a fresh copy of a file they might have screwed up.
        yield from self.reset_deleted_files()

        # If there are local changes, make a commit so we can do merges when pulling
        # We also allow empty commits. On NFS (at least), sometimes repo_is_dirty returns a false
        # positive, returning True even when there are no local changes (git diff-files seems to return
        # bogus output?). While ideally that would not happen, allowing empty commits keeps us
        # resilient to that issue.
        if self.repo_is_dirty():
            yield from self.ensure_lock()
            yield from execute_cmd(['git', 'commit', '-am', 'WIP', '--allow-empty'], cwd=self.repo_dir)

        # Merge master into local!
        yield from self.ensure_lock()
        yield from execute_cmd(['git', 'merge', '-Xours', 'origin/{}'.format(self.branch_name)], cwd=self.repo_dir)
[ "def", "update", "(", "self", ")", ":", "# Fetch remotes, so we know we're dealing with latest remote", "yield", "from", "self", ".", "update_remotes", "(", ")", "# Rename local untracked files that might be overwritten by pull", "yield", "from", "self", ".", "rename_local_untracked", "(", ")", "# Reset local files that have been deleted. We don't actually expect users to", "# delete something that's present upstream and expect to keep it. This prevents", "# unnecessary conflicts, and also allows users to click the link again to get", "# a fresh copy of a file they might have screwed up.", "yield", "from", "self", ".", "reset_deleted_files", "(", ")", "# If there are local changes, make a commit so we can do merges when pulling", "# We also allow empty commits. On NFS (at least), sometimes repo_is_dirty returns a false", "# positive, returning True even when there are no local changes (git diff-files seems to return", "# bogus output?). While ideally that would not happen, allowing empty commits keeps us", "# resilient to that issue.", "if", "self", ".", "repo_is_dirty", "(", ")", ":", "yield", "from", "self", ".", "ensure_lock", "(", ")", "yield", "from", "execute_cmd", "(", "[", "'git'", ",", "'commit'", ",", "'-am'", ",", "'WIP'", ",", "'--allow-empty'", "]", ",", "cwd", "=", "self", ".", "repo_dir", ")", "# Merge master into local!", "yield", "from", "self", ".", "ensure_lock", "(", ")", "yield", "from", "execute_cmd", "(", "[", "'git'", ",", "'merge'", ",", "'-Xours'", ",", "'origin/{}'", ".", "format", "(", "self", ".", "branch_name", ")", "]", ",", "cwd", "=", "self", ".", "repo_dir", ")" ]
Do the pulling if necessary
[ "Do", "the", "pulling", "if", "necessary" ]
30df8d548078c58665ce0ae920308f991122abe3
https://github.com/jupyterhub/nbgitpuller/blob/30df8d548078c58665ce0ae920308f991122abe3/nbgitpuller/pull.py#L189-L216
train
jcrobak/parquet-python
parquet/__init__.py
_get_footer_size
def _get_footer_size(file_obj):
    """Read the footer size in bytes, which is serialized as little endian."""
    file_obj.seek(-8, 2)
    tup = struct.unpack(b"<i", file_obj.read(4))
    return tup[0]
python
def _get_footer_size(file_obj):
    """Read the footer size in bytes, which is serialized as little endian."""
    file_obj.seek(-8, 2)
    tup = struct.unpack(b"<i", file_obj.read(4))
    return tup[0]
[ "def", "_get_footer_size", "(", "file_obj", ")", ":", "file_obj", ".", "seek", "(", "-", "8", ",", "2", ")", "tup", "=", "struct", ".", "unpack", "(", "b\"<i\"", ",", "file_obj", ".", "read", "(", "4", ")", ")", "return", "tup", "[", "0", "]" ]
Read the footer size in bytes, which is serialized as little endian.
[ "Read", "the", "footer", "size", "in", "bytes", "which", "is", "serialized", "as", "little", "endian", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L64-L68
train
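The seek(-8, 2) works because a parquet file ends with a 4-byte little-endian footer length followed by the 4-byte b'PAR1' magic. A standalone sketch of the same read (function name is mine, not the library's):

import struct

def footer_size(path):
    with open(path, 'rb') as f:
        f.seek(-8, 2)  # last 8 bytes: <length><magic>
        size, magic = struct.unpack(b'<i4s', f.read(8))
        assert magic == b'PAR1'  # trailing magic bytes of a parquet file
        return size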
jcrobak/parquet-python
parquet/__init__.py
_read_footer
def _read_footer(file_obj):
    """Read the footer from the given file object and returns a FileMetaData object.

    This method assumes that the fo references a valid parquet file.
    """
    footer_size = _get_footer_size(file_obj)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Footer size in bytes: %s", footer_size)
    file_obj.seek(-(8 + footer_size), 2)  # seek to beginning of footer
    tin = TFileTransport(file_obj)
    pin = TCompactProtocolFactory().get_protocol(tin)
    fmd = parquet_thrift.FileMetaData()
    fmd.read(pin)
    return fmd
python
def _read_footer(file_obj):
    """Read the footer from the given file object and returns a FileMetaData object.

    This method assumes that the fo references a valid parquet file.
    """
    footer_size = _get_footer_size(file_obj)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Footer size in bytes: %s", footer_size)
    file_obj.seek(-(8 + footer_size), 2)  # seek to beginning of footer
    tin = TFileTransport(file_obj)
    pin = TCompactProtocolFactory().get_protocol(tin)
    fmd = parquet_thrift.FileMetaData()
    fmd.read(pin)
    return fmd
[ "def", "_read_footer", "(", "file_obj", ")", ":", "footer_size", "=", "_get_footer_size", "(", "file_obj", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "\"Footer size in bytes: %s\"", ",", "footer_size", ")", "file_obj", ".", "seek", "(", "-", "(", "8", "+", "footer_size", ")", ",", "2", ")", "# seek to beginning of footer", "tin", "=", "TFileTransport", "(", "file_obj", ")", "pin", "=", "TCompactProtocolFactory", "(", ")", ".", "get_protocol", "(", "tin", ")", "fmd", "=", "parquet_thrift", ".", "FileMetaData", "(", ")", "fmd", ".", "read", "(", "pin", ")", "return", "fmd" ]
Read the footer from the given file object and return a FileMetaData object. This method assumes that the fo references a valid parquet file.
[ "Read", "the", "footer", "from", "the", "given", "file", "object", "and", "return", "a", "FileMetaData", "object", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L71-L84
train
jcrobak/parquet-python
parquet/__init__.py
_read_page_header
def _read_page_header(file_obj):
    """Read the page_header from the given fo."""
    tin = TFileTransport(file_obj)
    pin = TCompactProtocolFactory().get_protocol(tin)
    page_header = parquet_thrift.PageHeader()
    page_header.read(pin)
    return page_header
python
def _read_page_header(file_obj):
    """Read the page_header from the given fo."""
    tin = TFileTransport(file_obj)
    pin = TCompactProtocolFactory().get_protocol(tin)
    page_header = parquet_thrift.PageHeader()
    page_header.read(pin)
    return page_header
[ "def", "_read_page_header", "(", "file_obj", ")", ":", "tin", "=", "TFileTransport", "(", "file_obj", ")", "pin", "=", "TCompactProtocolFactory", "(", ")", ".", "get_protocol", "(", "tin", ")", "page_header", "=", "parquet_thrift", ".", "PageHeader", "(", ")", "page_header", ".", "read", "(", "pin", ")", "return", "page_header" ]
Read the page_header from the given fo.
[ "Read", "the", "page_header", "from", "the", "given", "fo", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L87-L93
train
jcrobak/parquet-python
parquet/__init__.py
read_footer
def read_footer(filename):
    """Read the footer and return the FileMetaData for the specified filename."""
    with open(filename, 'rb') as file_obj:
        if not _check_header_magic_bytes(file_obj) or \
           not _check_footer_magic_bytes(file_obj):
            raise ParquetFormatException("{0} is not a valid parquet file "
                                         "(missing magic bytes)"
                                         .format(filename))
        return _read_footer(file_obj)
python
def read_footer(filename):
    """Read the footer and return the FileMetaData for the specified filename."""
    with open(filename, 'rb') as file_obj:
        if not _check_header_magic_bytes(file_obj) or \
           not _check_footer_magic_bytes(file_obj):
            raise ParquetFormatException("{0} is not a valid parquet file "
                                         "(missing magic bytes)"
                                         .format(filename))
        return _read_footer(file_obj)
[ "def", "read_footer", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "file_obj", ":", "if", "not", "_check_header_magic_bytes", "(", "file_obj", ")", "or", "not", "_check_footer_magic_bytes", "(", "file_obj", ")", ":", "raise", "ParquetFormatException", "(", "\"{0} is not a valid parquet file \"", "\"(missing magic bytes)\"", ".", "format", "(", "filename", ")", ")", "return", "_read_footer", "(", "file_obj", ")" ]
Read the footer and return the FileMetaData for the specified filename.
[ "Read", "the", "footer", "and", "return", "the", "FileMetaData", "for", "the", "specified", "filename", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L96-L104
train
jcrobak/parquet-python
parquet/__init__.py
_get_offset
def _get_offset(cmd):
    """Return the offset into the cmd based upon if it's a dictionary page or a data page."""
    dict_offset = cmd.dictionary_page_offset
    data_offset = cmd.data_page_offset
    if dict_offset is None or data_offset < dict_offset:
        return data_offset
    return dict_offset
python
def _get_offset(cmd):
    """Return the offset into the cmd based upon if it's a dictionary page or a data page."""
    dict_offset = cmd.dictionary_page_offset
    data_offset = cmd.data_page_offset
    if dict_offset is None or data_offset < dict_offset:
        return data_offset
    return dict_offset
[ "def", "_get_offset", "(", "cmd", ")", ":", "dict_offset", "=", "cmd", ".", "dictionary_page_offset", "data_offset", "=", "cmd", ".", "data_page_offset", "if", "dict_offset", "is", "None", "or", "data_offset", "<", "dict_offset", ":", "return", "data_offset", "return", "dict_offset" ]
Return the offset into the cmd based on whether it's a dictionary page or a data page.
[ "Return", "the", "offset", "into", "the", "cmd", "based", "on", "whether", "it", "s", "a", "dictionary", "page", "or", "a", "data", "page", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L115-L121
train
jcrobak/parquet-python
parquet/__init__.py
_read_data
def _read_data(file_obj, fo_encoding, value_count, bit_width):
    """Read data from the file-object using the given encoding.

    The data could be definition levels, repetition levels, or actual values.
    """
    vals = []
    if fo_encoding == parquet_thrift.Encoding.RLE:
        seen = 0
        while seen < value_count:
            values = encoding.read_rle_bit_packed_hybrid(file_obj, bit_width)
            if values is None:
                break  # EOF was reached.
            vals += values
            seen += len(values)
    elif fo_encoding == parquet_thrift.Encoding.BIT_PACKED:
        raise NotImplementedError("Bit packing not yet supported")

    return vals
python
def _read_data(file_obj, fo_encoding, value_count, bit_width):
    """Read data from the file-object using the given encoding.

    The data could be definition levels, repetition levels, or actual values.
    """
    vals = []
    if fo_encoding == parquet_thrift.Encoding.RLE:
        seen = 0
        while seen < value_count:
            values = encoding.read_rle_bit_packed_hybrid(file_obj, bit_width)
            if values is None:
                break  # EOF was reached.
            vals += values
            seen += len(values)
    elif fo_encoding == parquet_thrift.Encoding.BIT_PACKED:
        raise NotImplementedError("Bit packing not yet supported")

    return vals
[ "def", "_read_data", "(", "file_obj", ",", "fo_encoding", ",", "value_count", ",", "bit_width", ")", ":", "vals", "=", "[", "]", "if", "fo_encoding", "==", "parquet_thrift", ".", "Encoding", ".", "RLE", ":", "seen", "=", "0", "while", "seen", "<", "value_count", ":", "values", "=", "encoding", ".", "read_rle_bit_packed_hybrid", "(", "file_obj", ",", "bit_width", ")", "if", "values", "is", "None", ":", "break", "# EOF was reached.", "vals", "+=", "values", "seen", "+=", "len", "(", "values", ")", "elif", "fo_encoding", "==", "parquet_thrift", ".", "Encoding", ".", "BIT_PACKED", ":", "raise", "NotImplementedError", "(", "\"Bit packing not yet supported\"", ")", "return", "vals" ]
Read data from the file-object using the given encoding. The data could be definition levels, repetition levels, or actual values.
[ "Read", "data", "from", "the", "file", "-", "object", "using", "the", "given", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L258-L275
train
jcrobak/parquet-python
parquet/__init__.py
_read_dictionary_page
def _read_dictionary_page(file_obj, schema_helper, page_header, column_metadata):
    """Read a page containing dictionary data.

    Consumes data using the plain encoding and returns an array of values.
    """
    raw_bytes = _read_page(file_obj, page_header, column_metadata)
    io_obj = io.BytesIO(raw_bytes)
    values = encoding.read_plain(
        io_obj,
        column_metadata.type,
        page_header.dictionary_page_header.num_values
    )
    # convert the values once, if the dictionary is associated with a converted_type.
    schema_element = schema_helper.schema_element(column_metadata.path_in_schema[-1])
    return convert_column(values, schema_element) if schema_element.converted_type is not None else values
python
def _read_dictionary_page(file_obj, schema_helper, page_header, column_metadata):
    """Read a page containing dictionary data.

    Consumes data using the plain encoding and returns an array of values.
    """
    raw_bytes = _read_page(file_obj, page_header, column_metadata)
    io_obj = io.BytesIO(raw_bytes)
    values = encoding.read_plain(
        io_obj,
        column_metadata.type,
        page_header.dictionary_page_header.num_values
    )
    # convert the values once, if the dictionary is associated with a converted_type.
    schema_element = schema_helper.schema_element(column_metadata.path_in_schema[-1])
    return convert_column(values, schema_element) if schema_element.converted_type is not None else values
[ "def", "_read_dictionary_page", "(", "file_obj", ",", "schema_helper", ",", "page_header", ",", "column_metadata", ")", ":", "raw_bytes", "=", "_read_page", "(", "file_obj", ",", "page_header", ",", "column_metadata", ")", "io_obj", "=", "io", ".", "BytesIO", "(", "raw_bytes", ")", "values", "=", "encoding", ".", "read_plain", "(", "io_obj", ",", "column_metadata", ".", "type", ",", "page_header", ".", "dictionary_page_header", ".", "num_values", ")", "# convert the values once, if the dictionary is associated with a converted_type.", "schema_element", "=", "schema_helper", ".", "schema_element", "(", "column_metadata", ".", "path_in_schema", "[", "-", "1", "]", ")", "return", "convert_column", "(", "values", ",", "schema_element", ")", "if", "schema_element", ".", "converted_type", "is", "not", "None", "else", "values" ]
Read a page containing dictionary data. Consumes data using the plain encoding and returns an array of values.
[ "Read", "a", "page", "containing", "dictionary", "data", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L386-L400
train
jcrobak/parquet-python
parquet/__init__.py
_dump
def _dump(file_obj, options, out=sys.stdout):
    """Dump to fo with given options."""
    # writer and keys are lazily loaded. We don't know the keys until we have
    # the first item. And we need the keys for the csv writer.
    total_count = 0
    writer = None
    keys = None
    for row in DictReader(file_obj, options.col):
        if not keys:
            keys = row.keys()
        if not writer:
            writer = csv.DictWriter(out, keys, delimiter=u'\t', quotechar=u'\'', quoting=csv.QUOTE_MINIMAL) \
                if options.format == 'csv' \
                else JsonWriter(out) if options.format == 'json' \
                else None
        if total_count == 0 and options.format == "csv" and not options.no_headers:
            writer.writeheader()
        if options.limit != -1 and total_count >= options.limit:
            return
        row_unicode = {k: v.decode("utf-8") if isinstance(v, bytes) else v for k, v in row.items()}
        writer.writerow(row_unicode)
        total_count += 1
python
def _dump(file_obj, options, out=sys.stdout):
    """Dump to fo with given options."""
    # writer and keys are lazily loaded. We don't know the keys until we have
    # the first item. And we need the keys for the csv writer.
    total_count = 0
    writer = None
    keys = None
    for row in DictReader(file_obj, options.col):
        if not keys:
            keys = row.keys()
        if not writer:
            writer = csv.DictWriter(out, keys, delimiter=u'\t', quotechar=u'\'', quoting=csv.QUOTE_MINIMAL) \
                if options.format == 'csv' \
                else JsonWriter(out) if options.format == 'json' \
                else None
        if total_count == 0 and options.format == "csv" and not options.no_headers:
            writer.writeheader()
        if options.limit != -1 and total_count >= options.limit:
            return
        row_unicode = {k: v.decode("utf-8") if isinstance(v, bytes) else v for k, v in row.items()}
        writer.writerow(row_unicode)
        total_count += 1
[ "def", "_dump", "(", "file_obj", ",", "options", ",", "out", "=", "sys", ".", "stdout", ")", ":", "# writer and keys are lazily loaded. We don't know the keys until we have", "# the first item. And we need the keys for the csv writer.", "total_count", "=", "0", "writer", "=", "None", "keys", "=", "None", "for", "row", "in", "DictReader", "(", "file_obj", ",", "options", ".", "col", ")", ":", "if", "not", "keys", ":", "keys", "=", "row", ".", "keys", "(", ")", "if", "not", "writer", ":", "writer", "=", "csv", ".", "DictWriter", "(", "out", ",", "keys", ",", "delimiter", "=", "u'\\t'", ",", "quotechar", "=", "u'\\''", ",", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", ")", "if", "options", ".", "format", "==", "'csv'", "else", "JsonWriter", "(", "out", ")", "if", "options", ".", "format", "==", "'json'", "else", "None", "if", "total_count", "==", "0", "and", "options", ".", "format", "==", "\"csv\"", "and", "not", "options", ".", "no_headers", ":", "writer", ".", "writeheader", "(", ")", "if", "options", ".", "limit", "!=", "-", "1", "and", "total_count", ">=", "options", ".", "limit", ":", "return", "row_unicode", "=", "{", "k", ":", "v", ".", "decode", "(", "\"utf-8\"", ")", "if", "isinstance", "(", "v", ",", "bytes", ")", "else", "v", "for", "k", ",", "v", "in", "row", ".", "items", "(", ")", "}", "writer", ".", "writerow", "(", "row_unicode", ")", "total_count", "+=", "1" ]
Dump to fo with given options.
[ "Dump", "to", "fo", "with", "given", "options", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L504-L525
train
jcrobak/parquet-python
parquet/__init__.py
dump
def dump(filename, options, out=sys.stdout):
    """Dump parquet file with given filename using options to `out`."""
    with open(filename, 'rb') as file_obj:
        return _dump(file_obj, options=options, out=out)
python
def dump(filename, options, out=sys.stdout):
    """Dump parquet file with given filename using options to `out`."""
    with open(filename, 'rb') as file_obj:
        return _dump(file_obj, options=options, out=out)
[ "def", "dump", "(", "filename", ",", "options", ",", "out", "=", "sys", ".", "stdout", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "file_obj", ":", "return", "_dump", "(", "file_obj", ",", "options", "=", "options", ",", "out", "=", "out", ")" ]
Dump parquet file with given filename using options to `out`.
[ "Dump", "parquet", "file", "with", "given", "filename", "using", "options", "to", "out", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L528-L531
train
jcrobak/parquet-python
parquet/__init__.py
JsonWriter.writerow
def writerow(self, row):
        """Write a single row."""
        json_text = json.dumps(row)
        if isinstance(json_text, bytes):
            json_text = json_text.decode('utf-8')
        self._out.write(json_text)
        self._out.write(u'\n')
python
def writerow(self, row):
        """Write a single row."""
        json_text = json.dumps(row)
        if isinstance(json_text, bytes):
            json_text = json_text.decode('utf-8')
        self._out.write(json_text)
        self._out.write(u'\n')
[ "def", "writerow", "(", "self", ",", "row", ")", ":", "json_text", "=", "json", ".", "dumps", "(", "row", ")", "if", "isinstance", "(", "json_text", ",", "bytes", ")", ":", "json_text", "=", "json_text", ".", "decode", "(", "'utf-8'", ")", "self", ".", "_out", ".", "write", "(", "json_text", ")", "self", ".", "_out", ".", "write", "(", "u'\\n'", ")" ]
Write a single row.
[ "Write", "a", "single", "row", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__init__.py#L495-L501
train
jcrobak/parquet-python
parquet/encoding.py
read_plain_boolean
def read_plain_boolean(file_obj, count):
    """Read `count` booleans using the plain encoding."""
    # for bit packed, the count is stored shifted up. But we want to pass in a count,
    # so we shift up.
    # bit width is 1 for a single-bit boolean.
    return read_bitpacked(file_obj, count << 1, 1, logger.isEnabledFor(logging.DEBUG))
python
def read_plain_boolean(file_obj, count):
    """Read `count` booleans using the plain encoding."""
    # for bit packed, the count is stored shifted up. But we want to pass in a count,
    # so we shift up.
    # bit width is 1 for a single-bit boolean.
    return read_bitpacked(file_obj, count << 1, 1, logger.isEnabledFor(logging.DEBUG))
[ "def", "read_plain_boolean", "(", "file_obj", ",", "count", ")", ":", "# for bit packed, the count is stored shifted up. But we want to pass in a count,", "# so we shift up.", "# bit width is 1 for a single-bit boolean.", "return", "read_bitpacked", "(", "file_obj", ",", "count", "<<", "1", ",", "1", ",", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ")" ]
Read `count` booleans using the plain encoding.
[ "Read", "count", "booleans", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L28-L33
train
jcrobak/parquet-python
parquet/encoding.py
read_plain_int32
def read_plain_int32(file_obj, count):
    """Read `count` 32-bit ints using the plain encoding."""
    length = 4 * count
    data = file_obj.read(length)
    if len(data) != length:
        raise EOFError("Expected {} bytes but got {} bytes".format(length, len(data)))
    res = struct.unpack("<{}i".format(count).encode("utf-8"), data)
    return res
python
def read_plain_int32(file_obj, count):
    """Read `count` 32-bit ints using the plain encoding."""
    length = 4 * count
    data = file_obj.read(length)
    if len(data) != length:
        raise EOFError("Expected {} bytes but got {} bytes".format(length, len(data)))
    res = struct.unpack("<{}i".format(count).encode("utf-8"), data)
    return res
[ "def", "read_plain_int32", "(", "file_obj", ",", "count", ")", ":", "length", "=", "4", "*", "count", "data", "=", "file_obj", ".", "read", "(", "length", ")", "if", "len", "(", "data", ")", "!=", "length", ":", "raise", "EOFError", "(", "\"Expected {} bytes but got {} bytes\"", ".", "format", "(", "length", ",", "len", "(", "data", ")", ")", ")", "res", "=", "struct", ".", "unpack", "(", "\"<{}i\"", ".", "format", "(", "count", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "data", ")", "return", "res" ]
Read `count` 32-bit ints using the plain encoding.
[ "Read", "count", "32", "-", "bit", "ints", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L36-L43
train
jcrobak/parquet-python
parquet/encoding.py
read_plain_int64
def read_plain_int64(file_obj, count):
    """Read `count` 64-bit ints using the plain encoding."""
    return struct.unpack("<{}q".format(count).encode("utf-8"), file_obj.read(8 * count))
python
def read_plain_int64(file_obj, count):
    """Read `count` 64-bit ints using the plain encoding."""
    return struct.unpack("<{}q".format(count).encode("utf-8"), file_obj.read(8 * count))
[ "def", "read_plain_int64", "(", "file_obj", ",", "count", ")", ":", "return", "struct", ".", "unpack", "(", "\"<{}q\"", ".", "format", "(", "count", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "file_obj", ".", "read", "(", "8", "*", "count", ")", ")" ]
Read `count` 64-bit ints using the plain encoding.
[ "Read", "count", "64", "-", "bit", "ints", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L46-L48
train
jcrobak/parquet-python
parquet/encoding.py
read_plain_int96
def read_plain_int96(file_obj, count):
    """Read `count` 96-bit ints using the plain encoding."""
    items = struct.unpack(b"<" + b"qi" * count, file_obj.read(12 * count))
    return [q << 32 | i for (q, i) in zip(items[0::2], items[1::2])]
python
def read_plain_int96(file_obj, count):
    """Read `count` 96-bit ints using the plain encoding."""
    items = struct.unpack(b"<" + b"qi" * count, file_obj.read(12 * count))
    return [q << 32 | i for (q, i) in zip(items[0::2], items[1::2])]
[ "def", "read_plain_int96", "(", "file_obj", ",", "count", ")", ":", "items", "=", "struct", ".", "unpack", "(", "b\"<\"", "+", "b\"qi\"", "*", "count", ",", "file_obj", ".", "read", "(", "12", "*", "count", ")", ")", "return", "[", "q", "<<", "32", "|", "i", "for", "(", "q", ",", "i", ")", "in", "zip", "(", "items", "[", "0", ":", ":", "2", "]", ",", "items", "[", "1", ":", ":", "2", "]", ")", "]" ]
Read `count` 96-bit ints using the plain encoding.
[ "Read", "count", "96", "-", "bit", "ints", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L51-L54
train
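Each int96 value is unpacked as an (int64, int32) pair and recombined with a shift. A worked example of that arithmetic:

q, i = 1, 2         # high 64 bits and low 32 bits of one int96 value
print(q << 32 | i)  # 4294967298, i.e. (1 << 32) + 2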
jcrobak/parquet-python
parquet/encoding.py
read_plain_float
def read_plain_float(file_obj, count):
    """Read `count` 32-bit floats using the plain encoding."""
    return struct.unpack("<{}f".format(count).encode("utf-8"), file_obj.read(4 * count))
python
def read_plain_float(file_obj, count):
    """Read `count` 32-bit floats using the plain encoding."""
    return struct.unpack("<{}f".format(count).encode("utf-8"), file_obj.read(4 * count))
[ "def", "read_plain_float", "(", "file_obj", ",", "count", ")", ":", "return", "struct", ".", "unpack", "(", "\"<{}f\"", ".", "format", "(", "count", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "file_obj", ".", "read", "(", "4", "*", "count", ")", ")" ]
Read `count` 32-bit floats using the plain encoding.
[ "Read", "count", "32", "-", "bit", "floats", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L57-L59
train
jcrobak/parquet-python
parquet/encoding.py
read_plain_byte_array
def read_plain_byte_array(file_obj, count):
    """Read `count` byte arrays using the plain encoding."""
    return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for i in range(count)]
python
def read_plain_byte_array(file_obj, count):
    """Read `count` byte arrays using the plain encoding."""
    return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for i in range(count)]
[ "def", "read_plain_byte_array", "(", "file_obj", ",", "count", ")", ":", "return", "[", "file_obj", ".", "read", "(", "struct", ".", "unpack", "(", "b\"<i\"", ",", "file_obj", ".", "read", "(", "4", ")", ")", "[", "0", "]", ")", "for", "i", "in", "range", "(", "count", ")", "]" ]
Read `count` byte arrays using the plain encoding.
[ "Read", "count", "byte", "arrays", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L67-L69
train
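Plain byte arrays are stored length-prefixed with a 4-byte little-endian int. A self-contained round trip over made-up payloads:

import io
import struct

buf = io.BytesIO(struct.pack(b'<i', 3) + b'foo' +
                 struct.pack(b'<i', 2) + b'hi')
# Read the 4-byte length, then that many payload bytes, twice.
vals = [buf.read(struct.unpack(b'<i', buf.read(4))[0]) for _ in range(2)]
print(vals)  # [b'foo', b'hi']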
jcrobak/parquet-python
parquet/encoding.py
read_plain
def read_plain(file_obj, type_, count):
    """Read `count` items `type` from the fo using the plain encoding."""
    if count == 0:
        return []
    conv = DECODE_PLAIN[type_]
    return conv(file_obj, count)
python
def read_plain(file_obj, type_, count):
    """Read `count` items `type` from the fo using the plain encoding."""
    if count == 0:
        return []
    conv = DECODE_PLAIN[type_]
    return conv(file_obj, count)
[ "def", "read_plain", "(", "file_obj", ",", "type_", ",", "count", ")", ":", "if", "count", "==", "0", ":", "return", "[", "]", "conv", "=", "DECODE_PLAIN", "[", "type_", "]", "return", "conv", "(", "file_obj", ",", "count", ")" ]
Read `count` items of type `type_` from the fo using the plain encoding.
[ "Read", "count", "items", "of", "type", "type_", "from", "the", "fo", "using", "the", "plain", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L89-L94
train
jcrobak/parquet-python
parquet/encoding.py
read_unsigned_var_int
def read_unsigned_var_int(file_obj):
    """Read a value using the unsigned, variable int encoding."""
    result = 0
    shift = 0
    while True:
        byte = struct.unpack(b"<B", file_obj.read(1))[0]
        result |= ((byte & 0x7F) << shift)
        if (byte & 0x80) == 0:
            break
        shift += 7
    return result
python
def read_unsigned_var_int(file_obj):
    """Read a value using the unsigned, variable int encoding."""
    result = 0
    shift = 0
    while True:
        byte = struct.unpack(b"<B", file_obj.read(1))[0]
        result |= ((byte & 0x7F) << shift)
        if (byte & 0x80) == 0:
            break
        shift += 7
    return result
[ "def", "read_unsigned_var_int", "(", "file_obj", ")", ":", "result", "=", "0", "shift", "=", "0", "while", "True", ":", "byte", "=", "struct", ".", "unpack", "(", "b\"<B\"", ",", "file_obj", ".", "read", "(", "1", ")", ")", "[", "0", "]", "result", "|=", "(", "(", "byte", "&", "0x7F", ")", "<<", "shift", ")", "if", "(", "byte", "&", "0x80", ")", "==", "0", ":", "break", "shift", "+=", "7", "return", "result" ]
Read a value using the unsigned, variable int encoding.
[ "Read", "a", "value", "using", "the", "unsigned", "variable", "int", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L97-L107
train
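This is the standard LEB128-style varint: each byte contributes its low seven bits, least-significant group first, and the high bit flags continuation. A worked example:

import io
from parquet.encoding import read_unsigned_var_int

# 0x96 -> low bits 0b0010110 (22), continuation bit set;
# 0x01 -> 1 << 7 = 128; total 128 + 22 = 150.
print(read_unsigned_var_int(io.BytesIO(b'\x96\x01')))  # 150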
jcrobak/parquet-python
parquet/encoding.py
read_rle
def read_rle(file_obj, header, bit_width, debug_logging):
    """Read a run-length encoded run from the given fo with the given header and bit_width.

    The count is determined from the header and the width is used to grab the
    value that's repeated. Yields the value repeated count times.
    """
    count = header >> 1
    zero_data = b"\x00\x00\x00\x00"
    width = (bit_width + 7) // 8
    data = file_obj.read(width)
    data = data + zero_data[len(data):]
    value = struct.unpack(b"<i", data)[0]
    if debug_logging:
        logger.debug("Read RLE group with value %s of byte-width %s and count %s", value, width, count)
    for _ in range(count):
        yield value
python
def read_rle(file_obj, header, bit_width, debug_logging):
    """Read a run-length encoded run from the given fo with the given header and bit_width.

    The count is determined from the header and the width is used to grab the
    value that's repeated. Yields the value repeated count times.
    """
    count = header >> 1
    zero_data = b"\x00\x00\x00\x00"
    width = (bit_width + 7) // 8
    data = file_obj.read(width)
    data = data + zero_data[len(data):]
    value = struct.unpack(b"<i", data)[0]
    if debug_logging:
        logger.debug("Read RLE group with value %s of byte-width %s and count %s", value, width, count)
    for _ in range(count):
        yield value
[ "def", "read_rle", "(", "file_obj", ",", "header", ",", "bit_width", ",", "debug_logging", ")", ":", "count", "=", "header", ">>", "1", "zero_data", "=", "b\"\\x00\\x00\\x00\\x00\"", "width", "=", "(", "bit_width", "+", "7", ")", "//", "8", "data", "=", "file_obj", ".", "read", "(", "width", ")", "data", "=", "data", "+", "zero_data", "[", "len", "(", "data", ")", ":", "]", "value", "=", "struct", ".", "unpack", "(", "b\"<i\"", ",", "data", ")", "[", "0", "]", "if", "debug_logging", ":", "logger", ".", "debug", "(", "\"Read RLE group with value %s of byte-width %s and count %s\"", ",", "value", ",", "width", ",", "count", ")", "for", "_", "in", "range", "(", "count", ")", ":", "yield", "value" ]
Read a run-length encoded run from the given fo with the given header and bit_width. The count is determined from the header and the width is used to grab the value that's repeated. Yields the value repeated count times.
[ "Read", "a", "run", "-", "length", "encoded", "run", "from", "the", "given", "fo", "with", "the", "given", "header", "and", "bit_width", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L110-L126
train
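In parquet's RLE/bit-packed hybrid, the low bit of the varint header selects the mode (0 = RLE run, 1 = bit-packed groups) and, for RLE, header >> 1 is the repeat count. A small example using this record's function:

import io
from parquet.encoding import read_rle

# header = 8 (0b1000): low bit 0 -> RLE, count = 8 >> 1 = 4.
# bit_width = 3 rounds up to one byte of value data (0x07).
print(list(read_rle(io.BytesIO(b'\x07'), 8, 3, False)))  # [7, 7, 7, 7]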
jcrobak/parquet-python
parquet/encoding.py
read_bitpacked_deprecated
def read_bitpacked_deprecated(file_obj, byte_count, count, width, debug_logging):
    """Read `count` values from `fo` using the deprecated bitpacking encoding."""
    raw_bytes = array.array(ARRAY_BYTE_STR, file_obj.read(byte_count)).tolist()

    mask = _mask_for_bits(width)
    index = 0
    res = []
    word = 0
    bits_in_word = 0
    while len(res) < count and index <= len(raw_bytes):
        if debug_logging:
            logger.debug("index = %d", index)
            logger.debug("bits in word = %d", bits_in_word)
            logger.debug("word = %s", bin(word))
        if bits_in_word >= width:
            # how many bits over the value is stored
            offset = (bits_in_word - width)
            # figure out the value
            value = (word & (mask << offset)) >> offset
            if debug_logging:
                logger.debug("offset = %d", offset)
                logger.debug("value = %d (%s)", value, bin(value))
            res.append(value)
            bits_in_word -= width
        else:
            word = (word << 8) | raw_bytes[index]
            index += 1
            bits_in_word += 8
    return res
python
def read_bitpacked_deprecated(file_obj, byte_count, count, width, debug_logging):
    """Read `count` values from `fo` using the deprecated bitpacking encoding."""
    raw_bytes = array.array(ARRAY_BYTE_STR, file_obj.read(byte_count)).tolist()

    mask = _mask_for_bits(width)
    index = 0
    res = []
    word = 0
    bits_in_word = 0
    while len(res) < count and index <= len(raw_bytes):
        if debug_logging:
            logger.debug("index = %d", index)
            logger.debug("bits in word = %d", bits_in_word)
            logger.debug("word = %s", bin(word))
        if bits_in_word >= width:
            # how many bits over the value is stored
            offset = (bits_in_word - width)
            # figure out the value
            value = (word & (mask << offset)) >> offset
            if debug_logging:
                logger.debug("offset = %d", offset)
                logger.debug("value = %d (%s)", value, bin(value))
            res.append(value)
            bits_in_word -= width
        else:
            word = (word << 8) | raw_bytes[index]
            index += 1
            bits_in_word += 8
    return res
[ "def", "read_bitpacked_deprecated", "(", "file_obj", ",", "byte_count", ",", "count", ",", "width", ",", "debug_logging", ")", ":", "raw_bytes", "=", "array", ".", "array", "(", "ARRAY_BYTE_STR", ",", "file_obj", ".", "read", "(", "byte_count", ")", ")", ".", "tolist", "(", ")", "mask", "=", "_mask_for_bits", "(", "width", ")", "index", "=", "0", "res", "=", "[", "]", "word", "=", "0", "bits_in_word", "=", "0", "while", "len", "(", "res", ")", "<", "count", "and", "index", "<=", "len", "(", "raw_bytes", ")", ":", "if", "debug_logging", ":", "logger", ".", "debug", "(", "\"index = %d\"", ",", "index", ")", "logger", ".", "debug", "(", "\"bits in word = %d\"", ",", "bits_in_word", ")", "logger", ".", "debug", "(", "\"word = %s\"", ",", "bin", "(", "word", ")", ")", "if", "bits_in_word", ">=", "width", ":", "# how many bits over the value is stored", "offset", "=", "(", "bits_in_word", "-", "width", ")", "# figure out the value", "value", "=", "(", "word", "&", "(", "mask", "<<", "offset", ")", ")", ">>", "offset", "if", "debug_logging", ":", "logger", ".", "debug", "(", "\"offset = %d\"", ",", "offset", ")", "logger", ".", "debug", "(", "\"value = %d (%s)\"", ",", "value", ",", "bin", "(", "value", ")", ")", "res", ".", "append", "(", "value", ")", "bits_in_word", "-=", "width", "else", ":", "word", "=", "(", "word", "<<", "8", ")", "|", "raw_bytes", "[", "index", "]", "index", "+=", "1", "bits_in_word", "+=", "8", "return", "res" ]
Read `count` values from `fo` using the deprecated bitpacking encoding.
[ "Read", "count", "values", "from", "fo", "using", "the", "deprecated", "bitpacking", "encoding", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/encoding.py#L183-L213
train
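The deprecated bit-packing fills values from the most significant bits of each byte downward, so with width 4 the single byte 0xAB decodes to 0xA then 0xB:

import io
from parquet.encoding import read_bitpacked_deprecated

# byte_count=1, count=2, width=4, debug_logging=False
print(read_bitpacked_deprecated(io.BytesIO(b'\xab'), 1, 2, 4, False))
# [10, 11]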
jcrobak/parquet-python
parquet/converted_types.py
_convert_unsigned
def _convert_unsigned(data, fmt): """Convert data from signed to unsigned in bulk.""" num = len(data) return struct.unpack( "{}{}".format(num, fmt.upper()).encode("utf-8"), struct.pack("{}{}".format(num, fmt).encode("utf-8"), *data) )
python
def _convert_unsigned(data, fmt):
    """Convert data from signed to unsigned in bulk."""
    num = len(data)
    return struct.unpack(
        "{}{}".format(num, fmt.upper()).encode("utf-8"),
        struct.pack("{}{}".format(num, fmt).encode("utf-8"), *data)
    )
[ "def", "_convert_unsigned", "(", "data", ",", "fmt", ")", ":", "num", "=", "len", "(", "data", ")", "return", "struct", ".", "unpack", "(", "\"{}{}\"", ".", "format", "(", "num", ",", "fmt", ".", "upper", "(", ")", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "struct", ".", "pack", "(", "\"{}{}\"", ".", "format", "(", "num", ",", "fmt", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "*", "data", ")", ")" ]
Convert data from signed to unsigned in bulk.
[ "Convert", "data", "from", "signed", "to", "unsigned", "in", "bulk", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/converted_types.py#L52-L58
train
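The round-trip above leans on struct: pack with the signed format code, unpack with its uppercase unsigned twin. A self-contained illustration of the same idea (plain format strings here; the .encode("utf-8") calls in the record are a Python 2/3 compatibility detail):

import struct


def reinterpret_unsigned(data, fmt):
    """Pack as signed, unpack as unsigned, in bulk (same trick as above)."""
    num = len(data)
    return struct.unpack("{}{}".format(num, fmt.upper()),
                         struct.pack("{}{}".format(num, fmt), *data))


# 'b' is a signed byte; 'B' is its unsigned twin, so -1 becomes 255.
assert reinterpret_unsigned([-1, -2, 127], 'b') == (255, 254, 127)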
jcrobak/parquet-python
parquet/converted_types.py
convert_column
def convert_column(data, schemae):
    """Convert known types from primitive to rich."""
    ctype = schemae.converted_type
    if ctype == parquet_thrift.ConvertedType.DECIMAL:
        scale_factor = Decimal("10e-{}".format(schemae.scale))
        if schemae.type == parquet_thrift.Type.INT32 or schemae.type == parquet_thrift.Type.INT64:
            return [Decimal(unscaled) * scale_factor for unscaled in data]
        return [Decimal(intbig(unscaled)) * scale_factor for unscaled in data]
    elif ctype == parquet_thrift.ConvertedType.DATE:
        return [datetime.date.fromordinal(d) for d in data]
    elif ctype == parquet_thrift.ConvertedType.TIME_MILLIS:
        return [datetime.timedelta(milliseconds=d) for d in data]
    elif ctype == parquet_thrift.ConvertedType.TIMESTAMP_MILLIS:
        return [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in data]
    elif ctype == parquet_thrift.ConvertedType.UTF8:
        return [codecs.decode(item, "utf-8") for item in data]
    elif ctype == parquet_thrift.ConvertedType.UINT_8:
        return _convert_unsigned(data, 'b')
    elif ctype == parquet_thrift.ConvertedType.UINT_16:
        return _convert_unsigned(data, 'h')
    elif ctype == parquet_thrift.ConvertedType.UINT_32:
        return _convert_unsigned(data, 'i')
    elif ctype == parquet_thrift.ConvertedType.UINT_64:
        return _convert_unsigned(data, 'q')
    elif ctype == parquet_thrift.ConvertedType.JSON:
        return [json.loads(s) for s in codecs.iterdecode(data, "utf-8")]
    elif ctype == parquet_thrift.ConvertedType.BSON and bson:
        return [bson.BSON(s).decode() for s in data]
    else:
        logger.info("Converted type '%s'' not handled",
                    parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype])  # pylint:disable=protected-access
    return data
python
def convert_column(data, schemae):
    """Convert known types from primitive to rich."""
    ctype = schemae.converted_type
    if ctype == parquet_thrift.ConvertedType.DECIMAL:
        scale_factor = Decimal("10e-{}".format(schemae.scale))
        if schemae.type == parquet_thrift.Type.INT32 or schemae.type == parquet_thrift.Type.INT64:
            return [Decimal(unscaled) * scale_factor for unscaled in data]
        return [Decimal(intbig(unscaled)) * scale_factor for unscaled in data]
    elif ctype == parquet_thrift.ConvertedType.DATE:
        return [datetime.date.fromordinal(d) for d in data]
    elif ctype == parquet_thrift.ConvertedType.TIME_MILLIS:
        return [datetime.timedelta(milliseconds=d) for d in data]
    elif ctype == parquet_thrift.ConvertedType.TIMESTAMP_MILLIS:
        return [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in data]
    elif ctype == parquet_thrift.ConvertedType.UTF8:
        return [codecs.decode(item, "utf-8") for item in data]
    elif ctype == parquet_thrift.ConvertedType.UINT_8:
        return _convert_unsigned(data, 'b')
    elif ctype == parquet_thrift.ConvertedType.UINT_16:
        return _convert_unsigned(data, 'h')
    elif ctype == parquet_thrift.ConvertedType.UINT_32:
        return _convert_unsigned(data, 'i')
    elif ctype == parquet_thrift.ConvertedType.UINT_64:
        return _convert_unsigned(data, 'q')
    elif ctype == parquet_thrift.ConvertedType.JSON:
        return [json.loads(s) for s in codecs.iterdecode(data, "utf-8")]
    elif ctype == parquet_thrift.ConvertedType.BSON and bson:
        return [bson.BSON(s).decode() for s in data]
    else:
        logger.info("Converted type '%s'' not handled",
                    parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype])  # pylint:disable=protected-access
    return data
[ "def", "convert_column", "(", "data", ",", "schemae", ")", ":", "ctype", "=", "schemae", ".", "converted_type", "if", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "DECIMAL", ":", "scale_factor", "=", "Decimal", "(", "\"10e-{}\"", ".", "format", "(", "schemae", ".", "scale", ")", ")", "if", "schemae", ".", "type", "==", "parquet_thrift", ".", "Type", ".", "INT32", "or", "schemae", ".", "type", "==", "parquet_thrift", ".", "Type", ".", "INT64", ":", "return", "[", "Decimal", "(", "unscaled", ")", "*", "scale_factor", "for", "unscaled", "in", "data", "]", "return", "[", "Decimal", "(", "intbig", "(", "unscaled", ")", ")", "*", "scale_factor", "for", "unscaled", "in", "data", "]", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "DATE", ":", "return", "[", "datetime", ".", "date", ".", "fromordinal", "(", "d", ")", "for", "d", "in", "data", "]", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "TIME_MILLIS", ":", "return", "[", "datetime", ".", "timedelta", "(", "milliseconds", "=", "d", ")", "for", "d", "in", "data", "]", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "TIMESTAMP_MILLIS", ":", "return", "[", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "d", "/", "1000.0", ")", "for", "d", "in", "data", "]", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "UTF8", ":", "return", "[", "codecs", ".", "decode", "(", "item", ",", "\"utf-8\"", ")", "for", "item", "in", "data", "]", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "UINT_8", ":", "return", "_convert_unsigned", "(", "data", ",", "'b'", ")", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "UINT_16", ":", "return", "_convert_unsigned", "(", "data", ",", "'h'", ")", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "UINT_32", ":", "return", "_convert_unsigned", "(", "data", ",", "'i'", ")", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "UINT_64", ":", "return", "_convert_unsigned", "(", "data", ",", "'q'", ")", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "JSON", ":", "return", "[", "json", ".", "loads", "(", "s", ")", "for", "s", "in", "codecs", ".", "iterdecode", "(", "data", ",", "\"utf-8\"", ")", "]", "elif", "ctype", "==", "parquet_thrift", ".", "ConvertedType", ".", "BSON", "and", "bson", ":", "return", "[", "bson", ".", "BSON", "(", "s", ")", ".", "decode", "(", ")", "for", "s", "in", "data", "]", "else", ":", "logger", ".", "info", "(", "\"Converted type '%s'' not handled\"", ",", "parquet_thrift", ".", "ConvertedType", ".", "_VALUES_TO_NAMES", "[", "ctype", "]", ")", "# pylint:disable=protected-access", "return", "data" ]
Convert known types from primitive to rich.
[ "Convert", "known", "types", "from", "primitive", "to", "rich", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/converted_types.py#L61-L92
train
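Of the branches above, TIMESTAMP_MILLIS is easy to verify in isolation: the stored value is milliseconds since the Unix epoch, divided by 1000.0 before the datetime conversion. A stand-alone check:

import datetime

millis = [0, 1500000000123]
converted = [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in millis]

assert converted[0] == datetime.datetime(1970, 1, 1, 0, 0)
assert converted[1].year == 2017  # 1500000000 s is 2017-07-14T02:40:00 UTC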
jcrobak/parquet-python
parquet/__main__.py
setup_logging
def setup_logging(options=None):
    """Configure logging based on options."""
    level = logging.DEBUG if options is not None and options.debug \
        else logging.WARNING
    console = logging.StreamHandler()
    console.setLevel(level)
    formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('parquet').setLevel(level)
    logging.getLogger('parquet').addHandler(console)
python
def setup_logging(options=None):
    """Configure logging based on options."""
    level = logging.DEBUG if options is not None and options.debug \
        else logging.WARNING
    console = logging.StreamHandler()
    console.setLevel(level)
    formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('parquet').setLevel(level)
    logging.getLogger('parquet').addHandler(console)
[ "def", "setup_logging", "(", "options", "=", "None", ")", ":", "level", "=", "logging", ".", "DEBUG", "if", "options", "is", "not", "None", "and", "options", ".", "debug", "else", "logging", ".", "WARNING", "console", "=", "logging", ".", "StreamHandler", "(", ")", "console", ".", "setLevel", "(", "level", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(name)s: %(levelname)-8s %(message)s'", ")", "console", ".", "setFormatter", "(", "formatter", ")", "logging", ".", "getLogger", "(", "'parquet'", ")", ".", "setLevel", "(", "level", ")", "logging", ".", "getLogger", "(", "'parquet'", ")", ".", "addHandler", "(", "console", ")" ]
Configure logging based on options.
[ "Configure", "logging", "based", "on", "options", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__main__.py#L13-L22
train
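The record shows the standard recipe: derive a level from the options, attach a formatted StreamHandler, and scope both to one named logger. The same recipe applied to an arbitrary logger, with the parsed options object faked via a namespace:

import logging
from types import SimpleNamespace

opts = SimpleNamespace(debug=True)  # stand-in for the parsed options object

level = logging.DEBUG if opts.debug else logging.WARNING
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(logging.Formatter('%(name)s: %(levelname)-8s %(message)s'))

log = logging.getLogger('demo')
log.setLevel(level)
log.addHandler(console)
log.debug("handler and level configured")  # now visible on stderr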
jcrobak/parquet-python
parquet/__main__.py
main
def main(argv=None):
    """Run parquet utility application."""
    argv = argv or sys.argv[1:]

    parser = argparse.ArgumentParser('parquet', description='Read parquet files')
    parser.add_argument('--metadata', action='store_true',
                        help='show metadata on file')
    parser.add_argument('--row-group-metadata', action='store_true',
                        help="show per row group metadata")
    parser.add_argument('--no-data', action='store_true',
                        help="don't dump any data from the file")
    parser.add_argument('--limit', action='store', type=int, default=-1,
                        help='max records to output')
    parser.add_argument('--col', action='append', type=str,
                        help='only include this column (can be '
                             'specified multiple times)')
    parser.add_argument('--no-headers', action='store_true',
                        help='skip headers in output (only applies if '
                             'format=csv)')
    parser.add_argument('--format', action='store', type=str, default='csv',
                        help='format for the output data. can be csv or json.')
    parser.add_argument('--debug', action='store_true',
                        help='log debug info to stderr')
    parser.add_argument('file', help='path to the file to parse')

    args = parser.parse_args(argv)
    setup_logging(args)

    import parquet

    if args.metadata:
        parquet.dump_metadata(args.file, args.row_group_metadata)
    if not args.no_data:
        parquet.dump(args.file, args)
python
def main(argv=None):
    """Run parquet utility application."""
    argv = argv or sys.argv[1:]

    parser = argparse.ArgumentParser('parquet', description='Read parquet files')
    parser.add_argument('--metadata', action='store_true',
                        help='show metadata on file')
    parser.add_argument('--row-group-metadata', action='store_true',
                        help="show per row group metadata")
    parser.add_argument('--no-data', action='store_true',
                        help="don't dump any data from the file")
    parser.add_argument('--limit', action='store', type=int, default=-1,
                        help='max records to output')
    parser.add_argument('--col', action='append', type=str,
                        help='only include this column (can be '
                             'specified multiple times)')
    parser.add_argument('--no-headers', action='store_true',
                        help='skip headers in output (only applies if '
                             'format=csv)')
    parser.add_argument('--format', action='store', type=str, default='csv',
                        help='format for the output data. can be csv or json.')
    parser.add_argument('--debug', action='store_true',
                        help='log debug info to stderr')
    parser.add_argument('file', help='path to the file to parse')

    args = parser.parse_args(argv)
    setup_logging(args)

    import parquet

    if args.metadata:
        parquet.dump_metadata(args.file, args.row_group_metadata)
    if not args.no_data:
        parquet.dump(args.file, args)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "argv", "=", "argv", "or", "sys", ".", "argv", "[", "1", ":", "]", "parser", "=", "argparse", ".", "ArgumentParser", "(", "'parquet'", ",", "description", "=", "'Read parquet files'", ")", "parser", ".", "add_argument", "(", "'--metadata'", ",", "action", "=", "'store_true'", ",", "help", "=", "'show metadata on file'", ")", "parser", ".", "add_argument", "(", "'--row-group-metadata'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"show per row group metadata\"", ")", "parser", ".", "add_argument", "(", "'--no-data'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"don't dump any data from the file\"", ")", "parser", ".", "add_argument", "(", "'--limit'", ",", "action", "=", "'store'", ",", "type", "=", "int", ",", "default", "=", "-", "1", ",", "help", "=", "'max records to output'", ")", "parser", ".", "add_argument", "(", "'--col'", ",", "action", "=", "'append'", ",", "type", "=", "str", ",", "help", "=", "'only include this column (can be '", "'specified multiple times)'", ")", "parser", ".", "add_argument", "(", "'--no-headers'", ",", "action", "=", "'store_true'", ",", "help", "=", "'skip headers in output (only applies if '", "'format=csv)'", ")", "parser", ".", "add_argument", "(", "'--format'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "'csv'", ",", "help", "=", "'format for the output data. can be csv or json.'", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "action", "=", "'store_true'", ",", "help", "=", "'log debug info to stderr'", ")", "parser", ".", "add_argument", "(", "'file'", ",", "help", "=", "'path to the file to parse'", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "setup_logging", "(", "args", ")", "import", "parquet", "if", "args", ".", "metadata", ":", "parquet", ".", "dump_metadata", "(", "args", ".", "file", ",", "args", ".", "row_group_metadata", ")", "if", "not", "args", ".", "no_data", ":", "parquet", ".", "dump", "(", "args", ".", "file", ",", "args", ")" ]
Run parquet utility application.
[ "Run", "parquet", "utility", "application", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/__main__.py#L25-L61
train
jcrobak/parquet-python
parquet/schema.py
SchemaHelper.is_required
def is_required(self, name):
    """Return true iff the schema element with the given name is required."""
    return self.schema_element(name).repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED
python
def is_required(self, name):
    """Return true iff the schema element with the given name is required."""
    return self.schema_element(name).repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED
[ "def", "is_required", "(", "self", ",", "name", ")", ":", "return", "self", ".", "schema_element", "(", "name", ")", ".", "repetition_type", "==", "parquet_thrift", ".", "FieldRepetitionType", ".", "REQUIRED" ]
Return true iff the schema element with the given name is required.
[ "Return", "true", "iff", "the", "schema", "element", "with", "the", "given", "name", "is", "required", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/schema.py#L30-L32
train
jcrobak/parquet-python
parquet/schema.py
SchemaHelper.max_repetition_level
def max_repetition_level(self, path):
    """Get the max repetition level for the given schema path."""
    max_level = 0
    for part in path:
        element = self.schema_element(part)
        if element.repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED:
            max_level += 1
    return max_level
python
def max_repetition_level(self, path):
    """Get the max repetition level for the given schema path."""
    max_level = 0
    for part in path:
        element = self.schema_element(part)
        if element.repetition_type == parquet_thrift.FieldRepetitionType.REQUIRED:
            max_level += 1
    return max_level
[ "def", "max_repetition_level", "(", "self", ",", "path", ")", ":", "max_level", "=", "0", "for", "part", "in", "path", ":", "element", "=", "self", ".", "schema_element", "(", "part", ")", "if", "element", ".", "repetition_type", "==", "parquet_thrift", ".", "FieldRepetitionType", ".", "REQUIRED", ":", "max_level", "+=", "1", "return", "max_level" ]
Get the max repetition level for the given schema path.
[ "Get", "the", "max", "repetition", "level", "for", "the", "given", "schema", "path", "." ]
e2caab7aceca91a3075998d0113e186f8ba2ca37
https://github.com/jcrobak/parquet-python/blob/e2caab7aceca91a3075998d0113e186f8ba2ca37/parquet/schema.py#L34-L41
train
joke2k/django-faker
django_faker/populator.py
Populator.execute
def execute(self, using=None):
    """
    Populate the database using all the Entity classes previously added.

    :param using A Django database connection name
    :rtype: A dict mapping each Entity class to a list of the inserted PKs
    """
    if not using:
        using = self.getConnection()

    insertedEntities = {}
    for klass in self.orders:
        number = self.quantities[klass]
        if klass not in insertedEntities:
            insertedEntities[klass] = []
        for i in range(0, number):
            insertedEntities[klass].append(
                self.entities[klass].execute(using, insertedEntities)
            )
    return insertedEntities
python
def execute(self, using=None):
    """
    Populate the database using all the Entity classes previously added.

    :param using A Django database connection name
    :rtype: A dict mapping each Entity class to a list of the inserted PKs
    """
    if not using:
        using = self.getConnection()

    insertedEntities = {}
    for klass in self.orders:
        number = self.quantities[klass]
        if klass not in insertedEntities:
            insertedEntities[klass] = []
        for i in range(0, number):
            insertedEntities[klass].append(
                self.entities[klass].execute(using, insertedEntities)
            )
    return insertedEntities
[ "def", "execute", "(", "self", ",", "using", "=", "None", ")", ":", "if", "not", "using", ":", "using", "=", "self", ".", "getConnection", "(", ")", "insertedEntities", "=", "{", "}", "for", "klass", "in", "self", ".", "orders", ":", "number", "=", "self", ".", "quantities", "[", "klass", "]", "if", "klass", "not", "in", "insertedEntities", ":", "insertedEntities", "[", "klass", "]", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "number", ")", ":", "insertedEntities", "[", "klass", "]", ".", "append", "(", "self", ".", "entities", "[", "klass", "]", ".", "execute", "(", "using", ",", "insertedEntities", ")", ")", "return", "insertedEntities" ]
Populate the database using all the Entity classes previously added.

:param using A Django database connection name
:rtype: A dict mapping each Entity class to a list of the inserted PKs
[ "Populate", "the", "database", "using", "all", "the", "Entity", "classes", "previously", "added", "." ]
345e3eebcf636e2566d9890ae7b35788ebdb5173
https://github.com/joke2k/django-faker/blob/345e3eebcf636e2566d9890ae7b35788ebdb5173/django_faker/populator.py#L147-L165
train
joke2k/django-faker
django_faker/__init__.py
Faker.getGenerator
def getGenerator(cls, locale=None, providers=None, codename=None):
    """
    use a codename to cache generators
    """
    codename = codename or cls.getCodename(locale, providers)

    if codename not in cls.generators:
        from faker import Faker as FakerGenerator
        # initialize with faker.generator.Generator instance
        # and remember in cache
        cls.generators[codename] = FakerGenerator(locale, providers)
        cls.generators[codename].seed(cls.generators[codename].randomInt())

    return cls.generators[codename]
python
def getGenerator(cls, locale=None, providers=None, codename=None):
    """
    use a codename to cache generators
    """
    codename = codename or cls.getCodename(locale, providers)

    if codename not in cls.generators:
        from faker import Faker as FakerGenerator
        # initialize with faker.generator.Generator instance
        # and remember in cache
        cls.generators[codename] = FakerGenerator(locale, providers)
        cls.generators[codename].seed(cls.generators[codename].randomInt())

    return cls.generators[codename]
[ "def", "getGenerator", "(", "cls", ",", "locale", "=", "None", ",", "providers", "=", "None", ",", "codename", "=", "None", ")", ":", "codename", "=", "codename", "or", "cls", ".", "getCodename", "(", "locale", ",", "providers", ")", "if", "codename", "not", "in", "cls", ".", "generators", ":", "from", "faker", "import", "Faker", "as", "FakerGenerator", "# initialize with faker.generator.Generator instance", "# and remember in cache", "cls", ".", "generators", "[", "codename", "]", "=", "FakerGenerator", "(", "locale", ",", "providers", ")", "cls", ".", "generators", "[", "codename", "]", ".", "seed", "(", "cls", ".", "generators", "[", "codename", "]", ".", "randomInt", "(", ")", ")", "return", "cls", ".", "generators", "[", "codename", "]" ]
use a codename to cache generators
[ "use", "a", "codename", "to", "cache", "generators" ]
345e3eebcf636e2566d9890ae7b35788ebdb5173
https://github.com/joke2k/django-faker/blob/345e3eebcf636e2566d9890ae7b35788ebdb5173/django_faker/__init__.py#L48-L62
train
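getGenerator memoises one generator per codename. The same cache shape reduced to plain Python; the names below are illustrative stand-ins, not django-faker API:

_cache = {}


def get_instance(locale=None, providers=None):
    """Create each (locale, providers) combination once, then reuse it."""
    codename = (locale, tuple(providers or ()))  # illustrative cache key
    if codename not in _cache:
        _cache[codename] = object()  # stand-in for FakerGenerator(locale, providers)
    return _cache[codename]


assert get_instance('en_US') is get_instance('en_US')
assert get_instance('en_US') is not get_instance('it_IT')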
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
_point_in_bbox
def _point_in_bbox(point, bounds):
    """
    valid whether the point is inside the bounding box
    """
    return not(point['coordinates'][1] < bounds[0] or
               point['coordinates'][1] > bounds[2] or
               point['coordinates'][0] < bounds[1] or
               point['coordinates'][0] > bounds[3])
python
def _point_in_bbox(point, bounds):
    """
    valid whether the point is inside the bounding box
    """
    return not(point['coordinates'][1] < bounds[0] or
               point['coordinates'][1] > bounds[2] or
               point['coordinates'][0] < bounds[1] or
               point['coordinates'][0] > bounds[3])
[ "def", "_point_in_bbox", "(", "point", ",", "bounds", ")", ":", "return", "not", "(", "point", "[", "'coordinates'", "]", "[", "1", "]", "<", "bounds", "[", "0", "]", "or", "point", "[", "'coordinates'", "]", "[", "1", "]", ">", "bounds", "[", "2", "]", "or", "point", "[", "'coordinates'", "]", "[", "0", "]", "<", "bounds", "[", "1", "]", "or", "point", "[", "'coordinates'", "]", "[", "0", "]", ">", "bounds", "[", "3", "]", ")" ]
valid whether the point is inside the bounding box
[ "valid", "whether", "the", "point", "is", "inside", "the", "bounding", "box" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L56-L61
train
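Note the bounds layout the predicate above assumes: [south, west, north, east], with GeoJSON coordinates given as [lon, lat]. A quick self-contained check using that layout:

def point_in_bbox(point, bounds):
    """Same test as the record: bounds are [south, west, north, east]."""
    lon, lat = point['coordinates'][0], point['coordinates'][1]
    return not (lat < bounds[0] or lat > bounds[2] or
                lon < bounds[1] or lon > bounds[3])


inside = {'type': 'Point', 'coordinates': [121.5, 31.2]}   # lon, lat
outside = {'type': 'Point', 'coordinates': [10.0, 31.2]}
bounds = [30.0, 120.0, 32.0, 123.0]                        # S, W, N, E

assert point_in_bbox(inside, bounds) is True
assert point_in_bbox(outside, bounds) is False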
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
point_in_polygon
def point_in_polygon(point, poly):
    """
    valid whether the point is located in a polygon

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    if(point inside poly) return true else false
    """
    coords = [poly['coordinates']] if poly['type'] == 'Polygon' else poly['coordinates']
    return _point_in_polygon(point, coords)
python
def point_in_polygon(point, poly):
    """
    valid whether the point is located in a polygon

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    if(point inside poly) return true else false
    """
    coords = [poly['coordinates']] if poly['type'] == 'Polygon' else poly['coordinates']
    return _point_in_polygon(point, coords)
[ "def", "point_in_polygon", "(", "point", ",", "poly", ")", ":", "coords", "=", "[", "poly", "[", "'coordinates'", "]", "]", "if", "poly", "[", "'type'", "]", "==", "'Polygon'", "else", "poly", "[", "'coordinates'", "]", "return", "_point_in_polygon", "(", "point", ",", "coords", ")" ]
valid whether the point is located in a polygon

Keyword arguments:
point -- point geojson object
poly -- polygon geojson object

if(point inside poly) return true else false
[ "valid", "whether", "the", "point", "is", "located", "in", "a", "polygon" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L111-L123
train
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
draw_circle
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle shape polygon based on centerPoint and radius

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- center point geojson object
    steps -- number of vertices on the circle (minimum 15)

    return circle polygon geojson object
    """
    steps = steps if steps > 15 else 15
    center = [center_point['coordinates'][1], center_point['coordinates'][0]]
    dist = (radius_in_meters / 1000) / 6371
    # convert meters to radiant
    rad_center = [number2radius(center[0]), number2radius(center[1])]
    # 15 sided circle
    poly = []
    for step in range(0, steps):
        brng = 2 * math.pi * step / steps
        lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
                        math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
        lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]),
                                         math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
        poly.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [poly]}
python
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle shape polygon based on centerPoint and radius

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- center point geojson object
    steps -- number of vertices on the circle (minimum 15)

    return circle polygon geojson object
    """
    steps = steps if steps > 15 else 15
    center = [center_point['coordinates'][1], center_point['coordinates'][0]]
    dist = (radius_in_meters / 1000) / 6371
    # convert meters to radiant
    rad_center = [number2radius(center[0]), number2radius(center[1])]
    # 15 sided circle
    poly = []
    for step in range(0, steps):
        brng = 2 * math.pi * step / steps
        lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
                        math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
        lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]),
                                         math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
        poly.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [poly]}
[ "def", "draw_circle", "(", "radius_in_meters", ",", "center_point", ",", "steps", "=", "15", ")", ":", "steps", "=", "steps", "if", "steps", ">", "15", "else", "15", "center", "=", "[", "center_point", "[", "'coordinates'", "]", "[", "1", "]", ",", "center_point", "[", "'coordinates'", "]", "[", "0", "]", "]", "dist", "=", "(", "radius_in_meters", "/", "1000", ")", "/", "6371", "# convert meters to radiant", "rad_center", "=", "[", "number2radius", "(", "center", "[", "0", "]", ")", ",", "number2radius", "(", "center", "[", "1", "]", ")", "]", "# 15 sided circle", "poly", "=", "[", "]", "for", "step", "in", "range", "(", "0", ",", "steps", ")", ":", "brng", "=", "2", "*", "math", ".", "pi", "*", "step", "/", "steps", "lat", "=", "math", ".", "asin", "(", "math", ".", "sin", "(", "rad_center", "[", "0", "]", ")", "*", "math", ".", "cos", "(", "dist", ")", "+", "math", ".", "cos", "(", "rad_center", "[", "0", "]", ")", "*", "math", ".", "sin", "(", "dist", ")", "*", "math", ".", "cos", "(", "brng", ")", ")", "lng", "=", "rad_center", "[", "1", "]", "+", "math", ".", "atan2", "(", "math", ".", "sin", "(", "brng", ")", "*", "math", ".", "sin", "(", "dist", ")", "*", "math", ".", "cos", "(", "rad_center", "[", "0", "]", ")", ",", "math", ".", "cos", "(", "dist", ")", "-", "math", ".", "sin", "(", "rad_center", "[", "0", "]", ")", "*", "math", ".", "sin", "(", "lat", ")", ")", "poly", ".", "append", "(", "[", "number2degree", "(", "lng", ")", ",", "number2degree", "(", "lat", ")", "]", ")", "return", "{", "\"type\"", ":", "\"Polygon\"", ",", "\"coordinates\"", ":", "[", "poly", "]", "}" ]
get a circle shape polygon based on centerPoint and radius

Keyword arguments:
radius_in_meters -- circle radius in meters
center_point -- center point geojson object
steps -- number of vertices on the circle (minimum 15)

return circle polygon geojson object
[ "get", "a", "circle", "shape", "polygon", "based", "on", "centerPoint", "and", "radius" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L170-L194
train
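Calling the function per this record (the import path is assumed from the record's path field; steps below the minimum of 15 are clamped, and coordinates are [lon, lat]):

# Import path assumed from the record's `path` field.
from geojson_utils.geojson_utils import draw_circle

center = {'type': 'Point', 'coordinates': [121.47, 31.23]}  # lon, lat
circle = draw_circle(500, center, steps=32)  # 500 m radius, 32 vertices

assert circle['type'] == 'Polygon'
assert len(circle['coordinates'][0]) == 32  # one vertex per step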
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
rectangle_centroid
def rectangle_centroid(rectangle):
    """
    get the centroid of the rectangle

    Keyword arguments:
    rectangle -- polygon geojson object

    return centroid
    """
    bbox = rectangle['coordinates'][0]
    xmin = bbox[0][0]
    ymin = bbox[0][1]
    xmax = bbox[2][0]
    ymax = bbox[2][1]
    xwidth = xmax - xmin
    ywidth = ymax - ymin
    return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
python
def rectangle_centroid(rectangle):
    """
    get the centroid of the rectangle

    Keyword arguments:
    rectangle -- polygon geojson object

    return centroid
    """
    bbox = rectangle['coordinates'][0]
    xmin = bbox[0][0]
    ymin = bbox[0][1]
    xmax = bbox[2][0]
    ymax = bbox[2][1]
    xwidth = xmax - xmin
    ywidth = ymax - ymin
    return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
[ "def", "rectangle_centroid", "(", "rectangle", ")", ":", "bbox", "=", "rectangle", "[", "'coordinates'", "]", "[", "0", "]", "xmin", "=", "bbox", "[", "0", "]", "[", "0", "]", "ymin", "=", "bbox", "[", "0", "]", "[", "1", "]", "xmax", "=", "bbox", "[", "2", "]", "[", "0", "]", "ymax", "=", "bbox", "[", "2", "]", "[", "1", "]", "xwidth", "=", "xmax", "-", "xmin", "ywidth", "=", "ymax", "-", "ymin", "return", "{", "'type'", ":", "'Point'", ",", "'coordinates'", ":", "[", "xmin", "+", "xwidth", "/", "2", ",", "ymin", "+", "ywidth", "/", "2", "]", "}" ]
get the centroid of the rectangle

Keyword arguments:
rectangle -- polygon geojson object

return centroid
[ "get", "the", "centroid", "of", "the", "rectangle" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L197-L213
train
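Because only corners 0 and 2 of the exterior ring are read, the function assumes an axis-aligned rectangle whose ring lists opposite corners first and third. A self-contained midpoint check:

def rect_centroid(rectangle):
    """Midpoint of corners 0 and 2, as in the record above."""
    ring = rectangle['coordinates'][0]
    (xmin, ymin), (xmax, ymax) = ring[0], ring[2]
    return {'type': 'Point',
            'coordinates': [xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2]}


rect = {'type': 'Polygon',
        'coordinates': [[[0, 0], [4, 0], [4, 2], [0, 2], [0, 0]]]}
assert rect_centroid(rect)['coordinates'] == [2.0, 1.0]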
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
geometry_within_radius
def geometry_within_radius(geometry, center, radius):
    """
    To valid whether point or linestring or polygon is inside a radius around a center

    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
    center -- point geojson object
    radius -- radius

    if(geometry inside radius) return true else false
    """
    if geometry['type'] == 'Point':
        return point_distance(geometry, center) <= radius
    elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
        point = {}
        # it's enough to check the exterior ring of the Polygon
        coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
        for coordinate in coordinates:
            point['coordinates'] = coordinate
            if point_distance(point, center) > radius:
                return False
        return True
python
def geometry_within_radius(geometry, center, radius):
    """
    To valid whether point or linestring or polygon is inside a radius around a center

    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
    center -- point geojson object
    radius -- radius

    if(geometry inside radius) return true else false
    """
    if geometry['type'] == 'Point':
        return point_distance(geometry, center) <= radius
    elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
        point = {}
        # it's enough to check the exterior ring of the Polygon
        coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
        for coordinate in coordinates:
            point['coordinates'] = coordinate
            if point_distance(point, center) > radius:
                return False
        return True
[ "def", "geometry_within_radius", "(", "geometry", ",", "center", ",", "radius", ")", ":", "if", "geometry", "[", "'type'", "]", "==", "'Point'", ":", "return", "point_distance", "(", "geometry", ",", "center", ")", "<=", "radius", "elif", "geometry", "[", "'type'", "]", "==", "'LineString'", "or", "geometry", "[", "'type'", "]", "==", "'Polygon'", ":", "point", "=", "{", "}", "# it's enough to check the exterior ring of the Polygon", "coordinates", "=", "geometry", "[", "'coordinates'", "]", "[", "0", "]", "if", "geometry", "[", "'type'", "]", "==", "'Polygon'", "else", "geometry", "[", "'coordinates'", "]", "for", "coordinate", "in", "coordinates", ":", "point", "[", "'coordinates'", "]", "=", "coordinate", "if", "point_distance", "(", "point", ",", "center", ")", ">", "radius", ":", "return", "False", "return", "True" ]
To valid whether point or linestring or polygon is inside a radius around a center

Keyword arguments:
geometry -- point/linestring/polygon geojson object
center -- point geojson object
radius -- radius

if(geometry inside radius) return true else false
[ "To", "valid", "whether", "point", "or", "linestring", "or", "polygon", "is", "inside", "a", "radius", "around", "a", "center" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L264-L286
train
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
area
def area(poly):
    """
    calculate the area of polygon

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area
    """
    poly_area = 0
    # TODO: polygon holes at coordinates[1]
    points = poly['coordinates'][0]
    j = len(points) - 1
    count = len(points)

    for i in range(0, count):
        p1_x = points[i][1]
        p1_y = points[i][0]
        p2_x = points[j][1]
        p2_y = points[j][0]

        poly_area += p1_x * p2_y
        poly_area -= p1_y * p2_x
        j = i

    poly_area /= 2
    return poly_area
python
def area(poly):
    """
    calculate the area of polygon

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area
    """
    poly_area = 0
    # TODO: polygon holes at coordinates[1]
    points = poly['coordinates'][0]
    j = len(points) - 1
    count = len(points)

    for i in range(0, count):
        p1_x = points[i][1]
        p1_y = points[i][0]
        p2_x = points[j][1]
        p2_y = points[j][0]

        poly_area += p1_x * p2_y
        poly_area -= p1_y * p2_x
        j = i

    poly_area /= 2
    return poly_area
[ "def", "area", "(", "poly", ")", ":", "poly_area", "=", "0", "# TODO: polygon holes at coordinates[1]", "points", "=", "poly", "[", "'coordinates'", "]", "[", "0", "]", "j", "=", "len", "(", "points", ")", "-", "1", "count", "=", "len", "(", "points", ")", "for", "i", "in", "range", "(", "0", ",", "count", ")", ":", "p1_x", "=", "points", "[", "i", "]", "[", "1", "]", "p1_y", "=", "points", "[", "i", "]", "[", "0", "]", "p2_x", "=", "points", "[", "j", "]", "[", "1", "]", "p2_y", "=", "points", "[", "j", "]", "[", "0", "]", "poly_area", "+=", "p1_x", "*", "p2_y", "poly_area", "-=", "p1_y", "*", "p2_x", "j", "=", "i", "poly_area", "/=", "2", "return", "poly_area" ]
calculate the area of polygon

Keyword arguments:
poly -- polygon geojson object

return polygon area
[ "calculate", "the", "area", "of", "polygon" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L289-L315
train
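The loop above is the shoelace formula: the result is signed by ring orientation and expressed in squared coordinate units rather than metres. A stand-alone version checked on a unit square:

def shoelace_area(ring):
    """Signed shoelace area of a coordinate ring (same loop shape as above)."""
    total, j = 0.0, len(ring) - 1
    for i in range(len(ring)):
        total += ring[i][0] * ring[j][1] - ring[j][0] * ring[i][1]
        j = i
    return total / 2


square = [[0, 0], [1, 0], [1, 1], [0, 1]]
assert abs(shoelace_area(square)) == 1.0  # sign flips with winding order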
brandonxiang/geojson-python-utils
geojson_utils/geojson_utils.py
destination_point
def destination_point(point, brng, dist):
    """
    Calculate a destination Point base on a base point and a distance

    Keyword arguments:
    point -- point geojson object
    brng -- an angle in degrees
    dist -- distance in Kilometer between destination and base point

    return destination point object
    """
    dist = float(dist) / 6371  # convert dist to angular distance in radians
    brng = number2radius(brng)

    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])

    lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
                     math.cos(lat1) * math.sin(dist) * math.cos(brng))
    lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(lat1),
                             math.cos(dist) - math.sin(lat1) * math.sin(lat2))
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi  # normalise to -180 degree +180 degree
    return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
python
def destination_point(point, brng, dist):
    """
    Calculate a destination Point base on a base point and a distance

    Keyword arguments:
    point -- point geojson object
    brng -- an angle in degrees
    dist -- distance in Kilometer between destination and base point

    return destination point object
    """
    dist = float(dist) / 6371  # convert dist to angular distance in radians
    brng = number2radius(brng)

    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])

    lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
                     math.cos(lat1) * math.sin(dist) * math.cos(brng))
    lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(lat1),
                             math.cos(dist) - math.sin(lat1) * math.sin(lat2))
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi  # normalise to -180 degree +180 degree
    return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
[ "def", "destination_point", "(", "point", ",", "brng", ",", "dist", ")", ":", "dist", "=", "float", "(", "dist", ")", "/", "6371", "# convert dist to angular distance in radians", "brng", "=", "number2radius", "(", "brng", ")", "lon1", "=", "number2radius", "(", "point", "[", "'coordinates'", "]", "[", "0", "]", ")", "lat1", "=", "number2radius", "(", "point", "[", "'coordinates'", "]", "[", "1", "]", ")", "lat2", "=", "math", ".", "asin", "(", "math", ".", "sin", "(", "lat1", ")", "*", "math", ".", "cos", "(", "dist", ")", "+", "math", ".", "cos", "(", "lat1", ")", "*", "math", ".", "sin", "(", "dist", ")", "*", "math", ".", "cos", "(", "brng", ")", ")", "lon2", "=", "lon1", "+", "math", ".", "atan2", "(", "math", ".", "sin", "(", "brng", ")", "*", "math", ".", "sin", "(", "dist", ")", "*", "math", ".", "cos", "(", "lat1", ")", ",", "math", ".", "cos", "(", "dist", ")", "-", "math", ".", "sin", "(", "lat1", ")", "*", "math", ".", "sin", "(", "lat2", ")", ")", "lon2", "=", "(", "lon2", "+", "3", "*", "math", ".", "pi", ")", "%", "(", "2", "*", "math", ".", "pi", ")", "-", "math", ".", "pi", "# normalise to -180 degree +180 degree", "return", "{", "'type'", ":", "'Point'", ",", "'coordinates'", ":", "[", "number2degree", "(", "lon2", ")", ",", "number2degree", "(", "lat2", ")", "]", "}" ]
Calculate a destination Point base on a base point and a distance

Keyword arguments:
point -- point geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point

return destination point object
[ "Calculate", "a", "destination", "Point", "base", "on", "a", "base", "point", "and", "a", "distance" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L351-L375
train
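A sanity check on the forward geodesic above: starting at the equator and heading due north by one degree of arc (about 111 km with the record's 6371 km radius) should land almost exactly at latitude 1. The formulas below repeat the record's, folded into one self-contained function:

import math


def destination(lon, lat, brng_deg, dist_km, R=6371.0):
    """Forward geodesic on a sphere, same formulas as the record above."""
    d = dist_km / R  # angular distance in radians
    brng = math.radians(brng_deg)
    lat1, lon1 = math.radians(lat), math.radians(lon)
    lat2 = math.asin(math.sin(lat1) * math.cos(d) +
                     math.cos(lat1) * math.sin(d) * math.cos(brng))
    lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(d) * math.cos(lat1),
                             math.cos(d) - math.sin(lat1) * math.sin(lat2))
    return math.degrees(lon2), math.degrees(lat2)


lon2, lat2 = destination(0.0, 0.0, 0.0, 6371.0 * math.pi / 180)  # one degree of arc
assert abs(lat2 - 1.0) < 1e-9 and abs(lon2) < 1e-9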
brandonxiang/geojson-python-utils
geojson_utils/merger.py
merge_featurecollection
def merge_featurecollection(*jsons):
    """
    merge features into one featurecollection

    Keyword arguments:
    jsons -- jsons object list

    return geojson featurecollection
    """
    features = []
    for json in jsons:
        if json['type'] == 'FeatureCollection':
            for feature in json['features']:
                features.append(feature)
    return {"type": 'FeatureCollection', "features": features}
python
def merge_featurecollection(*jsons):
    """
    merge features into one featurecollection

    Keyword arguments:
    jsons -- jsons object list

    return geojson featurecollection
    """
    features = []
    for json in jsons:
        if json['type'] == 'FeatureCollection':
            for feature in json['features']:
                features.append(feature)
    return {"type": 'FeatureCollection', "features": features}
[ "def", "merge_featurecollection", "(", "*", "jsons", ")", ":", "features", "=", "[", "]", "for", "json", "in", "jsons", ":", "if", "json", "[", "'type'", "]", "==", "'FeatureCollection'", ":", "for", "feature", "in", "json", "[", "'features'", "]", ":", "features", ".", "append", "(", "feature", ")", "return", "{", "\"type\"", ":", "'FeatureCollection'", ",", "\"features\"", ":", "features", "}" ]
merge features into one featurecollection

Keyword arguments:
jsons -- jsons object list

return geojson featurecollection
[ "merge", "features", "into", "one", "featurecollection" ]
33d0dcd5f16e0567b48c0d49fd292a4f1db16b41
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/merger.py#L6-L20
train
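Merging is just concatenating the features arrays, and inputs that are not FeatureCollections are silently skipped, as the record shows:

def merge_fc(*collections):
    """Concatenate features from every FeatureCollection argument."""
    features = []
    for fc in collections:
        if fc.get('type') == 'FeatureCollection':
            features.extend(fc.get('features', []))
    return {'type': 'FeatureCollection', 'features': features}


a = {'type': 'FeatureCollection', 'features': [{'type': 'Feature', 'id': 1}]}
b = {'type': 'FeatureCollection', 'features': [{'type': 'Feature', 'id': 2}]}
merged = merge_fc(a, b, {'type': 'Point', 'coordinates': [0, 0]})  # Point ignored
assert [f['id'] for f in merged['features']] == [1, 2]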
gotcha/vimpdb
src/vimpdb/debugger.py
trace_dispatch
def trace_dispatch(self, frame, event, arg):
    """allow to switch to Vimpdb instance"""
    if hasattr(self, 'vimpdb'):
        return self.vimpdb.trace_dispatch(frame, event, arg)
    else:
        return self._orig_trace_dispatch(frame, event, arg)
python
def trace_dispatch(self, frame, event, arg):
    """allow to switch to Vimpdb instance"""
    if hasattr(self, 'vimpdb'):
        return self.vimpdb.trace_dispatch(frame, event, arg)
    else:
        return self._orig_trace_dispatch(frame, event, arg)
[ "def", "trace_dispatch", "(", "self", ",", "frame", ",", "event", ",", "arg", ")", ":", "if", "hasattr", "(", "self", ",", "'vimpdb'", ")", ":", "return", "self", ".", "vimpdb", ".", "trace_dispatch", "(", "frame", ",", "event", ",", "arg", ")", "else", ":", "return", "self", ".", "_orig_trace_dispatch", "(", "frame", ",", "event", ",", "arg", ")" ]
allow to switch to Vimpdb instance
[ "allow", "to", "switch", "to", "Vimpdb", "instance" ]
1171938751127d23f66f6b750dd79166c64bdf20
https://github.com/gotcha/vimpdb/blob/1171938751127d23f66f6b750dd79166c64bdf20/src/vimpdb/debugger.py#L237-L242
train
gotcha/vimpdb
src/vimpdb/debugger.py
hook
def hook(klass):
    """
    monkey-patch pdb.Pdb class

    adds a 'vim' (and 'v') command:
    it switches to debugging with vimpdb
    """
    if not hasattr(klass, 'do_vim'):
        setupMethod(klass, trace_dispatch)
        klass.__bases__ += (SwitcherToVimpdb, )
python
def hook(klass):
    """
    monkey-patch pdb.Pdb class

    adds a 'vim' (and 'v') command:
    it switches to debugging with vimpdb
    """
    if not hasattr(klass, 'do_vim'):
        setupMethod(klass, trace_dispatch)
        klass.__bases__ += (SwitcherToVimpdb, )
[ "def", "hook", "(", "klass", ")", ":", "if", "not", "hasattr", "(", "klass", ",", "'do_vim'", ")", ":", "setupMethod", "(", "klass", ",", "trace_dispatch", ")", "klass", ".", "__bases__", "+=", "(", "SwitcherToVimpdb", ",", ")" ]
monkey-patch pdb.Pdb class

adds a 'vim' (and 'v') command:
it switches to debugging with vimpdb
[ "monkey", "-", "patch", "pdb", ".", "Pdb", "class" ]
1171938751127d23f66f6b750dd79166c64bdf20
https://github.com/gotcha/vimpdb/blob/1171938751127d23f66f6b750dd79166c64bdf20/src/vimpdb/debugger.py#L277-L287
train
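hook() combines two patching moves: stash the original method under another name before replacing it (which is evidently what setupMethod does, given the _orig_trace_dispatch fallback two records up), then graft extra commands in by appending to __bases__. A generic sketch of that pattern with illustrative names, not vimpdb API; note that appending to __bases__ requires a class whose direct base is not object:

class BaseDebugger:
    def dispatch(self, event):
        return "original:" + event


class Debugger(BaseDebugger):
    pass


class ExtraCommands:
    def do_vim(self):
        return "switching"


def hook(klass):
    """Patch once: keep the original method reachable, then graft commands in."""
    if not hasattr(klass, 'do_vim'):
        klass._orig_dispatch = klass.dispatch  # what setupMethod would stash
        klass.dispatch = lambda self, event: self._orig_dispatch(event)
        klass.__bases__ += (ExtraCommands,)    # as in the record above


hook(Debugger)
d = Debugger()
assert d.dispatch("call") == "original:call"
assert d.do_vim() == "switching"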
gotcha/vimpdb
src/vimpdb/debugger.py
VimPdb.trace_dispatch
def trace_dispatch(self, frame, event, arg):
    """allow to switch to Pdb instance"""
    if hasattr(self, 'pdb'):
        return self.pdb.trace_dispatch(frame, event, arg)
    else:
        return Pdb.trace_dispatch(self, frame, event, arg)
python
def trace_dispatch(self, frame, event, arg):
    """allow to switch to Pdb instance"""
    if hasattr(self, 'pdb'):
        return self.pdb.trace_dispatch(frame, event, arg)
    else:
        return Pdb.trace_dispatch(self, frame, event, arg)
[ "def", "trace_dispatch", "(", "self", ",", "frame", ",", "event", ",", "arg", ")", ":", "if", "hasattr", "(", "self", ",", "'pdb'", ")", ":", "return", "self", ".", "pdb", ".", "trace_dispatch", "(", "frame", ",", "event", ",", "arg", ")", "else", ":", "return", "Pdb", ".", "trace_dispatch", "(", "self", ",", "frame", ",", "event", ",", "arg", ")" ]
allow to switch to Pdb instance
[ "allow", "to", "switch", "to", "Pdb", "instance" ]
1171938751127d23f66f6b750dd79166c64bdf20
https://github.com/gotcha/vimpdb/blob/1171938751127d23f66f6b750dd79166c64bdf20/src/vimpdb/debugger.py#L97-L102
train
rkhleics/wagtailmenus
wagtailmenus/models/mixins.py
DefinesSubMenuTemplatesMixin.get_context_data
def get_context_data(self, **kwargs):
    """
    Include the name of the sub menu template in the context. This is
    purely for backwards compatibility. Any sub menus rendered as part of
    this menu will call `sub_menu_template` on the original menu instance
    to get an actual `Template`
    """
    data = {}
    if self._contextual_vals.current_level == 1 and self.max_levels > 1:
        data['sub_menu_template'] = self.sub_menu_template.template.name
    data.update(kwargs)
    return super().get_context_data(**data)
python
def get_context_data(self, **kwargs):
    """
    Include the name of the sub menu template in the context. This is
    purely for backwards compatibility. Any sub menus rendered as part of
    this menu will call `sub_menu_template` on the original menu instance
    to get an actual `Template`
    """
    data = {}
    if self._contextual_vals.current_level == 1 and self.max_levels > 1:
        data['sub_menu_template'] = self.sub_menu_template.template.name
    data.update(kwargs)
    return super().get_context_data(**data)
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "}", "if", "self", ".", "_contextual_vals", ".", "current_level", "==", "1", "and", "self", ".", "max_levels", ">", "1", ":", "data", "[", "'sub_menu_template'", "]", "=", "self", ".", "sub_menu_template", ".", "template", ".", "name", "data", ".", "update", "(", "kwargs", ")", "return", "super", "(", ")", ".", "get_context_data", "(", "*", "*", "data", ")" ]
Include the name of the sub menu template in the context. This is purely for backwards compatibility. Any sub menus rendered as part of this menu will call `sub_menu_template` on the original menu instance to get an actual `Template`
[ "Include", "the", "name", "of", "the", "sub", "menu", "template", "in", "the", "context", ".", "This", "is", "purely", "for", "backwards", "compatibility", ".", "Any", "sub", "menus", "rendered", "as", "part", "of", "this", "menu", "will", "call", "sub_menu_template", "on", "the", "original", "menu", "instance", "to", "get", "an", "actual", "Template" ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/mixins.py#L105-L116
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.render_from_tag
def render_from_tag(
    cls, context, max_levels=None, use_specific=None,
    apply_active_classes=True, allow_repeating_parents=True,
    use_absolute_page_urls=False, add_sub_menus_inline=None,
    template_name='', **kwargs
):
    """
    A template tag should call this method to render a menu.
    The ``Context`` instance and option values provided are used to get
    or create a relevant menu instance, prepare it, then render it and
    its menu items to an appropriate template.

    It shouldn't be necessary to override this method, as any new option
    values will be available as a dict in `opt_vals.extra`, and there are
    more specific methods for overriding certain behaviour at different
    stages of rendering, such as:

    * get_from_collected_values() (if the class is a Django model), OR
    * create_from_collected_values() (if it isn't)
    * prepare_to_render()
    * get_context_data()
    * render_to_template()
    """
    instance = cls._get_render_prepared_object(
        context,
        max_levels=max_levels,
        use_specific=use_specific,
        apply_active_classes=apply_active_classes,
        allow_repeating_parents=allow_repeating_parents,
        use_absolute_page_urls=use_absolute_page_urls,
        add_sub_menus_inline=add_sub_menus_inline,
        template_name=template_name,
        **kwargs
    )
    if not instance:
        return ''
    return instance.render_to_template()
python
def render_from_tag(
    cls, context, max_levels=None, use_specific=None,
    apply_active_classes=True, allow_repeating_parents=True,
    use_absolute_page_urls=False, add_sub_menus_inline=None,
    template_name='', **kwargs
):
    """
    A template tag should call this method to render a menu.
    The ``Context`` instance and option values provided are used to get
    or create a relevant menu instance, prepare it, then render it and
    its menu items to an appropriate template.

    It shouldn't be necessary to override this method, as any new option
    values will be available as a dict in `opt_vals.extra`, and there are
    more specific methods for overriding certain behaviour at different
    stages of rendering, such as:

    * get_from_collected_values() (if the class is a Django model), OR
    * create_from_collected_values() (if it isn't)
    * prepare_to_render()
    * get_context_data()
    * render_to_template()
    """
    instance = cls._get_render_prepared_object(
        context,
        max_levels=max_levels,
        use_specific=use_specific,
        apply_active_classes=apply_active_classes,
        allow_repeating_parents=allow_repeating_parents,
        use_absolute_page_urls=use_absolute_page_urls,
        add_sub_menus_inline=add_sub_menus_inline,
        template_name=template_name,
        **kwargs
    )
    if not instance:
        return ''
    return instance.render_to_template()
[ "def", "render_from_tag", "(", "cls", ",", "context", ",", "max_levels", "=", "None", ",", "use_specific", "=", "None", ",", "apply_active_classes", "=", "True", ",", "allow_repeating_parents", "=", "True", ",", "use_absolute_page_urls", "=", "False", ",", "add_sub_menus_inline", "=", "None", ",", "template_name", "=", "''", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "cls", ".", "_get_render_prepared_object", "(", "context", ",", "max_levels", "=", "max_levels", ",", "use_specific", "=", "use_specific", ",", "apply_active_classes", "=", "apply_active_classes", ",", "allow_repeating_parents", "=", "allow_repeating_parents", ",", "use_absolute_page_urls", "=", "use_absolute_page_urls", ",", "add_sub_menus_inline", "=", "add_sub_menus_inline", ",", "template_name", "=", "template_name", ",", "*", "*", "kwargs", ")", "if", "not", "instance", ":", "return", "''", "return", "instance", ".", "render_to_template", "(", ")" ]
A template tag should call this method to render a menu.
The ``Context`` instance and option values provided are used to get or
create a relevant menu instance, prepare it, then render it and its
menu items to an appropriate template.

It shouldn't be necessary to override this method, as any new option
values will be available as a dict in `opt_vals.extra`, and there are
more specific methods for overriding certain behaviour at different
stages of rendering, such as:

* get_from_collected_values() (if the class is a Django model), OR
* create_from_collected_values() (if it isn't)
* prepare_to_render()
* get_context_data()
* render_to_template()
[ "A", "template", "tag", "should", "call", "this", "method", "to", "render", "a", "menu", ".", "The", "Context", "instance", "and", "option", "values", "provided", "are", "used", "to", "get", "or", "create", "a", "relevant", "menu", "instance", "prepare", "it", "then", "render", "it", "and", "it", "s", "menu", "items", "to", "an", "appropriate", "template", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L68-L105
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu._create_contextualvals_obj_from_context
def _create_contextualvals_obj_from_context(cls, context):
    """
    Gathers all of the 'contextual' data needed to render a menu instance
    and returns it in a structure that can be conveniently referenced
    throughout the process of preparing the menu and menu items and
    for rendering.
    """
    context_processor_vals = context.get('wagtailmenus_vals', {})
    return ContextualVals(
        context,
        context['request'],
        get_site_from_request(context['request']),
        context.get('current_level', 0) + 1,
        context.get('original_menu_tag', cls.related_templatetag_name),
        context.get('original_menu_instance'),
        context_processor_vals.get('current_page'),
        context_processor_vals.get('section_root'),
        context_processor_vals.get('current_page_ancestor_ids', ()),
    )
python
def _create_contextualvals_obj_from_context(cls, context):
    """
    Gathers all of the 'contextual' data needed to render a menu instance
    and returns it in a structure that can be conveniently referenced
    throughout the process of preparing the menu and menu items and
    for rendering.
    """
    context_processor_vals = context.get('wagtailmenus_vals', {})
    return ContextualVals(
        context,
        context['request'],
        get_site_from_request(context['request']),
        context.get('current_level', 0) + 1,
        context.get('original_menu_tag', cls.related_templatetag_name),
        context.get('original_menu_instance'),
        context_processor_vals.get('current_page'),
        context_processor_vals.get('section_root'),
        context_processor_vals.get('current_page_ancestor_ids', ()),
    )
[ "def", "_create_contextualvals_obj_from_context", "(", "cls", ",", "context", ")", ":", "context_processor_vals", "=", "context", ".", "get", "(", "'wagtailmenus_vals'", ",", "{", "}", ")", "return", "ContextualVals", "(", "context", ",", "context", "[", "'request'", "]", ",", "get_site_from_request", "(", "context", "[", "'request'", "]", ")", ",", "context", ".", "get", "(", "'current_level'", ",", "0", ")", "+", "1", ",", "context", ".", "get", "(", "'original_menu_tag'", ",", "cls", ".", "related_templatetag_name", ")", ",", "context", ".", "get", "(", "'original_menu_instance'", ")", ",", "context_processor_vals", ".", "get", "(", "'current_page'", ")", ",", "context_processor_vals", ".", "get", "(", "'section_root'", ")", ",", "context_processor_vals", ".", "get", "(", "'current_page_ancestor_ids'", ",", "(", ")", ")", ",", ")" ]
Gathers all of the 'contextual' data needed to render a menu instance and returns it in a structure that can be conveniently referenced throughout the process of preparing the menu and menu items and for rendering.
[ "Gathers", "all", "of", "the", "contextual", "data", "needed", "to", "render", "a", "menu", "instance", "and", "returns", "it", "in", "a", "structure", "that", "can", "be", "conveniently", "referenced", "throughout", "the", "process", "of", "preparing", "the", "menu", "and", "menu", "items", "and", "for", "rendering", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L128-L146
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.render_to_template
def render_to_template(self):
    """
    Render the current menu instance to a template and return a string
    """
    context_data = self.get_context_data()
    template = self.get_template()

    context_data['current_template'] = template.template.name
    return template.render(context_data)
python
def render_to_template(self):
    """
    Render the current menu instance to a template and return a string
    """
    context_data = self.get_context_data()
    template = self.get_template()

    context_data['current_template'] = template.template.name
    return template.render(context_data)
[ "def", "render_to_template", "(", "self", ")", ":", "context_data", "=", "self", ".", "get_context_data", "(", ")", "template", "=", "self", ".", "get_template", "(", ")", "context_data", "[", "'current_template'", "]", "=", "template", ".", "template", ".", "name", "return", "template", ".", "render", "(", "context_data", ")" ]
Render the current menu instance to a template and return a string
[ "Render", "the", "current", "menu", "instance", "to", "a", "template", "and", "return", "a", "string" ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L222-L230
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.get_common_hook_kwargs
def get_common_hook_kwargs(self, **kwargs):
    """
    Returns a dictionary of common values to be passed as keyword
    arguments to methods registered as 'hooks'.
    """
    opt_vals = self._option_vals
    hook_kwargs = self._contextual_vals._asdict()
    hook_kwargs.update({
        'menu_instance': self,
        'menu_tag': self.related_templatetag_name,
        'parent_page': None,
        'max_levels': self.max_levels,
        'use_specific': self.use_specific,
        'apply_active_classes': opt_vals.apply_active_classes,
        'allow_repeating_parents': opt_vals.allow_repeating_parents,
        'use_absolute_page_urls': opt_vals.use_absolute_page_urls,
    })
    if hook_kwargs['original_menu_instance'] is None:
        hook_kwargs['original_menu_instance'] = self
    hook_kwargs.update(kwargs)
    return hook_kwargs
python
def get_common_hook_kwargs(self, **kwargs):
    """
    Returns a dictionary of common values to be passed as keyword
    arguments to methods registered as 'hooks'.
    """
    opt_vals = self._option_vals
    hook_kwargs = self._contextual_vals._asdict()
    hook_kwargs.update({
        'menu_instance': self,
        'menu_tag': self.related_templatetag_name,
        'parent_page': None,
        'max_levels': self.max_levels,
        'use_specific': self.use_specific,
        'apply_active_classes': opt_vals.apply_active_classes,
        'allow_repeating_parents': opt_vals.allow_repeating_parents,
        'use_absolute_page_urls': opt_vals.use_absolute_page_urls,
    })
    if hook_kwargs['original_menu_instance'] is None:
        hook_kwargs['original_menu_instance'] = self
    hook_kwargs.update(kwargs)
    return hook_kwargs
[ "def", "get_common_hook_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "opt_vals", "=", "self", ".", "_option_vals", "hook_kwargs", "=", "self", ".", "_contextual_vals", ".", "_asdict", "(", ")", "hook_kwargs", ".", "update", "(", "{", "'menu_instance'", ":", "self", ",", "'menu_tag'", ":", "self", ".", "related_templatetag_name", ",", "'parent_page'", ":", "None", ",", "'max_levels'", ":", "self", ".", "max_levels", ",", "'use_specific'", ":", "self", ".", "use_specific", ",", "'apply_active_classes'", ":", "opt_vals", ".", "apply_active_classes", ",", "'allow_repeating_parents'", ":", "opt_vals", ".", "allow_repeating_parents", ",", "'use_absolute_page_urls'", ":", "opt_vals", ".", "use_absolute_page_urls", ",", "}", ")", "if", "hook_kwargs", "[", "'original_menu_instance'", "]", "is", "None", ":", "hook_kwargs", "[", "'original_menu_instance'", "]", "=", "self", "hook_kwargs", ".", "update", "(", "kwargs", ")", "return", "hook_kwargs" ]
Returns a dictionary of common values to be passed as keyword arguments to methods registered as 'hooks'.
[ "Returns", "a", "dictionary", "of", "common", "values", "to", "be", "passed", "as", "keyword", "arguments", "to", "methods", "registered", "as", "hooks", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L269-L289
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.get_page_children_dict
def get_page_children_dict(self, page_qs=None):
        """
        Returns a dictionary of lists, where the keys are 'path' values for
        pages, and the value is a list of children pages for that page.
        """
        children_dict = defaultdict(list)
        for page in page_qs or self.pages_for_display:
            children_dict[page.path[:-page.steplen]].append(page)
        return children_dict
python
def get_page_children_dict(self, page_qs=None):
        """
        Returns a dictionary of lists, where the keys are 'path' values for
        pages, and the value is a list of children pages for that page.
        """
        children_dict = defaultdict(list)
        for page in page_qs or self.pages_for_display:
            children_dict[page.path[:-page.steplen]].append(page)
        return children_dict
[ "def", "get_page_children_dict", "(", "self", ",", "page_qs", "=", "None", ")", ":", "children_dict", "=", "defaultdict", "(", "list", ")", "for", "page", "in", "page_qs", "or", "self", ".", "pages_for_display", ":", "children_dict", "[", "page", ".", "path", "[", ":", "-", "page", ".", "steplen", "]", "]", ".", "append", "(", "page", ")", "return", "children_dict" ]
Returns a dictionary of lists, where the keys are 'path' values for pages, and the value is a list of children pages for that page.
[ "Returns", "a", "dictionary", "of", "lists", "where", "the", "keys", "are", "path", "values", "for", "pages", "and", "the", "value", "is", "a", "list", "of", "children", "pages", "for", "that", "page", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L318-L326
train
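The `page.path[:-page.steplen]` slice in the record above leans on treebeard-style materialised paths, where a page's `path` is its parent's path plus one fixed-width step, so dropping the final step groups siblings under their parent. A runnable sketch of the same grouping with stand-in pages (the `SimpleNamespace` pages and 4-character step width are assumptions for illustration, not wagtailmenus code):

from collections import defaultdict
from types import SimpleNamespace

STEPLEN = 4  # fixed width of one path step, as in Wagtail/treebeard

# Stand-in pages: '0001' is the root; the two 8-character paths are its children
pages = [
    SimpleNamespace(title='Home', path='0001', steplen=STEPLEN),
    SimpleNamespace(title='About', path='00010001', steplen=STEPLEN),
    SimpleNamespace(title='News', path='00010002', steplen=STEPLEN),
]

children_dict = defaultdict(list)
for page in pages:
    # Dropping the last step yields the parent's path, so children group together
    children_dict[page.path[:-page.steplen]].append(page)

print([p.title for p in children_dict['0001']])  # ['About', 'News']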
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.get_context_data
def get_context_data(self, **kwargs):
        """
        Return a dictionary containing all of the values needed to render the
        menu instance to a template, including values that might be used by
        the 'sub_menu' tag to render any additional levels.
        """
        ctx_vals = self._contextual_vals
        opt_vals = self._option_vals
        data = self.create_dict_from_parent_context()
        data.update(ctx_vals._asdict())
        data.update({
            'apply_active_classes': opt_vals.apply_active_classes,
            'allow_repeating_parents': opt_vals.allow_repeating_parents,
            'use_absolute_page_urls': opt_vals.use_absolute_page_urls,
            'max_levels': self.max_levels,
            'use_specific': self.use_specific,
            'menu_instance': self,
            self.menu_instance_context_name: self,
            # Repeat some vals with backwards-compatible keys
            'section_root': data['current_section_root_page'],
            'current_ancestor_ids': data['current_page_ancestor_ids'],
        })
        if not ctx_vals.original_menu_instance and ctx_vals.current_level == 1:
            data['original_menu_instance'] = self
        if 'menu_items' not in kwargs:
            data['menu_items'] = self.get_menu_items_for_rendering()
        data.update(kwargs)
        return data
python
def get_context_data(self, **kwargs):
        """
        Return a dictionary containing all of the values needed to render the
        menu instance to a template, including values that might be used by
        the 'sub_menu' tag to render any additional levels.
        """
        ctx_vals = self._contextual_vals
        opt_vals = self._option_vals
        data = self.create_dict_from_parent_context()
        data.update(ctx_vals._asdict())
        data.update({
            'apply_active_classes': opt_vals.apply_active_classes,
            'allow_repeating_parents': opt_vals.allow_repeating_parents,
            'use_absolute_page_urls': opt_vals.use_absolute_page_urls,
            'max_levels': self.max_levels,
            'use_specific': self.use_specific,
            'menu_instance': self,
            self.menu_instance_context_name: self,
            # Repeat some vals with backwards-compatible keys
            'section_root': data['current_section_root_page'],
            'current_ancestor_ids': data['current_page_ancestor_ids'],
        })
        if not ctx_vals.original_menu_instance and ctx_vals.current_level == 1:
            data['original_menu_instance'] = self
        if 'menu_items' not in kwargs:
            data['menu_items'] = self.get_menu_items_for_rendering()
        data.update(kwargs)
        return data
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ctx_vals", "=", "self", ".", "_contextual_vals", "opt_vals", "=", "self", ".", "_option_vals", "data", "=", "self", ".", "create_dict_from_parent_context", "(", ")", "data", ".", "update", "(", "ctx_vals", ".", "_asdict", "(", ")", ")", "data", ".", "update", "(", "{", "'apply_active_classes'", ":", "opt_vals", ".", "apply_active_classes", ",", "'allow_repeating_parents'", ":", "opt_vals", ".", "allow_repeating_parents", ",", "'use_absolute_page_urls'", ":", "opt_vals", ".", "use_absolute_page_urls", ",", "'max_levels'", ":", "self", ".", "max_levels", ",", "'use_specific'", ":", "self", ".", "use_specific", ",", "'menu_instance'", ":", "self", ",", "self", ".", "menu_instance_context_name", ":", "self", ",", "# Repeat some vals with backwards-compatible keys", "'section_root'", ":", "data", "[", "'current_section_root_page'", "]", ",", "'current_ancestor_ids'", ":", "data", "[", "'current_page_ancestor_ids'", "]", ",", "}", ")", "if", "not", "ctx_vals", ".", "original_menu_instance", "and", "ctx_vals", ".", "current_level", "==", "1", ":", "data", "[", "'original_menu_instance'", "]", "=", "self", "if", "'menu_items'", "not", "in", "kwargs", ":", "data", "[", "'menu_items'", "]", "=", "self", ".", "get_menu_items_for_rendering", "(", ")", "data", ".", "update", "(", "kwargs", ")", "return", "data" ]
Return a dictionary containing all of the values needed to render the menu instance to a template, including values that might be used by the 'sub_menu' tag to render any additional levels.
[ "Return", "a", "dictionary", "containing", "all", "of", "the", "values", "needed", "to", "render", "the", "menu", "instance", "to", "a", "template", "including", "values", "that", "might", "be", "used", "by", "the", "sub_menu", "tag", "to", "render", "any", "additional", "levels", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L385-L412
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.get_menu_items_for_rendering
def get_menu_items_for_rendering(self):
        """
        Return a list of 'menu items' to be included in the context for
        rendering the current level of the menu.

        The responsibility for sourcing, priming, and modifying menu items is
        split between three methods: ``get_raw_menu_items()``,
        ``prime_menu_items()`` and ``modify_menu_items()``, respectively.
        """
        items = self.get_raw_menu_items()

        # Allow hooks to modify the raw list
        for hook in hooks.get_hooks('menus_modify_raw_menu_items'):
            items = hook(items, **self.common_hook_kwargs)

        # Prime and modify the menu items accordingly
        items = self.modify_menu_items(self.prime_menu_items(items))
        if isinstance(items, GeneratorType):
            items = list(items)

        # Allow hooks to modify the primed/modified list
        hook_methods = hooks.get_hooks('menus_modify_primed_menu_items')
        for hook in hook_methods:
            items = hook(items, **self.common_hook_kwargs)

        return items
python
def get_menu_items_for_rendering(self):
        """
        Return a list of 'menu items' to be included in the context for
        rendering the current level of the menu.

        The responsibility for sourcing, priming, and modifying menu items is
        split between three methods: ``get_raw_menu_items()``,
        ``prime_menu_items()`` and ``modify_menu_items()``, respectively.
        """
        items = self.get_raw_menu_items()

        # Allow hooks to modify the raw list
        for hook in hooks.get_hooks('menus_modify_raw_menu_items'):
            items = hook(items, **self.common_hook_kwargs)

        # Prime and modify the menu items accordingly
        items = self.modify_menu_items(self.prime_menu_items(items))
        if isinstance(items, GeneratorType):
            items = list(items)

        # Allow hooks to modify the primed/modified list
        hook_methods = hooks.get_hooks('menus_modify_primed_menu_items')
        for hook in hook_methods:
            items = hook(items, **self.common_hook_kwargs)

        return items
[ "def", "get_menu_items_for_rendering", "(", "self", ")", ":", "items", "=", "self", ".", "get_raw_menu_items", "(", ")", "# Allow hooks to modify the raw list", "for", "hook", "in", "hooks", ".", "get_hooks", "(", "'menus_modify_raw_menu_items'", ")", ":", "items", "=", "hook", "(", "items", ",", "*", "*", "self", ".", "common_hook_kwargs", ")", "# Prime and modify the menu items accordingly", "items", "=", "self", ".", "modify_menu_items", "(", "self", ".", "prime_menu_items", "(", "items", ")", ")", "if", "isinstance", "(", "items", ",", "GeneratorType", ")", ":", "items", "=", "list", "(", "items", ")", "# Allow hooks to modify the primed/modified list", "hook_methods", "=", "hooks", ".", "get_hooks", "(", "'menus_modify_primed_menu_items'", ")", "for", "hook", "in", "hook_methods", ":", "items", "=", "hook", "(", "items", ",", "*", "*", "self", ".", "common_hook_kwargs", ")", "return", "items" ]
Return a list of 'menu items' to be included in the context for rendering the current level of the menu. The responsibility for sourcing, priming, and modifying menu items is split between three methods: ``get_raw_menu_items()``, ``prime_menu_items()`` and ``modify_menu_items()``, respectively.
[ "Return", "a", "list", "of", "menu", "items", "to", "be", "included", "in", "the", "context", "for", "rendering", "the", "current", "level", "of", "the", "menu", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L414-L438
train
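Both hook names in the record above are resolved with `hooks.get_hooks()`, so a project can reshape the item list without subclassing the menu. A hedged sketch of such a hook in a project's `wagtail_hooks.py` (the import path varies by Wagtail version, and the `live` filter is an invented example; every hook must return the list it was given):

from wagtail.core import hooks  # on newer Wagtail: from wagtail import hooks


@hooks.register('menus_modify_raw_menu_items')
def drop_unpublished_items(menu_items, **kwargs):
    # kwargs carries the values built by get_common_hook_kwargs() above
    # ('menu_instance', 'request', 'max_levels', and so on)
    return [item for item in menu_items if getattr(item, 'live', True)]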
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu._replace_with_specific_page
def _replace_with_specific_page(page, menu_item):
        """
        If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
        version of itself. Also update ``menu_item``, depending on whether
        it's a ``MenuItem`` object or a ``Page`` object.
        """
        if type(page) is Page:
            page = page.specific
            if isinstance(menu_item, MenuItem):
                menu_item.link_page = page
            else:
                menu_item = page
        return page, menu_item
python
def _replace_with_specific_page(page, menu_item):
        """
        If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
        version of itself. Also update ``menu_item``, depending on whether
        it's a ``MenuItem`` object or a ``Page`` object.
        """
        if type(page) is Page:
            page = page.specific
            if isinstance(menu_item, MenuItem):
                menu_item.link_page = page
            else:
                menu_item = page
        return page, menu_item
[ "def", "_replace_with_specific_page", "(", "page", ",", "menu_item", ")", ":", "if", "type", "(", "page", ")", "is", "Page", ":", "page", "=", "page", ".", "specific", "if", "isinstance", "(", "menu_item", ",", "MenuItem", ")", ":", "menu_item", ".", "link_page", "=", "page", "else", ":", "menu_item", "=", "page", "return", "page", ",", "menu_item" ]
If ``page`` is a vanilla ``Page`` object, replace it with a 'specific' version of itself. Also update ``menu_item``, depending on whether it's a ``MenuItem`` object or a ``Page`` object.
[ "If", "page", "is", "a", "vanilla", "Page", "object", "replace", "it", "with", "a", "specific", "version", "of", "itself", ".", "Also", "update", "menu_item", "depending", "on", "whether", "it", "s", "a", "MenuItem", "object", "or", "a", "Page", "object", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L451-L463
train
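For background, `Page.specific` is Wagtail's mechanism for swapping a generic `Page` row for its subclass instance, and the `type(page) is Page` check above ensures the swap only happens when the exact base class was fetched. A runnable pure-Python analogue of that guard (the `AboutPage` class and `upgrade_to_specific()` helper are invented for illustration):

class Page:
    pass


class AboutPage(Page):
    pass


def upgrade_to_specific(page):
    # Mirror of the 'type(page) is Page' guard: only swap when the object
    # is exactly the base class, never when a subclass was already fetched
    if type(page) is Page:
        return AboutPage()  # Wagtail would resolve page.specific via the DB
    return page


assert type(upgrade_to_specific(Page())) is AboutPage
assert type(upgrade_to_specific(AboutPage())) is AboutPage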
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
Menu.prime_menu_items
def prime_menu_items(self, menu_items):
        """
        A generator method that takes a list of ``MenuItem`` or ``Page``
        objects and sets a number of additional attributes on each item that
        are useful in menu templates.
        """
        for item in menu_items:
            item = self._prime_menu_item(item)
            if item is not None:
                yield item
python
def prime_menu_items(self, menu_items):
        """
        A generator method that takes a list of ``MenuItem`` or ``Page``
        objects and sets a number of additional attributes on each item that
        are useful in menu templates.
        """
        for item in menu_items:
            item = self._prime_menu_item(item)
            if item is not None:
                yield item
[ "def", "prime_menu_items", "(", "self", ",", "menu_items", ")", ":", "for", "item", "in", "menu_items", ":", "item", "=", "self", ".", "_prime_menu_item", "(", "item", ")", "if", "item", "is", "not", "None", ":", "yield", "item" ]
A generator method that takes a list of ``MenuItem`` or ``Page`` objects and sets a number of additional attributes on each item that are useful in menu templates.
[ "A", "generator", "method", "that", "takes", "a", "list", "of", "MenuItem", "or", "Page", "objects", "and", "sets", "a", "number", "of", "additional", "attributes", "on", "each", "item", "that", "are", "useful", "in", "menu", "templates", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L603-L612
train
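Because the method above is a generator that silently drops items primed to `None`, callers such as `get_menu_items_for_rendering()` must materialise it (hence the `GeneratorType` check there). A toy version of the same filter-while-transforming pattern (all names invented):

def prime_items(items, primer):
    # Yield each transformed item, skipping any the primer rejects (None)
    for item in items:
        item = primer(item)
        if item is not None:
            yield item


double_evens = lambda n: n * 10 if n % 2 == 0 else None
print(list(prime_items([1, 2, 3, 4], double_evens)))  # [20, 40]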
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
MenuFromPage.get_children_for_page
def get_children_for_page(self, page):
        """Return a list of relevant child pages for a given page"""
        if self.max_levels == 1:
            # If there's only a single level of pages to display, skip the
            # dict creation / lookup and just return the QuerySet result
            return self.pages_for_display
        return super().get_children_for_page(page)
python
def get_children_for_page(self, page):
        """Return a list of relevant child pages for a given page"""
        if self.max_levels == 1:
            # If there's only a single level of pages to display, skip the
            # dict creation / lookup and just return the QuerySet result
            return self.pages_for_display
        return super().get_children_for_page(page)
[ "def", "get_children_for_page", "(", "self", ",", "page", ")", ":", "if", "self", ".", "max_levels", "==", "1", ":", "# If there's only a single level of pages to display, skip the", "# dict creation / lookup and just return the QuerySet result", "return", "self", ".", "pages_for_display", "return", "super", "(", ")", ".", "get_children_for_page", "(", "page", ")" ]
Return a list of relevant child pages for a given page
[ "Return", "a", "list", "of", "relevant", "child", "pages", "for", "a", "given", "page" ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L705-L711
train
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
MenuWithMenuItems.get_top_level_items
def get_top_level_items(self):
        """Return a list of menu items with link_page objects supplemented
        with 'specific' pages where appropriate."""
        menu_items = self.get_base_menuitem_queryset()

        # Identify which pages to fetch for the top level items
        page_ids = tuple(
            obj.link_page_id for obj in menu_items if obj.link_page_id
        )
        page_dict = {}
        if page_ids:
            # We use 'get_base_page_queryset' here, because if hooks are being
            # used to modify page querysets, that should affect the top level
            # items also
            top_level_pages = self.get_base_page_queryset().filter(
                id__in=page_ids
            )
            if self.use_specific >= constants.USE_SPECIFIC_TOP_LEVEL:
                """
                The menu is being generated with a specificity level of
                TOP_LEVEL or ALWAYS, so we use PageQuerySet.specific() to fetch
                specific page instances as efficiently as possible
                """
                top_level_pages = top_level_pages.specific()

            # Evaluate the above queryset to a dictionary, using IDs as keys
            page_dict = {p.id: p for p in top_level_pages}

        # Now build a list to return
        menu_item_list = []
        for item in menu_items:
            if not item.link_page_id:
                menu_item_list.append(item)
                continue  # skip to next
            if item.link_page_id in page_dict.keys():
                # Only return menu items for pages where the page was included
                # in the 'get_base_page_queryset' result
                item.link_page = page_dict.get(item.link_page_id)
                menu_item_list.append(item)
        return menu_item_list
python
def get_top_level_items(self):
        """Return a list of menu items with link_page objects supplemented
        with 'specific' pages where appropriate."""
        menu_items = self.get_base_menuitem_queryset()

        # Identify which pages to fetch for the top level items
        page_ids = tuple(
            obj.link_page_id for obj in menu_items if obj.link_page_id
        )
        page_dict = {}
        if page_ids:
            # We use 'get_base_page_queryset' here, because if hooks are being
            # used to modify page querysets, that should affect the top level
            # items also
            top_level_pages = self.get_base_page_queryset().filter(
                id__in=page_ids
            )
            if self.use_specific >= constants.USE_SPECIFIC_TOP_LEVEL:
                """
                The menu is being generated with a specificity level of
                TOP_LEVEL or ALWAYS, so we use PageQuerySet.specific() to fetch
                specific page instances as efficiently as possible
                """
                top_level_pages = top_level_pages.specific()

            # Evaluate the above queryset to a dictionary, using IDs as keys
            page_dict = {p.id: p for p in top_level_pages}

        # Now build a list to return
        menu_item_list = []
        for item in menu_items:
            if not item.link_page_id:
                menu_item_list.append(item)
                continue  # skip to next
            if item.link_page_id in page_dict.keys():
                # Only return menu items for pages where the page was included
                # in the 'get_base_page_queryset' result
                item.link_page = page_dict.get(item.link_page_id)
                menu_item_list.append(item)
        return menu_item_list
[ "def", "get_top_level_items", "(", "self", ")", ":", "menu_items", "=", "self", ".", "get_base_menuitem_queryset", "(", ")", "# Identify which pages to fetch for the top level items", "page_ids", "=", "tuple", "(", "obj", ".", "link_page_id", "for", "obj", "in", "menu_items", "if", "obj", ".", "link_page_id", ")", "page_dict", "=", "{", "}", "if", "page_ids", ":", "# We use 'get_base_page_queryset' here, because if hooks are being", "# used to modify page querysets, that should affect the top level", "# items also", "top_level_pages", "=", "self", ".", "get_base_page_queryset", "(", ")", ".", "filter", "(", "id__in", "=", "page_ids", ")", "if", "self", ".", "use_specific", ">=", "constants", ".", "USE_SPECIFIC_TOP_LEVEL", ":", "\"\"\"\n The menu is being generated with a specificity level of\n TOP_LEVEL or ALWAYS, so we use PageQuerySet.specific() to fetch\n specific page instances as efficiently as possible\n \"\"\"", "top_level_pages", "=", "top_level_pages", ".", "specific", "(", ")", "# Evaluate the above queryset to a dictionary, using IDs as keys", "page_dict", "=", "{", "p", ".", "id", ":", "p", "for", "p", "in", "top_level_pages", "}", "# Now build a list to return", "menu_item_list", "=", "[", "]", "for", "item", "in", "menu_items", ":", "if", "not", "item", ".", "link_page_id", ":", "menu_item_list", ".", "append", "(", "item", ")", "continue", "# skip to next", "if", "item", ".", "link_page_id", "in", "page_dict", ".", "keys", "(", ")", ":", "# Only return menu items for pages where the page was included", "# in the 'get_base_page_queryset' result", "item", ".", "link_page", "=", "page_dict", ".", "get", "(", "item", ".", "link_page_id", ")", "menu_item_list", ".", "append", "(", "item", ")", "return", "menu_item_list" ]
Return a list of menu items with link_page objects supplemented with 'specific' pages where appropriate.
[ "Return", "a", "list", "of", "menu", "items", "with", "link_page", "objects", "supplemented", "with", "specific", "pages", "where", "appropriate", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L1015-L1054
train
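The method above is essentially a manual join: menu items on one side, a filtered page queryset on the other, matched through an `{id: page}` dict so that items pointing at excluded pages silently drop out while custom-URL items always survive. The same matching logic in plain Python (dataclass stand-ins, not the real models):

from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class Item:
    label: str
    link_page_id: Optional[int] = None
    link_page: Any = None


page_dict = {1: 'Home page', 3: 'Contact page'}  # pages allowed by the base queryset
items = [Item('Home', 1), Item('Blog', 2), Item('External'), Item('Contact', 3)]

result = []
for item in items:
    if not item.link_page_id:
        result.append(item)                      # custom-URL item: keep as-is
        continue
    if item.link_page_id in page_dict:
        item.link_page = page_dict[item.link_page_id]
        result.append(item)                      # 'Blog' (id=2) is filtered out

print([i.label for i in result])                 # ['Home', 'External', 'Contact']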
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
AbstractMainMenu.get_for_site
def get_for_site(cls, site):
        """Return the 'main menu' instance for the provided site"""
        instance, created = cls.objects.get_or_create(site=site)
        return instance
python
def get_for_site(cls, site):
        """Return the 'main menu' instance for the provided site"""
        instance, created = cls.objects.get_or_create(site=site)
        return instance
[ "def", "get_for_site", "(", "cls", ",", "site", ")", ":", "instance", ",", "created", "=", "cls", ".", "objects", ".", "get_or_create", "(", "site", "=", "site", ")", "return", "instance" ]
Return the 'main menu' instance for the provided site
[ "Return", "the", "main", "menu", "instance", "for", "the", "provided", "site" ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L1207-L1210
train
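In practice this `get_or_create()` wrapper is how a site's main menu is fetched lazily, e.g. from a shell or a management command (a hedged sketch; `MainMenu` is the concrete model wagtailmenus derives from `AbstractMainMenu`, and `site` is assumed to be a Wagtail `Site` instance):

from wagtailmenus.models import MainMenu

menu = MainMenu.get_for_site(site)  # created empty on first access, reused after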
rkhleics/wagtailmenus
wagtailmenus/views.py
FlatMenuCopyView.get_form_kwargs
def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        """
        When the form is posted, don't pass an instance to the form. It
        should create a new one out of the posted data. We also need to
        nullify any IDs posted for inline menu items, so that new instances
        of those are created too.
        """
        if self.request.method == 'POST':
            data = copy(self.request.POST)
            i = 0
            while(data.get('%s-%s-id' % (
                settings.FLAT_MENU_ITEMS_RELATED_NAME, i
            ))):
                data['%s-%s-id' % (
                    settings.FLAT_MENU_ITEMS_RELATED_NAME, i
                )] = None
                i += 1
            kwargs.update({
                'data': data,
                'instance': self.model()
            })
        return kwargs
python
def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        """
        When the form is posted, don't pass an instance to the form. It
        should create a new one out of the posted data. We also need to
        nullify any IDs posted for inline menu items, so that new instances
        of those are created too.
        """
        if self.request.method == 'POST':
            data = copy(self.request.POST)
            i = 0
            while(data.get('%s-%s-id' % (
                settings.FLAT_MENU_ITEMS_RELATED_NAME, i
            ))):
                data['%s-%s-id' % (
                    settings.FLAT_MENU_ITEMS_RELATED_NAME, i
                )] = None
                i += 1
            kwargs.update({
                'data': data,
                'instance': self.model()
            })
        return kwargs
[ "def", "get_form_kwargs", "(", "self", ")", ":", "kwargs", "=", "super", "(", ")", ".", "get_form_kwargs", "(", ")", "if", "self", ".", "request", ".", "method", "==", "'POST'", ":", "data", "=", "copy", "(", "self", ".", "request", ".", "POST", ")", "i", "=", "0", "while", "(", "data", ".", "get", "(", "'%s-%s-id'", "%", "(", "settings", ".", "FLAT_MENU_ITEMS_RELATED_NAME", ",", "i", ")", ")", ")", ":", "data", "[", "'%s-%s-id'", "%", "(", "settings", ".", "FLAT_MENU_ITEMS_RELATED_NAME", ",", "i", ")", "]", "=", "None", "i", "+=", "1", "kwargs", ".", "update", "(", "{", "'data'", ":", "data", ",", "'instance'", ":", "self", ".", "model", "(", ")", "}", ")", "return", "kwargs" ]
When the form is posted, don't pass an instance to the form. It should create a new one out of the posted data. We also need to nullify any IDs posted for inline menu items, so that new instances of those are created too.
[ "When", "the", "form", "is", "posted", "don", "t", "pass", "an", "instance", "to", "the", "form", ".", "It", "should", "create", "a", "new", "one", "out", "of", "the", "posted", "data", ".", "We", "also", "need", "to", "nullify", "any", "IDs", "posted", "for", "inline", "menu", "items", "so", "that", "new", "instances", "of", "those", "are", "created", "too", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/views.py#L156-L178
train
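The `while` loop above walks Django's inline-formset field naming scheme, `<prefix>-<index>-id`, blanking each posted primary key so that saving produces copies rather than updates. A standalone sketch of that key rewrite (the prefix and field values are illustrative; the real code reads the prefix from `settings.FLAT_MENU_ITEMS_RELATED_NAME`):

PREFIX = 'menu_items'  # stand-in for the configured related name

data = {
    f'{PREFIX}-0-id': '41', f'{PREFIX}-0-link_text': 'Home',
    f'{PREFIX}-1-id': '42', f'{PREFIX}-1-link_text': 'About',
}

# Blank each posted inline PK so the formset saves new rows, not updates
i = 0
while data.get(f'{PREFIX}-{i}-id'):
    data[f'{PREFIX}-{i}-id'] = None
    i += 1

print(data[f'{PREFIX}-0-id'], data[f'{PREFIX}-1-id'])  # None None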
rkhleics/wagtailmenus
wagtailmenus/models/pages.py
MenuPageMixin.modify_submenu_items
def modify_submenu_items(
        self, menu_items, current_page, current_ancestor_ids, current_site,
        allow_repeating_parents, apply_active_classes, original_menu_tag,
        menu_instance=None, request=None, use_absolute_page_urls=False,
    ):
        """
        Make any necessary modifications to `menu_items` and return the
        list back to the calling menu tag to render in templates. Any
        additional items added should have a `text` and `href` attribute
        as a minimum.

        `original_menu_tag` should be one of 'main_menu', 'section_menu' or
        'children_menu', which should be useful when extending/overriding.
        """
        if (allow_repeating_parents and menu_items and self.repeat_in_subnav):
            """
            This page should have a version of itself repeated alongside
            children in the subnav, so we create a new item and prepend it to
            menu_items.
            """
            repeated_item = self.get_repeated_menu_item(
                current_page=current_page,
                current_site=current_site,
                apply_active_classes=apply_active_classes,
                original_menu_tag=original_menu_tag,
                use_absolute_page_urls=use_absolute_page_urls,
                request=request,
            )
            menu_items.insert(0, repeated_item)
        return menu_items
python
def modify_submenu_items(
        self, menu_items, current_page, current_ancestor_ids, current_site,
        allow_repeating_parents, apply_active_classes, original_menu_tag,
        menu_instance=None, request=None, use_absolute_page_urls=False,
    ):
        """
        Make any necessary modifications to `menu_items` and return the
        list back to the calling menu tag to render in templates. Any
        additional items added should have a `text` and `href` attribute
        as a minimum.

        `original_menu_tag` should be one of 'main_menu', 'section_menu' or
        'children_menu', which should be useful when extending/overriding.
        """
        if (allow_repeating_parents and menu_items and self.repeat_in_subnav):
            """
            This page should have a version of itself repeated alongside
            children in the subnav, so we create a new item and prepend it to
            menu_items.
            """
            repeated_item = self.get_repeated_menu_item(
                current_page=current_page,
                current_site=current_site,
                apply_active_classes=apply_active_classes,
                original_menu_tag=original_menu_tag,
                use_absolute_page_urls=use_absolute_page_urls,
                request=request,
            )
            menu_items.insert(0, repeated_item)
        return menu_items
[ "def", "modify_submenu_items", "(", "self", ",", "menu_items", ",", "current_page", ",", "current_ancestor_ids", ",", "current_site", ",", "allow_repeating_parents", ",", "apply_active_classes", ",", "original_menu_tag", ",", "menu_instance", "=", "None", ",", "request", "=", "None", ",", "use_absolute_page_urls", "=", "False", ",", ")", ":", "if", "(", "allow_repeating_parents", "and", "menu_items", "and", "self", ".", "repeat_in_subnav", ")", ":", "\"\"\"\n This page should have a version of itself repeated alongside\n children in the subnav, so we create a new item and prepend it to\n menu_items.\n \"\"\"", "repeated_item", "=", "self", ".", "get_repeated_menu_item", "(", "current_page", "=", "current_page", ",", "current_site", "=", "current_site", ",", "apply_active_classes", "=", "apply_active_classes", ",", "original_menu_tag", "=", "original_menu_tag", ",", "use_absolute_page_urls", "=", "use_absolute_page_urls", ",", "request", "=", "request", ",", ")", "menu_items", ".", "insert", "(", "0", ",", "repeated_item", ")", "return", "menu_items" ]
Make any necessary modifications to `menu_items` and return the list back to the calling menu tag to render in templates. Any additional items added should have a `text` and `href` attribute as a minimum. `original_menu_tag` should be one of 'main_menu', 'section_menu' or 'children_menu', which should be useful when extending/overriding.
[ "Make", "any", "necessary", "modifications", "to", "menu_items", "and", "return", "the", "list", "back", "to", "the", "calling", "menu", "tag", "to", "render", "in", "templates", ".", "Any", "additional", "items", "added", "should", "have", "a", "text", "and", "href", "attribute", "as", "a", "minimum", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/pages.py#L37-L65
train
rkhleics/wagtailmenus
wagtailmenus/models/pages.py
MenuPageMixin.has_submenu_items
def has_submenu_items(self, current_page, allow_repeating_parents,
                          original_menu_tag, menu_instance=None, request=None):
        """
        When rendering pages in a menu template a `has_children_in_menu`
        attribute is added to each page, letting template developers know
        whether or not the item has a submenu that must be rendered.

        By default, we return a boolean indicating whether the page has
        suitable child pages to include in such a menu. But, if you are
        overriding the `modify_submenu_items` method to programmatically add
        items that aren't child pages, you'll likely need to alter this
        method too, so the template knows there are sub items to be rendered.
        """
        return menu_instance.page_has_children(self)
python
def has_submenu_items(self, current_page, allow_repeating_parents,
                          original_menu_tag, menu_instance=None, request=None):
        """
        When rendering pages in a menu template a `has_children_in_menu`
        attribute is added to each page, letting template developers know
        whether or not the item has a submenu that must be rendered.

        By default, we return a boolean indicating whether the page has
        suitable child pages to include in such a menu. But, if you are
        overriding the `modify_submenu_items` method to programmatically add
        items that aren't child pages, you'll likely need to alter this
        method too, so the template knows there are sub items to be rendered.
        """
        return menu_instance.page_has_children(self)
[ "def", "has_submenu_items", "(", "self", ",", "current_page", ",", "allow_repeating_parents", ",", "original_menu_tag", ",", "menu_instance", "=", "None", ",", "request", "=", "None", ")", ":", "return", "menu_instance", ".", "page_has_children", "(", "self", ")" ]
When rendering pages in a menu template a `has_children_in_menu` attribute is added to each page, letting template developers know whether or not the item has a submenu that must be rendered. By default, we return a boolean indicating whether the page has suitable child pages to include in such a menu. But, if you are overriding the `modify_submenu_items` method to programmatically add items that aren't child pages, you'll likely need to alter this method too, so the template knows there are sub items to be rendered.
[ "When", "rendering", "pages", "in", "a", "menu", "template", "a", "has_children_in_menu", "attribute", "is", "added", "to", "each", "page", "letting", "template", "developers", "know", "whether", "or", "not", "the", "item", "has", "a", "submenu", "that", "must", "be", "rendered", "." ]
a41f240bed0d362e0d4dd4ef04a230f2b1827a93
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/pages.py#L67-L80
train
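The two records above are companion override points: if `modify_submenu_items()` injects entries that aren't child pages, `has_submenu_items()` must also report True or the template will never render the submenu. A hedged sketch of overriding both on a page model (`EventsIndexPage` and the archive link are invented, `MenuPage` is wagtailmenus' Page-plus-mixin base, and the `**kwargs` signatures assume keyword invocation, as the `menu_instance=None, request=None` defaults above suggest):

from wagtailmenus.models import MenuPage


class EventsIndexPage(MenuPage):
    def modify_submenu_items(self, menu_items, **kwargs):
        menu_items = super().modify_submenu_items(menu_items, **kwargs)
        # Non-page entries need 'href' and 'text' attributes at a minimum
        menu_items.append({'href': '/events/archive/', 'text': 'Archive'})
        return menu_items

    def has_submenu_items(self, **kwargs):
        # Always True, because the archive link above is always appended
        return True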