Dataset schema (ranges shown as min / max):

    content            stringlengths    21 / 24.2k
    avg_line_length    float64          10.4 / 231
    max_line_length    int64            20 / 8.17k
    alphanum_fraction  float64          0.25 / 0.82
    licenses           sequence
    repository_name    stringlengths    11 / 51
    path               stringlengths    7 / 121
    size               int64            21 / 24.2k
    lang               stringclasses    1 value
    nl_text            stringlengths    19 / 20.6k
    nl_size            int64            19 / 20.6k
    nl_ratio           float64          0.8 / 1.07

content:
#-----------------------------------------------------------------------------
# Copyright (c) 2015-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------

# netCDF4 (tested with v.1.1.9) has some hidden imports
hiddenimports = ['netCDF4.utils', 'netcdftime']
avg_line_length: 39.538462
max_line_length: 78
alphanum_fraction: 0.529183
licenses: ["MIT"]
repository_name: JohnWJackson/arcadePython
path: venv/Lib/site-packages/PyInstaller/hooks/hook-netCDF4.py
size: 514
lang: Python
nl_text:
----------------------------------------------------------------------------- Copyright (c) 2015-2017, PyInstaller Development Team. Distributed under the terms of the GNU General Public License with exception for distributing bootloader. The full license is in the file COPYING.txt, distributed with this software.----------------------------------------------------------------------------- netCDF4 (tested with v.1.1.9) has some hidden imports
nl_size: 446
nl_ratio: 0.867704
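PyInstaller discovers files named hook-<module>.py on its hook path and reads module-level variables such as hiddenimports, which is why the two-line file above is all netCDF4 needs. A minimal sketch of the same mechanism for a hypothetical package named mypkg (collect_submodules is PyInstaller's helper for pulling in a whole subpackage):

# Hypothetical hook file: hook-mypkg.py
from PyInstaller.utils.hooks import collect_submodules

# Declare imports that static analysis cannot see, e.g. modules loaded
# dynamically via __import__ or importlib at runtime.
hiddenimports = ['mypkg._compat'] + collect_submodules('mypkg.plugins')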

content:
# The MIT License (MIT)
#
# Copyright (c) 2015, Nicolas Sebrecht & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import sys


def testingPath():
    return os.path.join(
        os.path.abspath(sys.modules['imapfw'].__path__[0]), 'testing')
avg_line_length: 41.419355
max_line_length: 79
alphanum_fraction: 0.759346
licenses: ["MIT"]
repository_name: Deepanshu2017/imapfw
path: imapfw/testing/libcore.py
size: 1,284
lang: Python
nl_text:
The MIT License (MIT) Copyright (c) 2015, Nicolas Sebrecht & contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
nl_size: 1,094
nl_ratio: 0.852025
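A usage sketch for the testingPath() helper above: it resolves the testing/ directory inside the installed imapfw package via sys.modules, so fixtures can be located regardless of the current working directory. The fixture filename below is hypothetical:

import os

import imapfw  # must be imported so sys.modules['imapfw'] is populated
from imapfw.testing.libcore import testingPath

# Hypothetical fixture lookup relative to the package's testing directory.
fixture = os.path.join(testingPath(), 'sample_fixture.conf')
print(fixture)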

content:
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Module: Collect/SRTM

Description:
This module downloads DEM data from http://earlywarning.usgs.gov/hydrodata/.
Use the DEM functions to download and create DEM images in Gtiff format.

Examples:
from pyWAPOR.Collect import SRTM
SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109])
"""

from .DEM import main as DEM

__all__ = ['DEM']

__version__ = '0.1'
avg_line_length: 20.95
max_line_length: 76
alphanum_fraction: 0.711217
licenses: ["Apache-2.0"]
repository_name: DHI-GRAS/wapor-et-look
path: pyWAPOR/Collect/SRTM/__init__.py
size: 419
lang: Python
nl_text:
Authors: Tim Hessels Module: Collect/SRTM Description: This module downloads DEM data from http://earlywarning.usgs.gov/hydrodata/. Use the DEM functions to download and create DEM images in Gtiff format. Examples: from pyWAPOR.Collect import SRTM SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109]) -*- coding: utf-8 -*-
nl_size: 341
nl_ratio: 0.813842
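The module's own docstring shows the intended call; repeated here as a runnable sketch with the docstring's illustrative directory and bounding box:

from pyWAPOR.Collect import SRTM

# Download SRTM DEM data for a lat/lon box into a local directory (GTiff).
SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109])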
"""Plot graphs from human-readable file formats."""
avg_line_length: 26
max_line_length: 51
alphanum_fraction: 0.730769
licenses: ["MIT"]
repository_name: Sean1708/uniplot
path: uniplot/__init__.py
size: 52
lang: Python
nl_text:
Plot graphs from human-readable file formats.
nl_size: 45
nl_ratio: 0.865385

content:
# Configuration file for jupyter-notebook.

#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------

# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.

#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------

# This is an application.

# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'

# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'

# Set the log level by value or name.
# c.Application.log_level = 30

#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------

# Base class for Jupyter applications

# Answer yes to any prompts.
c.JupyterApp.answer_yes = True

# Full path of a config file.
# c.JupyterApp.config_file = u''

# Generate default config file.
# c.JupyterApp.generate_config = False

# Specify a config file to load.
# c.JupyterApp.config_file_name = u''

#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------

# The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 0

# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined

# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''

# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined

# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'

# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'

# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined

# Note: These extensions require the ~/.jupyter path to exist otherwise, errors will occur on startup
c.NotebookApp.server_extensions=['ipyparallel.nbextension']

# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''

# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'

# The port the notebook server will listen on.
c.NotebookApp.port = 8754

# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>

# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
c.NotebookApp.allow_origin = '*'

# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'>

# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
#     Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''

# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''

# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>

# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/'

# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>

# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined

# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'/root/pipeline/myapps/jupyter/'

# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>

# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = u''

# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined

#
# c.NotebookApp.file_to_run = ''

# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'

# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True

# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False

# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''

# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False

# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
#   from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''

# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined

# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False

# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined

# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>

# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False

# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined

# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>

# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''

# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined

# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''

#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------

# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.

#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------

# Mixin for configurable classes that work with connection files

# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0

# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = u''

# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''

# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0

# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0

# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0

#
# c.ConnectionFileMixin.transport = 'tcp'

# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0

#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------

# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.

# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter
# command line.
# c.KernelManager.kernel_cmd = traitlets.Undefined

# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False

#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------

# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
#     whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
#     importstrings for methods to serialize message parts. If just
#     'json' or 'pickle', predefined JSON and pickle packers will be used.
#     Otherwise, the entire importstring must be used.
#
#     The functions must accept at least valid JSON input, and output *bytes*.
#
#     For example, to use msgpack:
#     packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
#     You can also set the pack/unpack callables for serialization directly.
# session : bytes
#     the ID of this Session object. The default is to generate a new UUID.
# username : unicode
#     username added to message headers. The default is to ask the OS.
# key : bytes
#     The key used to initialize an HMAC signature. If unset, messages
#     will not be signed or checked.
# keyfile : filepath
#     The file containing a key. If this is set, `key` will be initialized
#     to the contents of the file.

# Username for the Session. Default is your system username.
# c.Session.username = u'username'

# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536

# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'

# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = traitlets.Undefined

# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536

# The UUID identifying this session.
# c.Session.session = u''

# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'

# execution key, for signing messages.
# c.Session.key = ''

# Debug output in the Session
# c.Session.debug = False

# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'

# path to file containing execution key.
# c.Session.keyfile = ''

# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024

# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64

#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------

# A class for managing multiple kernels.

# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python2'

# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'

#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------

# A KernelManager that handles notebook mapping and HTTP error handling

#
# c.MappingKernelManager.root_dir = u''

#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------

# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
#   indicating the root path.

# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'

# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
#     hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
#   Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None

#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>

# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined

# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'

# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'

#
# c.ContentsManager.checkpoints = traitlets.Undefined

#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined

#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------

# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False

#
# c.FileContentsManager.root_dir = u''

# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
#     hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
#   representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None

#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------

# A class for computing and verifying notebook signatures.

# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535

# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''

# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = u''

# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'

# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''

#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------

# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
avg_line_length: 36.942197
max_line_length: 109
alphanum_fraction: 0.668388
licenses: ["Apache-2.0"]
repository_name: TrinathY/pipeline
path: config/jupyter/jupyter_notebook_config.py
size: 19,173
lang: Python
nl_text:
Configuration file for jupyter-notebook.------------------------------------------------------------------------------ Configurable configuration------------------------------------------------------------------------------------------------------------------------------------------------------------ SingletonConfigurable configuration------------------------------------------------------------------------------ A configurable that only allows one instance. This class is for classes that should only have one instance of itself or *any* subclass. To create and retrieve such a class use the :meth:`SingletonConfigurable.instance` method.------------------------------------------------------------------------------ Application configuration------------------------------------------------------------------------------ This is an application. The date format used by logging formatters for %(asctime)s c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' The Logging format template c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' Set the log level by value or name. c.Application.log_level = 30------------------------------------------------------------------------------ JupyterApp configuration------------------------------------------------------------------------------ Base class for Jupyter applications Answer yes to any prompts. Full path of a config file. c.JupyterApp.config_file = u'' Generate default config file. c.JupyterApp.generate_config = False Specify a config file to load. c.JupyterApp.config_file_name = u''------------------------------------------------------------------------------ NotebookApp configuration------------------------------------------------------------------------------ The number of additional ports to try if the specified port is not available. Extra variables to supply to jinja templates when rendering. c.NotebookApp.jinja_template_vars = traitlets.Undefined The url for MathJax.js. c.NotebookApp.mathjax_url = '' Supply extra arguments that will be passed to Jinja environment. c.NotebookApp.jinja_environment_options = traitlets.Undefined The IP address the notebook server will listen on. DEPRECATED use base_url c.NotebookApp.base_project_url = '/' Python modules to load as notebook server extensions. This is an experimental API, and may change in future releases. c.NotebookApp.server_extensions = traitlets.Undefined Note: These extensions require the ~/.jupyter path to exist otherwise, errors will occur on startup The random bytes used to secure cookies. By default this is a new random number every time you start the Notebook. Set it to a value in a config file to enable logins to persist across server sessions. Note: Cookie secrets should be kept private, do not share config files with cookie_secret stored in plaintext (you can read the value from a file). c.NotebookApp.cookie_secret = '' The default URL to redirect to from `/` c.NotebookApp.default_url = '/tree' The port the notebook server will listen on. The kernel spec manager class to use. Should be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. The Api of KernelSpecManager is provisional and might change without warning between this version of IPython and the next stable one. c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'> Set the Access-Control-Allow-Origin header Use '*' to allow any origin to access your server. Takes precedence over allow_origin_pat. The notebook manager class to use. 
c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'> Use a regular expression for the Access-Control-Allow-Origin header Requests from an origin matching the expression will get replies with: Access-Control-Allow-Origin: origin where `origin` is the origin of the request. Ignored if allow_origin is set. c.NotebookApp.allow_origin_pat = '' The full path to an SSL/TLS certificate file. c.NotebookApp.certfile = u'' The logout handler class to use. c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'> The base URL for the notebook server. Leading and trailing slashes can be omitted, and will automatically be added. The session manager class to use. c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'> Supply overrides for the tornado.web.Application that the IPython notebook uses. c.NotebookApp.tornado_settings = traitlets.Undefined The directory to use for notebooks and kernels. The kernel manager class to use. c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'> The file where the cookie secret is stored. c.NotebookApp.cookie_secret_file = u'' Supply SSL options for the tornado HTTPServer. See the tornado docs for details. c.NotebookApp.ssl_options = traitlets.Undefined c.NotebookApp.file_to_run = '' DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. c.NotebookApp.pylab = 'disabled' Whether to enable MathJax for typesetting math/TeX MathJax is the javascript library IPython uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. c.NotebookApp.enable_mathjax = True Reraise exceptions encountered loading server extensions? c.NotebookApp.reraise_server_extension_failures = False The base URL for websockets, if it differs from the HTTP server (hint: it almost certainly doesn't). Should be in the form of an HTTP origin: ws[s]://hostname[:port] c.NotebookApp.websocket_url = '' Whether to open in a browser after starting. The specific browser used is platform dependent and determined by the python standard library `webbrowser` module, unless it is overridden using the --browser (NotebookApp.browser) configuration option. Hashed password to use for web authentication. To generate, type in a python/IPython shell: from notebook.auth import passwd; passwd() The string should be of the form type:salt:hashed-password. c.NotebookApp.password = u'' extra paths to look for Javascript notebook extensions c.NotebookApp.extra_nbextensions_path = traitlets.Undefined Set the Access-Control-Allow-Credentials: true header c.NotebookApp.allow_credentials = False Extra paths to search for serving static files. This allows adding javascript/css to be available from the notebook server machine, or overriding individual files in the IPython c.NotebookApp.extra_static_paths = traitlets.Undefined The login handler class to use. c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'> Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded- For headerssent by the upstream reverse proxy. Necessary if the proxy handles SSL c.NotebookApp.trust_xheaders = False Extra paths to search for serving jinja templates. Can be used to override templates from notebook.templates. 
c.NotebookApp.extra_template_paths = traitlets.Undefined The config manager class to use c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'> The full path to a private key file for usage with SSL/TLS. c.NotebookApp.keyfile = u'' DEPRECATED, use tornado_settings c.NotebookApp.webapp_settings = traitlets.Undefined Specify what command to use to invoke a web browser when opening the notebook. If not specified, the default browser will be determined by the `webbrowser` standard library module, which allows setting of the BROWSER environment variable to override it. c.NotebookApp.browser = u''------------------------------------------------------------------------------ LoggingConfigurable configuration------------------------------------------------------------------------------ A parent class for Configurables that log. Subclasses have a log trait, and the default behavior is to get the logger from the currently running Application.------------------------------------------------------------------------------ ConnectionFileMixin configuration------------------------------------------------------------------------------ Mixin for configurable classes that work with connection files set the stdin (ROUTER) port [default: random] c.ConnectionFileMixin.stdin_port = 0 Set the kernel's IP address [default localhost]. If the IP address is something other than localhost, then Consoles on other machines will be able to connect to the Kernel, so be careful! c.ConnectionFileMixin.ip = u'' JSON file in which to store connection info [default: kernel-<pid>.json] This file will contain the IP, ports, and authentication key needed to connect clients to this kernel. By default, this file will be created in the security dir of the current profile, but can be specified by absolute path. c.ConnectionFileMixin.connection_file = '' set the control (ROUTER) port [default: random] c.ConnectionFileMixin.control_port = 0 set the heartbeat port [default: random] c.ConnectionFileMixin.hb_port = 0 set the shell (ROUTER) port [default: random] c.ConnectionFileMixin.shell_port = 0 c.ConnectionFileMixin.transport = 'tcp' set the iopub (PUB) port [default: random] c.ConnectionFileMixin.iopub_port = 0------------------------------------------------------------------------------ KernelManager configuration------------------------------------------------------------------------------ Manages a single kernel in a subprocess on this host. This version starts kernels with Popen. DEPRECATED: Use kernel_name instead. The Popen Command to launch the kernel. Override this if you have a custom kernel. If kernel_cmd is specified in a configuration file, Jupyter does not pass any arguments to the kernel, because it cannot make any assumptions about the arguments that the kernel understands. In particular, this means that the kernel does not receive the option --debug if it given on the Jupyter command line. c.KernelManager.kernel_cmd = traitlets.Undefined Should we autorestart the kernel if it dies. c.KernelManager.autorestart = False------------------------------------------------------------------------------ Session configuration------------------------------------------------------------------------------ Object for handling serialization and sending of messages. The Session object handles building messages and sending them with ZMQ sockets or ZMQStream objects. 
Objects can communicate with each other over the network via Session objects, and only need to work with the dict-based IPython message spec. The Session will handle serialization/deserialization, security, and metadata. Sessions support configurable serialization via packer/unpacker traits, and signing with HMAC digests via the key/keyfile traits. Parameters ---------- debug : bool whether to trigger extra debugging statements packer/unpacker : str : 'json', 'pickle' or import_string importstrings for methods to serialize message parts. If just 'json' or 'pickle', predefined JSON and pickle packers will be used. Otherwise, the entire importstring must be used. The functions must accept at least valid JSON input, and output *bytes*. For example, to use msgpack: packer = 'msgpack.packb', unpacker='msgpack.unpackb' pack/unpack : callables You can also set the pack/unpack callables for serialization directly. session : bytes the ID of this Session object. The default is to generate a new UUID. username : unicode username added to message headers. The default is to ask the OS. key : bytes The key used to initialize an HMAC signature. If unset, messages will not be signed or checked. keyfile : filepath The file containing a key. If this is set, `key` will be initialized to the contents of the file. Username for the Session. Default is your system username. c.Session.username = u'username' Threshold (in bytes) beyond which a buffer should be sent without copying. c.Session.copy_threshold = 65536 The name of the packer for serializing messages. Should be one of 'json', 'pickle', or an import name for a custom callable serializer. c.Session.packer = 'json' Metadata dictionary, which serves as the default top-level metadata dict for each message. c.Session.metadata = traitlets.Undefined The maximum number of digests to remember. The digest history will be culled when it exceeds this value. c.Session.digest_history_size = 65536 The UUID identifying this session. c.Session.session = u'' The digest scheme used to construct the message signatures. Must have the form 'hmac-HASH'. c.Session.signature_scheme = 'hmac-sha256' execution key, for signing messages. c.Session.key = '' Debug output in the Session c.Session.debug = False The name of the unpacker for unserializing messages. Only used with custom functions for `packer`. c.Session.unpacker = 'json' path to file containing execution key. c.Session.keyfile = '' Threshold (in bytes) beyond which an object's buffer should be extracted to avoid pickling. c.Session.buffer_threshold = 1024 The maximum number of items for a container to be introspected for custom serialization. Containers larger than this are pickled outright. c.Session.item_threshold = 64------------------------------------------------------------------------------ MultiKernelManager configuration------------------------------------------------------------------------------ A class for managing multiple kernels. The name of the default kernel to start c.MultiKernelManager.default_kernel_name = 'python2' The kernel manager class. This is configurable to allow subclassing of the KernelManager for customized behavior. 
c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'------------------------------------------------------------------------------ MappingKernelManager configuration------------------------------------------------------------------------------ A KernelManager that handles notebook mapping and HTTP error handling c.MappingKernelManager.root_dir = u''------------------------------------------------------------------------------ ContentsManager configuration------------------------------------------------------------------------------ Base class for serving files and directories. This serves any text or binary file, as well as directories, with special handling for JSON notebook documents. Most APIs take a path argument, which is always an API-style unicode path, and always refers to a directory. - unicode, not url-escaped - '/'-separated - leading and trailing '/' will be stripped - if unspecified, path defaults to '', indicating the root path. The base name used when creating untitled files. c.ContentsManager.untitled_file = 'untitled' Python callable or importstring thereof To be called on a contents model prior to save. This can be used to process the structure, such as removing notebook outputs or other side effects that should not be saved. It will be called as (all arguments passed by keyword):: hook(path=path, model=model, contents_manager=self) - model: the model to be saved. Includes file contents. Modifying this dict will affect the file that is stored. - path: the API path of the save destination - contents_manager: this ContentsManager instance c.ContentsManager.pre_save_hook = None c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'> Glob patterns to hide in file and directory listings. c.ContentsManager.hide_globs = traitlets.Undefined The base name used when creating untitled notebooks. c.ContentsManager.untitled_notebook = 'Untitled' The base name used when creating untitled directories. c.ContentsManager.untitled_directory = 'Untitled Folder' c.ContentsManager.checkpoints = traitlets.Undefined c.ContentsManager.checkpoints_kwargs = traitlets.Undefined------------------------------------------------------------------------------ FileContentsManager configuration------------------------------------------------------------------------------ DEPRECATED, use post_save_hook c.FileContentsManager.save_script = False c.FileContentsManager.root_dir = u'' Python callable or importstring thereof to be called on the path of a file just saved. This can be used to process the file on disk, such as converting the notebook to a script or HTML via nbconvert. It will be called as (all arguments passed by keyword):: hook(os_path=os_path, model=model, contents_manager=instance) - path: the filesystem path to the file just written - model: the model representing the file - contents_manager: this ContentsManager instance c.FileContentsManager.post_save_hook = None------------------------------------------------------------------------------ NotebookNotary configuration------------------------------------------------------------------------------ A class for computing and verifying notebook signatures. The number of notebook signatures to cache. When the number of signatures exceeds this value, the oldest 25% of signatures will be culled. c.NotebookNotary.cache_size = 65535 The secret key with which notebooks are signed. c.NotebookNotary.secret = '' The sqlite file in which to store notebook signatures. 
By default, this will be in your Jupyter runtime directory. You can set it to ':memory:' to disable sqlite writing to the filesystem. c.NotebookNotary.db_file = u'' The hashing algorithm used to sign notebooks. c.NotebookNotary.algorithm = 'sha256' The file where the secret key is stored. c.NotebookNotary.secret_file = u''------------------------------------------------------------------------------ KernelSpecManager configuration------------------------------------------------------------------------------ Whitelist of allowed kernel names. By default, all installed kernels are allowed. c.KernelSpecManager.whitelist = traitlets.Undefined
nl_size: 17,935
nl_ratio: 0.93543
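One step the config's comments name but do not show end to end: producing the hashed value for c.NotebookApp.password. A sketch using the notebook-4.x-era API cited in the comments above; run it in a Python shell and paste the output into the config file:

from notebook.auth import passwd

# Returns a string of the form 'type:salt:hashed-password', e.g. 'sha1:...',
# suitable for c.NotebookApp.password in jupyter_notebook_config.py.
print(passwd('choose-a-passphrase'))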

content:
#
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
#

from enum import Enum, unique


@unique
class RunningMode(Enum):
    """
    Enum for running mode
    """
    # Run ParameterServer & ParameterServerAgent
    ANGEL_PS_PSAGENT = 0

    # Only Run ParameterServer
    ANGEL_PS = 1

    # Run ParameterServer & Worker(embedded ParameterServerAgent)
    ANGEL_PS_WORKER = 2
avg_line_length: 31.967742
max_line_length: 102
alphanum_fraction: 0.744702
licenses: ["Apache-2.0", "BSD-3-Clause"]
repository_name: 20100507/angel
path: angel-ps/python/build/lib/pyangel/running_mode.py
size: 991
lang: Python
nl_text:
Enum for running mode Tencent is pleased to support the open source community by making Angel available. Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/BSD-3-Clause Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and Run ParameterServer & ParameterServerAgent Only Run ParameterServer Run ParameterServer & Worker(embedded ParameterServerAgent)
nl_size: 793
nl_ratio: 0.800202
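A minimal usage sketch for the RunningMode enum above: @unique guarantees the three members have distinct values, and standard Enum lookup by value recovers a member from its stored integer:

mode = RunningMode.ANGEL_PS_WORKER
print(mode.name, mode.value)  # -> ANGEL_PS_WORKER 2

# Round-trip from the stored integer back to the enum member.
assert RunningMode(2) is RunningMode.ANGEL_PS_WORKER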

content:
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
avg_line_length: 50.318182
max_line_length: 80
alphanum_fraction: 0.775971
licenses: ["MIT"]
repository_name: FilipSchad/packit
path: packit/cli/__init__.py
size: 1,107
lang: Python
nl_text:
MIT License Copyright (c) 2019 Red Hat, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
nl_size: 1,065
nl_ratio: 0.96206

content:
# inclass/mongo_queries.py

import pymongo
import os
from dotenv import load_dotenv
import sqlite3

load_dotenv()

DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")

connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE"
print("----------------")
print("URI:", connection_uri)

client = pymongo.MongoClient(connection_uri)
print("----------------")
print("CLIENT:", type(client), client)
# print(dir(client))
# print("DB NAMES:", client.list_database_names()) #> ['admin', 'local']

db = client.ds14_db  # "ds14_db" or whatever you want to call it
# print("----------------")
# print("DB:", type(db), db)

# collection = db.ds14_pokemon_collection  # "ds14_collection" or whatever you want to call it
# print("----------------")
# print("COLLECTION:", type(collection), collection)
# print("----------------")
# # print("COLLECTIONS:")
# # print(db.list_collection_names())
# print("--------------------------------------")

################## ASSIGNMENT III #############################
# INSERT RPG DATA INTO MONGODB INSTANCE

# Create RPG database
db = client.rpg_data_db

# Establish sqlite3 connection to access rpg data
sl_conn = sqlite3.connect("data/rpg_db_original.sqlite3")
sl_curs = sl_conn.cursor()

################# CHARACTERS ###########################

# ## Create new collection for RPG data
# col_characters = db.character_collection

# ## Establish SQL syntax for query
# rpg_characters = 'SELECT * FROM charactercreator_character'

# # Function to loop through characters and return list of dictionaries
# def all_chars():
#     query = rpg_characters
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "character_id": row[0],
#             "name": row[1],
#             "level": row[2],
#             "exp": row[3],
#             "hp": row[4],
#             "strength": row[5],
#             "intelligence": row[6],
#             "dexterity": row[7],
#             "wisdom": row[8]
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# # print(character_dict_list)
# col_characters.insert_many(character_dict_list)
# print("DOCS(Num Characters):", col_characters.count_documents({}))
# # SELECT count(distinct id) from characters

################# MAGES ###########################

# col_mage = db.mage_collection
# mages = 'SELECT * FROM charactercreator_mage'

# def all_chars():
#     query = mages
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "character_ptr_id": row[0],
#             "has_pet": row[1],
#             "mana": row[2],
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_mage.insert_many(character_dict_list)
# print("DOCS:", col_mage.count_documents({}))

################# THIEVES ###########################

# col_thief = db.thief_collection
# thieves = 'SELECT * FROM charactercreator_thief'

# def all_chars():
#     query = thieves
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "character_ptr_id": row[0],
#             "is_sneaking": row[1],
#             "energy": row[2],
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_thief.insert_many(character_dict_list)
# print("DOCS:", col_thief.count_documents({}))

################# CLERICS ###########################

# col_cleric = db.cleric_collection
# clerics = 'SELECT * FROM charactercreator_cleric'

# def all_chars():
#     query = clerics
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "character_ptr_id": row[0],
#             "using_shield": row[1],
#             "mana": row[2],
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_cleric.insert_many(character_dict_list)
# print("DOCS:", col_cleric.count_documents({}))

################# FIGHTERS ###########################

# col_fighter = db.fighter_collection
# fighters = 'SELECT * FROM charactercreator_fighter'

# def all_chars():
#     query = fighters
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "character_ptr_id": row[0],
#             "using_shield": row[1],
#             "rage": row[2],
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_fighter.insert_many(character_dict_list)
# print("DOCS:", col_fighter.count_documents({}))

################# NECROMANCERS ###########################

# col_mancer = db.mancer_collection
# mancers = 'SELECT * FROM charactercreator_necromancer'

# def all_chars():
#     query = mancers
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "mage_ptr_id": row[0],
#             "talisman_charged": row[1],
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_mancer.insert_many(character_dict_list)
# print("DOCS:", col_mancer.count_documents({}))

################# ITEMS ###########################

# col_items = db.items_collection
# items = 'SELECT * FROM armory_item'

# def all_chars():
#     query = items
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "item_id": row[0],
#             "name": row[1],
#             "value": row[2],
#             "weight": row[3]
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_items.insert_many(character_dict_list)
# print("DOCS:", col_items.count_documents({}))

################# WEAPONS ###########################

# col_weapons = db.weapons_collection
# weapons = 'SELECT * FROM armory_weapon'

# def all_chars():
#     query = weapons
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "item_ptr_id": row[0],
#             "power": row[1]
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_weapons.insert_many(character_dict_list)
# print("DOCS:", col_weapons.count_documents({}))

################# INVENTORY ###########################

# col_inventory = db.inventory_collection
# records = 'SELECT * FROM charactercreator_character_inventory'

# def all_chars():
#     query = records
#     chars = sl_curs.execute(query)
#     char_data = []
#     for row in chars:
#         character = {
#             "id": row[0],
#             "character_id": row[1],
#             "item_id": row[2]
#         }
#         char_data.append(character)
#     result = char_data
#     return result

# character_dict_list = all_chars()
# col_inventory.insert_many(character_dict_list)
# print("DOCS:", col_inventory.count_documents({}))

# print("COLLECTIONS:")
# print(db.list_collection_names())

#################### IN-CLASS POKEMON INSERTS #############################

# collection.insert_one({
#     "name": "Pikachu",
#     "level": 30,
#     "exp": 76000000000,
#     "hp": 400,
#     "fav_icecream_flavors": ["vanila_bean", "choc"],
#     "stats": {"a":1, "b":2, "c":[1,2,3]}
# })
# print("DOCS:", collection.count_documents({}))  # SELECT count(distinct id) from pokemon
# print(collection.count_documents({"name": "Pikachu"}))  # SELECT
# count(distinct id) from pokemon WHERE name = "Pikachu"

# mewtwo = {
#     "name": "Mewtwo",
#     "level": 100,
#     "exp": 76000000000,
#     "hp": 450,
#     "strength": 550,
#     "intelligence": 450,
#     "dexterity": 300,
#     "wisdom": 575
# }

# blastoise = {
#     "name": "Blastoise",
#     "lvl": 70,  # OOPS we made a mistake with the structure of this dict
# }

# charmander = {
#     "nameeeeeee": "Charmander",
#     "level": 70,
#     "random_stat": {"a": 2}
# }

# skarmory = {
#     "name": "Skarmory",
#     "level": 22,
#     "exp": 42000,
#     "hp": 85,
#     "strength": 750,
#     "intelligence": 8,
#     "dexterity": 57
# }

# cubone = {
#     "name": "Cubone",
#     "level": 20,
#     "exp": 35000,
#     "hp": 80,
#     "strength": 600,
#     "intelligence": 60,
#     "dexterity": 200,
#     "wisdom": 200
# }

# scyther = {
#     "name": "Scyther",
#     "level": 99,
#     "exp": 7000,
#     "hp": 40,
#     "strength": 50,
#     "intelligence": 40,
#     "dexterity": 30,
#     "wisdom": 57
# }

# slowpoke = {
#     "name": "Slowpoke",
#     "level": 1,
#     "exp": 100,
#     "hp": 80,
#     "strength": 100,
#     "intelligence": 10,
#     "dexterity": 50,
#     "wisdom": 200
# }

# pokemon_team = [mewtwo, blastoise, skarmory, cubone, scyther, slowpoke, charmander]
# collection.insert_many(pokemon_team)
# print("DOCS:", collection.count_documents({}))  # SELECT count(distinct id) from pokemon

# #collection.insert_one({"_id": "OURVAL", "name":"TEST"})
# # can overwrite the _id but not insert duplicate _id values
# #breakpoint()

# pikas = list(collection.find({"name": "Pikachu"}))  # SELECT * FROM pokemon WHERE name = "Pikachu"
# # print(len(pikas), "PIKAS")
# # print(pikas[0]["_id"]) #> ObjectId('5ebc31c79c171e43bb5ed469')
# # print(pikas[0]["name"])

# # strong = list(collection.find({"level": {"$gte": 60}} $or {"lvl": {"$gte": 60}}))
# # strong = list(collection.find({"level": {"$gte": 60}, "$or" "lvl": {"$gte": 60}}))
# strong = list(collection.find({"$or": [{"level": {"$gte": 60}}, {"lvl": {"$gte": 60}}]}))
# # TODO: also try to account for our mistakes "lvl" vs "level"
# breakpoint()
# print(strong)
avg_line_length: 26.685864
max_line_length: 150
alphanum_fraction: 0.566314
licenses: ["MIT"]
repository_name: ekselan/DS-Unit-3-Sprint-2-SQL-and-Databases
path: assignment3/a3_mongo_queries_abw.py
size: 10,194
lang: Python
nl_text:
# inclass/mongo_queries.py

print(dir(client))
print("DB NAMES:", client.list_database_names())  # > ['admin', 'local']

# "ds14_db" or whatever you want to call it
print("----------------")
print("DB:", type(db), db)

collection = db.ds14_pokemon_collection  # "ds14_collection" or whatever you want to call it
print("----------------")
print("COLLECTION:", type(collection), collection)
print("----------------")
print("COLLECTIONS:")
print(db.list_collection_names())
print("--------------------------------------")

# ASSIGNMENT III
# INSERT RPG DATA INTO MONGODB INSTANCE
# Create RPG database
# Establish sqlite3 connection to access rpg data

# CHARACTERS
# Create new collection for RPG data
col_characters = db.character_collection
# Establish SQL syntax for query
rpg_characters = 'SELECT * FROM charactercreator_character'

# Function to loop through characters and return list of dictionaries
def all_chars():
    query = rpg_characters
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "character_id": row[0],
            "name": row[1],
            "level": row[2],
            "exp": row[3],
            "hp": row[4],
            "strength": row[5],
            "intelligence": row[6],
            "dexterity": row[7],
            "wisdom": row[8]
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
print(character_dict_list)
col_characters.insert_many(character_dict_list)
print("DOCS(Num Characters):", col_characters.count_documents({}))  # SELECT count(distinct id) from characters

# MAGES
col_mage = db.mage_collection
mages = 'SELECT * FROM charactercreator_mage'

def all_chars():
    query = mages
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "character_ptr_id": row[0],
            "has_pet": row[1],
            "mana": row[2],
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_mage.insert_many(character_dict_list)
print("DOCS:", col_mage.count_documents({}))

# THIEVES
col_thief = db.thief_collection
thieves = 'SELECT * FROM charactercreator_thief'

def all_chars():
    query = thieves
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "character_ptr_id": row[0],
            "is_sneaking": row[1],
            "energy": row[2],
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_thief.insert_many(character_dict_list)
print("DOCS:", col_thief.count_documents({}))

# CLERICS
col_cleric = db.cleric_collection
clerics = 'SELECT * FROM charactercreator_cleric'

def all_chars():
    query = clerics
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "character_ptr_id": row[0],
            "using_shield": row[1],
            "mana": row[2],
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_cleric.insert_many(character_dict_list)
print("DOCS:", col_cleric.count_documents({}))

# FIGHTERS
col_fighter = db.fighter_collection
fighters = 'SELECT * FROM charactercreator_fighter'

def all_chars():
    query = fighters
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "character_ptr_id": row[0],
            "using_shield": row[1],
            "rage": row[2],
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_fighter.insert_many(character_dict_list)
print("DOCS:", col_fighter.count_documents({}))

# NECROMANCERS
col_mancer = db.mancer_collection
mancers = 'SELECT * FROM charactercreator_necromancer'

def all_chars():
    query = mancers
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "mage_ptr_id": row[0],
            "talisman_charged": row[1],
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_mancer.insert_many(character_dict_list)
print("DOCS:", col_mancer.count_documents({}))

# ITEMS
col_items = db.items_collection
items = 'SELECT * FROM armory_item'

def all_chars():
    query = items
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "item_id": row[0],
            "name": row[1],
            "value": row[2],
            "weight": row[3]
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_items.insert_many(character_dict_list)
print("DOCS:", col_items.count_documents({}))

# WEAPONS
col_weapons = db.weapons_collection
weapons = 'SELECT * FROM armory_weapon'

def all_chars():
    query = weapons
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "item_ptr_id": row[0],
            "power": row[1]
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_weapons.insert_many(character_dict_list)
print("DOCS:", col_weapons.count_documents({}))

# INVENTORY
col_inventory = db.inventory_collection
records = 'SELECT * FROM charactercreator_character_inventory'

def all_chars():
    query = records
    chars = sl_curs.execute(query)
    char_data = []
    for row in chars:
        character = {
            "id": row[0],
            "character_id": row[1],
            "item_id": row[2]
        }
        char_data.append(character)
    result = char_data
    return result

character_dict_list = all_chars()
col_inventory.insert_many(character_dict_list)
print("DOCS:", col_inventory.count_documents({}))

print("COLLECTIONS:")
print(db.list_collection_names())

# IN-CLASS POKEMON INSERTS
collection.insert_one({
    "name": "Pikachu",
    "level": 30,
    "exp": 76000000000,
    "hp": 400,
    "fav_icecream_flavors": ["vanila_bean", "choc"],
    "stats": {"a": 1, "b": 2, "c": [1, 2, 3]}
})
print("DOCS:", collection.count_documents({}))  # SELECT count(distinct id) from pokemon
print(collection.count_documents({"name": "Pikachu"}))  # SELECT count(distinct id) from pokemon WHERE name = "Pikachu"

mewtwo = {
    "name": "Mewtwo",
    "level": 100,
    "exp": 76000000000,
    "hp": 450,
    "strength": 550,
    "intelligence": 450,
    "dexterity": 300,
    "wisdom": 575
}

blastoise = {
    "name": "Blastoise",
    "lvl": 70,  # OOPS we made a mistake with the structure of this dict
}

charmander = {
    "nameeeeeee": "Charmander",
    "level": 70,
    "random_stat": {"a": 2}
}

skarmory = {
    "name": "Skarmory",
    "level": 22,
    "exp": 42000,
    "hp": 85,
    "strength": 750,
    "intelligence": 8,
    "dexterity": 57
}

cubone = {
    "name": "Cubone",
    "level": 20,
    "exp": 35000,
    "hp": 80,
    "strength": 600,
    "intelligence": 60,
    "dexterity": 200,
    "wisdom": 200
}

scyther = {
    "name": "Scyther",
    "level": 99,
    "exp": 7000,
    "hp": 40,
    "strength": 50,
    "intelligence": 40,
    "dexterity": 30,
    "wisdom": 57
}

slowpoke = {
    "name": "Slowpoke",
    "level": 1,
    "exp": 100,
    "hp": 80,
    "strength": 100,
    "intelligence": 10,
    "dexterity": 50,
    "wisdom": 200
}

pokemon_team = [mewtwo, blastoise, skarmory, cubone, scyther, slowpoke, charmander]
collection.insert_many(pokemon_team)
print("DOCS:", collection.count_documents({}))  # SELECT count(distinct id) from pokemon

collection.insert_one({"_id": "OURVAL", "name": "TEST"})
# can overwrite the _id but not insert duplicate _id values
breakpoint()

pikas = list(collection.find({"name": "Pikachu"}))  # SELECT * FROM pokemon WHERE name = "Pikachu"
print(len(pikas), "PIKAS")
print(pikas[0]["_id"])  # > ObjectId('5ebc31c79c171e43bb5ed469')
print(pikas[0]["name"])

# strong = list(collection.find({"level": {"$gte": 60}} $or {"lvl": {"$gte": 60}}))
# strong = list(collection.find({"level": {"$gte": 60}, "$or" "lvl": {"$gte": 60}}))
strong = list(collection.find({"$or": [{"level": {"$gte": 60}}, {"lvl": {"$gte": 60}}]}))
# TODO: also try to account for our mistakes "lvl" vs "level"
breakpoint()
print(strong)
8,333
0.817442
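The TODO at the end of the record above (accounting for documents saved with "lvl" instead of "level") suggests a natural follow-up. A minimal PyMongo sketch of one way to do it; the connection URL is an assumption, since the record never shows how `client` was created:

import pymongo

# Assumed connection details; the db/collection names come from the record above.
client = pymongo.MongoClient("mongodb://localhost:27017/")
collection = client.ds14_db.ds14_pokemon_collection

# Match documents regardless of which key was used when they were inserted.
strong = list(collection.find({"$or": [{"level": {"$gte": 60}}, {"lvl": {"$gte": 60}}]}))

# Normalize on read: prefer "level", fall back to "lvl".
for doc in strong:
    print(doc.get("name", "<unnamed>"), doc.get("level", doc.get("lvl")))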
# -*- coding: utf-8 -*- # # Python Github documentation build configuration file, created by # sphinx-quickstart on Tue Feb 3 23:23:15 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Python Github' copyright = u'2015, Nicolas Mendoza' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1.0' # The full version, including alpha/beta/rc tags. release = '0.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'PythonGithubdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'PythonGithub.tex', u'Python Github Documentation', u'Nicolas Mendoza', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pythongithub', u'Python Github Documentation', [u'Nicolas Mendoza'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'PythonGithub', u'Python Github Documentation', u'Nicolas Mendoza', 'PythonGithub', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
31.524164
79
0.71816
[ "MIT" ]
nicchub/PythonGithub
docs/conf.py
8,480
Python
-*- coding: utf-8 -*- Python Github documentation build configuration file, created by sphinx-quickstart on Tue Feb 3 23:23:15 2015. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.sys.path.insert(0, os.path.abspath('.')) -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here.needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix of source filenames. The encoding of source files.source_encoding = 'utf-8-sig' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages.language = None There are two options for replacing |today|: either, you set today to some non-false value, then it is used:today = '' Else, today_fmt is used as the format for a strftime call.today_fmt = '%B %d, %Y' List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. The reST default role (used for this markup: `text`) to use for all documents.default_role = None If true, '()' will be appended to :func: etc. cross-reference text.add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::).add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting.modindex_common_prefix = [] If true, keep warnings as "system message" paragraphs in the built documents.keep_warnings = False -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation.html_theme_options = {} Add any paths that contain custom themes here, relative to this directory.html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation".html_title = None A shorter title for the navigation bar. Default is the same as html_title.html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar.html_logo = None The name of an image file (within the static path) to use as favicon of the docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large.html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory. These files are copied directly to the root of the documentation.html_extra_path = [] If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.html_last_updated_fmt = '%b %d, %Y' If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities.html_use_smartypants = True Custom sidebar templates, maps document names to template names.html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names.html_additional_pages = {} If false, no module index is generated.html_domain_indices = True If false, no index is generated.html_use_index = True If true, the index is split into individual pages for each letter.html_split_index = False If true, links to the reST sources are added to the pages.html_show_sourcelink = True If true, "Created using Sphinx" is shown in the HTML footer. Default is True.html_show_sphinx = True If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.html_show_copyright = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served.html_use_opensearch = '' This is the file name suffix for HTML files (e.g. ".xhtml").html_file_suffix = None Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- The paper size ('letterpaper' or 'a4paper').'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt').'pointsize': '10pt', Additional stuff for the LaTeX preamble.'preamble': '', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). The name of an image file (relative to this directory) to place at the top of the title page.latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters.latex_use_parts = False If true, show page references after internal links.latex_show_pagerefs = False If true, show URL addresses after external links.latex_show_urls = False Documents to append as an appendix to all manuals.latex_appendices = [] If false, no module index is generated.latex_domain_indices = True -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). If true, show URL addresses after external links.man_show_urls = False -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. 
List of tuples (source start file, target name, title, author, dir menu entry, description, category) Documents to append as an appendix to all manuals.texinfo_appendices = [] If false, no module index is generated.texinfo_domain_indices = True How to display URL addresses: 'footnote', 'no', or 'inline'.texinfo_show_urls = 'footnote' If true, do not generate a @detailmenu in the "Top" node's menu.texinfo_no_detailmenu = False Example configuration for intersphinx: refer to the Python standard library.
7,108
0.838208
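The conf.py above ends with the legacy bare-URL form of intersphinx_mapping. For reference, a sketch of the equivalent named-key form, which recent Sphinx releases document as the standard spelling (the None means the objects.inv inventory is fetched from its default location):

intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}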
'''
TRIES

A trie supports search, insert, and deletion in O(L) time, where L is the length of the key.

Why Trie?
* With a trie, we can insert and find strings in O(L) time, where L represents the length
  of a single word. This is obviously faster than a BST. This is also faster than hashing
  because of the way it is implemented. We do not need to compute any hash function. No
  collision handling is required (like we do in open addressing and separate chaining).
* Another advantage of a trie is that we can easily print all words in alphabetical order,
  which is not easily possible with hashing.
* We can efficiently do prefix search (or auto-complete) with a trie.

Issues with Trie
Faster, but requires HUGE memory for storing the strings.

NOTE: Trie node class
struct TrieNode {
    struct TrieNode *children[ALPHABET_SIZE];
    // isEndOfWord is true if the node
    // represents end of a word
    bool isEndOfWord;
};
'''


class TrieNode:
    # Trie node class
    def __init__(self):
        self.children = [None] * 26
        # isEndOfWord is True if the node represents the end of a word
        self.isEndOfWord = False
28.925
147
0.703544
[ "MIT" ]
Wmeng98/Leetcode
CTCI/Data Structures/Trees/tries.py
1,157
Python
TRIES A trie supports search, insert, and deletion in O(L) time, where L is the length of the key. Why Trie? * With a trie, we can insert and find strings in O(L) time, where L represents the length of a single word. This is obviously faster than a BST. This is also faster than hashing because of the way it is implemented. We do not need to compute any hash function. No collision handling is required (like we do in open addressing and separate chaining). * Another advantage of a trie is that we can easily print all words in alphabetical order, which is not easily possible with hashing. * We can efficiently do prefix search (or auto-complete) with a trie. Issues with Trie Faster, but requires HUGE memory for storing the strings NOTE: Trie node class struct TrieNode { struct TrieNode *children[ALPHABET_SIZE]; // isEndOfWord is true if the node // represents end of a word bool isEndOfWord; }; Trie node class isEndOfWord is True if the node represents the end of a word
1,015
0.877269
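The file above defines only the node class, while its prose promises O(L) insert and search. A minimal sketch of those two operations, assuming keys use lowercase a-z (other characters would need a different index mapping); the node class is repeated so the snippet runs on its own:

class TrieNode:  # same node class as in the record above
    def __init__(self):
        self.children = [None] * 26
        self.isEndOfWord = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def _index(self, ch):
        # Map 'a'..'z' to 0..25
        return ord(ch) - ord('a')

    def insert(self, key):
        # Walk/extend one node per character: O(L)
        node = self.root
        for ch in key:
            i = self._index(ch)
            if node.children[i] is None:
                node.children[i] = TrieNode()
            node = node.children[i]
        node.isEndOfWord = True

    def search(self, key):
        # Walk one node per character: O(L)
        node = self.root
        for ch in key:
            i = self._index(ch)
            if node.children[i] is None:
                return False
            node = node.children[i]
        return node.isEndOfWord

t = Trie()
t.insert("geek")
print(t.search("geek"))  # True
print(t.search("gee"))   # False: a stored prefix is not a stored word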
# Configuration file with default options.
# There are four main sections: General, Features, LQP and Learning, corresponding to different
# functionalities. You can disable any of the Features or Learning sections (by commenting it out)
# according to your requirement.

[General]  # general options
idir=/home/hussain/datasets/LFW/lfwa  # images directory path
odir=/scratch/testing/new-experiments/  # path where cropped_images, learned model and computed features will be stored
dataset=LFW  # name of dataset to use; it can be either LFW or FERET [currently not supported]
width=80  # width of cropped images
height=150  # height of cropped images
padding=10  # (same as cellsize) use a padding of one cell on each side. This value must be the same as the cell-size option in the Features section
xoffset=1  # offsets to be added (from the center position) to the crop window placed over the original aligned images
yoffset=-4
cbdataset=train-val  # complete # This option is used only with LQP Features. It is used to choose a subset of the dataset for codebook learning, e.g. in case of LFW it can be either the view1 training-validation ('train-val') subset or the complete view1 set ('complete')
ftype=LQP  # Feature types. Choice can be LBP, LTP, LBP+LTP or LQP
usergb=False  # if color images, use color information during feature computations.

[Features]  # options for feature computation
listfile=""  # a list file containing the list of cropped images to compute features
cellsize=10  # cellsize for the histogram grid
tol=5  # [5,7] # tolerance values used for LTP or LQP features (can pass a list, i.e. tol=[5, 7])

[LQP]  # LQP Options
lqptype=2  # LQP type represents the LQP geometric structure.
           # Choices can be either Disk (2) or Hor+Ver+Diag+ADiag (9) strip.
lqpsize=7  # LQP size represents the radius (length of strip)
           # of the LQP disk (HVDA strip) (can pass a list, i.e. lqpsize=[5,7])
coding=4  # LQP encoding type can be: Binary (0), Ternary (1) or Split-Ternary (4)
cbsize=150  # Codebook size (number of visual words) used for
            # LQP computation (can pass a list, i.e. cbsize=[100, 150])
cbfile=""  # [Optional] A list file containing the list of images for learning the codebook

[Learning]  # options for model learning
view=complete  # view2 # complete # Choice of the dataset; options can be view1: used for
               # parameter tuning purposes; view2: used only for model
               # evaluation; complete: model parameters will first be
               # tuned on view1 and results will be reported on view2
ttype=with-pca  # Choice of Training with or without PCA (for feature
                # evaluation). Available options are with-pca
                # (a pca model is learned and features are compared in the pca space)
                # or without-pca (features are compared in their original space)
featdir=""  # Directory path where computed features have been stored, used if
            # learning is being done without a feature computation cycle.
dist=cosine  # Distance metric for comparing features. Choices are cosine, chi-square and L2.
             # For optimal results use the cosine metric for comparing PCA-reduced features and
             # chi-square for comparing non-reduced ones.
pcadim=[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000]
            # Number of PCA components. You can pass a scalar or a list, i.e.
            # pcadim=500. In case of a list, all the dimensions will be used
            # for model learning (on view1) and finally only the best performing one will be
            # kept. Note that a single model with max(pcadim) is learned in this case
            # but evaluation is done using all the dimensions.
            # Caution: providing a much higher dimension makes the learning slow and memory
            # intensive.
68.241379
256
0.698585
[ "BSD-3-Clause" ]
csgcmai/lqp_face
face-rec/config.py
3,958
Python
Configuration file with default options. There are four main sections: General, Features, LQP and Learning, corresponding to different functionalities. You can disable any of the Features or Learning sections (by commenting it out) according to your requirement. general options images directory path path where cropped_images, learned model and computed features will be stored name of dataset to use; it can be either LFW or FERET [currently not supported] width of cropped images height of cropped images (same as cellsize) use a padding of one cell on each side. This value must be the same as the cell-size option in the Features section offsets to be added (from the center position) to the crop window placed over the original aligned images complete This option is used only with LQP Features. It is used to choose a subset of the dataset for codebook learning, e.g. in case of LFW it can be either the view1 training-validation ('train-val') subset or the complete view1 set ('complete') Feature types. Choice can be LBP, LTP, LBP+LTP or LQP if color images, use color information during feature computations. options for feature computation a list file containing the list of cropped images to compute features cellsize for the histogram grid [5,7] tolerance values used for LTP or LQP features (can pass a list, i.e. tol=[5, 7]) LQP Options LQP type represents the LQP geometric structure. Choices can be either Disk (2) or Hor+Ver+Diag+ADiag (9) strip. LQP size represents the radius (length of strip) of the LQP disk (HVDA strip) (can pass a list, i.e. lqpsize=[5,7]) LQP encoding type can be: Binary (0), Ternary (1) or Split-Ternary (4) Codebook size (number of visual words) used for LQP computation (can pass a list, i.e. cbsize=[100, 150]) [Optional] A list file containing the list of images for learning the codebook options for model learning view2 complete Choice of the dataset; options can be view1: used for parameter tuning purposes; view2: used only for model evaluation; complete: model parameters will first be tuned on view1 and results will be reported on view2 Choice of Training with or without PCA (for feature evaluation). Available options are with-pca (a pca model is learned and features are compared in the pca space) or without-pca (features are compared in their original space) Directory path where computed features have been stored, used if learning is being done without a feature computation cycle. Distance metric for comparing features. Choices are cosine, chi-square and L2. For optimal results use the cosine metric for comparing PCA-reduced features and chi-square for comparing non-reduced ones. Number of PCA components. You can pass a scalar or a list, i.e. pcadim=500. In case of a list, all the dimensions will be used for model learning (on view1) and finally only the best performing one will be kept. Note that a single model with max(pcadim) is learned in this case but evaluation is done using all the dimensions. Caution: providing a much higher dimension makes the learning slow and memory intensive
3,347
0.845629
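A hedged sketch of reading an INI-style config like the record above with the Python standard library; the record's file is INI-formatted despite its .py path, and the ast-based parsing of list-valued options such as tol=[5, 7] is an assumption about how they are meant to be consumed:

import ast
import configparser

# Strip trailing "# ..." comments from option values while parsing.
cfg = configparser.ConfigParser(inline_comment_prefixes=('#',))
cfg.read('config.py')  # hypothetical local copy of the file above

width = cfg.getint('General', 'width')               # plain integer option
tol = ast.literal_eval(cfg.get('Features', 'tol'))   # handles both 5 and [5, 7]
print(width, tol)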
# -*- coding: utf-8 -*- # # python_exameple documentation build configuration file, created by # sphinx-quickstart on Fri Feb 26 00:29:33 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.autosummary', 'sphinx.ext.napoleon', ] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pyBAScloudAPI' copyright = u'2021, ProFM Facility & Project Management GmbH' author = u'ProFM Facility & Project Management GmbH' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'0.2.0' # The full version, including alpha/beta/rc tags. release = u'0.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = 'pyBAScloudAPIdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pyBAScloudAPI.tex', u'pyBAScloudAPI Documentation', u'ProFM Facility & Project Management GmbH', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pyBAScloudAPI', u'pyBAScloudAPI Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pyBAScloudAPI', u'pyBAScloudAPI Documentation', author, 'pyBAScloudAPI', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None}
32.511864
79
0.719737
[ "MIT" ]
bascloud/BASCloudAPI
pyBAScloudAPI/docs/conf.py
9,591
Python
-*- coding: utf-8 -*- python_exameple documentation build configuration file, created by sphinx-quickstart on Fri Feb 26 00:29:33 2016. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.sys.path.insert(0, os.path.abspath('.')) -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here.needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The encoding of source files.source_encoding = 'utf-8-sig' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. There are two options for replacing |today|: either, you set today to some non-false value, then it is used:today = '' Else, today_fmt is used as the format for a strftime call.today_fmt = '%B %d, %Y' List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. The reST default role (used for this markup: `text`) to use for all documents.default_role = None If true, '()' will be appended to :func: etc. cross-reference text.add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::).add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting.modindex_common_prefix = [] If true, keep warnings as "system message" paragraphs in the built documents.keep_warnings = False If true, `todo` and `todoList` produce output, else they produce nothing. -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation.html_theme_options = {} Add any paths that contain custom themes here, relative to this directory.html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation".html_title = None A shorter title for the navigation bar. 
Default is the same as html_title.html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar.html_logo = None The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large.html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory. These files are copied directly to the root of the documentation.html_extra_path = [] If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.html_last_updated_fmt = '%b %d, %Y' If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities.html_use_smartypants = True Custom sidebar templates, maps document names to template names.html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names.html_additional_pages = {} If false, no module index is generated.html_domain_indices = True If false, no index is generated.html_use_index = True If true, the index is split into individual pages for each letter.html_split_index = False If true, links to the reST sources are added to the pages.html_show_sourcelink = True If true, "Created using Sphinx" is shown in the HTML footer. Default is True.html_show_sphinx = True If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.html_show_copyright = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served.html_use_opensearch = '' This is the file name suffix for HTML files (e.g. ".xhtml").html_file_suffix = None Language to be used for generating the HTML full-text search index. Sphinx supports the following languages: 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'html_search_language = 'en' A dictionary with options for the search language support, empty by default. Now only 'ja' uses this config valuehtml_search_options = {'type': 'default'} The name of a javascript file (relative to the configuration directory) that implements a search results scorer. If empty, the default will be used.html_search_scorer = 'scorer.js' Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- The paper size ('letterpaper' or 'a4paper').'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt').'pointsize': '10pt', Additional stuff for the LaTeX preamble.'preamble': '', Latex figure (float) alignment'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). 
The name of an image file (relative to this directory) to place at the top of the title page.latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters.latex_use_parts = False If true, show page references after internal links.latex_show_pagerefs = False If true, show URL addresses after external links.latex_show_urls = False Documents to append as an appendix to all manuals.latex_appendices = [] If false, no module index is generated.latex_domain_indices = True -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). If true, show URL addresses after external links.man_show_urls = False -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) Documents to append as an appendix to all manuals.texinfo_appendices = [] If false, no module index is generated.texinfo_domain_indices = True How to display URL addresses: 'footnote', 'no', or 'inline'.texinfo_show_urls = 'footnote' If true, do not generate a @detailmenu in the "Top" node's menu.texinfo_no_detailmenu = False Example configuration for intersphinx: refer to the Python standard library.
8,018
0.835992
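Unlike the earlier conf.py, the one above enables sphinx.ext.napoleon and autosummary_generate = True. A sketch of the Google-style docstring format napoleon turns into structured documentation; the connect() function is a made-up illustration, not an actual pyBAScloudAPI call:

def connect(url, timeout=30):
    """Open a connection to an API endpoint.

    Args:
        url (str): Base URL of the server.
        timeout (int): Request timeout in seconds.

    Returns:
        bool: True if the connection attempt succeeded.
    """
    return True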
# Demo Python Datetime - The strftime() Method
'''
The strftime() Method

The datetime object has a method for formatting date objects into readable strings.
The method is called strftime(), and takes one parameter, format, to specify the
format of the returned string.

Directive  Description                                                  Example
%a         Weekday, short version                                       Wed
%A         Weekday, full version                                        Wednesday
%w         Weekday as a number 0-6, 0 is Sunday                         3
%d         Day of month 01-31                                           31
%b         Month name, short version                                    Dec
%B         Month name, full version                                     December
%m         Month as a number 01-12                                      12
%y         Year, short version, without century                         18
%Y         Year, full version                                           2018
%H         Hour 00-23                                                   17
%I         Hour 00-12                                                   05
%p         AM/PM                                                        PM
%M         Minute 00-59                                                 41
%S         Second 00-59                                                 08
%f         Microsecond 000000-999999                                    548513
%z         UTC offset                                                   +0100
%Z         Timezone                                                     CST
%j         Day number of year 001-366                                   365
%U         Week number of year, Sunday as the first day of week, 00-53  52
%W         Week number of year, Monday as the first day of week, 00-53  52
%c         Local version of date and time                               Mon Dec 31 17:41:00 2018
%x         Local version of date                                        12/31/18
%X         Local version of time                                        17:41:00
%%         A % character                                                %
'''

import datetime

x = datetime.datetime.now()

print(x)
print(x.strftime("%z"))
57.333333
111
0.354236
[ "MIT" ]
luis2ra/py3-00-w3schools
0-python-tutorial/25-dates05_strftime23_z.py
2,408
Python
The strftime() Method

The datetime object has a method for formatting date objects into readable strings.
The method is called strftime(), and takes one parameter, format, to specify the
format of the returned string.

Directive  Description                                                  Example
%a         Weekday, short version                                       Wed
%A         Weekday, full version                                        Wednesday
%w         Weekday as a number 0-6, 0 is Sunday                         3
%d         Day of month 01-31                                           31
%b         Month name, short version                                    Dec
%B         Month name, full version                                     December
%m         Month as a number 01-12                                      12
%y         Year, short version, without century                         18
%Y         Year, full version                                           2018
%H         Hour 00-23                                                   17
%I         Hour 00-12                                                   05
%p         AM/PM                                                        PM
%M         Minute 00-59                                                 41
%S         Second 00-59                                                 08
%f         Microsecond 000000-999999                                    548513
%z         UTC offset                                                   +0100
%Z         Timezone                                                     CST
%j         Day number of year 001-366                                   365
%U         Week number of year, Sunday as the first day of week, 00-53  52
%W         Week number of year, Monday as the first day of week, 00-53  52
%c         Local version of date and time                               Mon Dec 31 17:41:00 2018
%x         Local version of date                                        12/31/18
%X         Local version of time                                        17:41:00
%%         A % character                                                %

Demo Python Datetime - The strftime() Method
2,588
1.074751
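A few more directives from the table above, applied the same way as the %z example in the record; the output comments assume the 2018 date used in the table:

import datetime

x = datetime.datetime.now()
print(x.strftime("%A"))         # e.g. Wednesday (weekday, full version)
print(x.strftime("%B %d, %Y"))  # e.g. December 31, 2018
print(x.strftime("%H:%M:%S"))   # e.g. 17:41:00 (24-hour time)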
#!/usr/bin/env python # encoding: utf-8 """ @version: v1.0 @author: Shijie Qin @license: Apache Licence @contact: qsj4work@gmail.com @site: https://shijieqin.github.io @software: PyCharm @file: __init__.py.py @time: 2018/11/8 3:13 PM """
17.928571
35
0.669323
[ "Apache-2.0" ]
shijieqin/flatfish
core/__init__.py
251
Python
@version: v1.0 @author: Shijie Qin @license: Apache Licence @contact: qsj4work@gmail.com @site: https://shijieqin.github.io @software: PyCharm @file: __init__.py.py @time: 2018/11/8 3:13 PM !/usr/bin/env python encoding: utf-8
237
0.944223
""" Listas Listas em Python funcionam como vetores/matrizes (arrays) em outras linguagens, com a diferença de serem DINÂMICO e também de podermos colocar QUALQUER tipo de dado. Linguagens C/Java: Arrays - Possuem tamanho e tipo de dado fixo; Ou seja, nestas linguagens se você criar um array do tipo int e com tamanho 5, este array sera SEMPRE do tipo inteiro e poderá ter SEMPRE no máximo 5 valores. Já em Python: - Dinâmico: Não possui tamanho fixo; Ou seja, podemos criar a lista e simplesmente ir adicionando elementos; - Qualquer tipo de dado; Não possuem tipo de dado fixo; Ou seja, podemos colocar qualquer tipo de dado; As listas são mutáveis! As listas em Python são representadas por colchetes: [] type([]) lista1 = [1, 99, 4, 27, 15, 22, 3, 1, 44, 42, 27] lista2 = ['G', 'e', 'e', 'k', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'y'] lista3 = [] lista4 = list(range(11)) lista5 = list('Geek University') # Podemos facilmente checar se determinado valor está contido na lista num = 18 if num in lista4: print(f'Encontrei o número {num}') else: print(f'Não encontrei o número {num}') # Podemos facilmente ordenar uma lista print(lista1) lista1.sort() print(lista1) # Podemos facilmente contar o número de ocorrências de um valor em uma lista print(lista1) print(lista1.count(1)) print(lista5) print(lista5.count('e')) # Adicionar elementos em listas # Para adicionar elementos em listas, utilizamos a função append print(lista1) lista1.append(42) print(lista1) # OBS: Com append, nós só conseguimos adicionar um (1) elementos por vez # lista1.append(12, 14, 56) # Erro lista1.append([8, 3, 1]) # Coloca a lista como elemento único (sublista) print(lista1) if [8, 3, 1] in lista1: print('Encontrei a lista') else: print('Nao encontrei a lista') lista1.extend([123, 44, 67]) # Coloca cada elemento da lista como valor adicional á lista print(lista1) # Podemos inserir um novo elemento na lista informando a posição do índice # Isso nao substitui o valor inicial. O mesmo será deslocado para a direita da lista. lista1.insert(2, 'Novo Valor') print(lista1) # Podemos facilmente juntar duas listas lista1 = lista1 + lista2 # lista1.extend(lista2) print(lista1) # Podemos facilmente inverter uma lista # Forma 1 lista1.reverse() lista2.reverse() print(lista1) print(lista2) # Forma 2 print(lista1[::-1]) print(lista2[::-1]) # Copiar uma lista lista6 = lista2.copy() print(lista6) # Podemos contar quantos elementos existem dentro da lista print(len(lista1)) # Podemos remover facilmente o último elemento de uma lista # O pop não somente remove o último elemento, mas também o retorna print(lista5) lista5.pop() print(lista5) # Podemos remover um elemento pelo índice # OBS: Os elementos á direita deste índice serão deslocados para a esquerda. # OBS: Se não houver elemento no índice informado, teremos o erro IndexError lista5.pop(2) print(lista5) # Podemos remover todos os elementos (Zerar a lista) print(lista5) lista5.clear() print(lista5) # Podemos facilmente repetir elementos em uma lista nova = [1, 2, 3] print(nova) nova = nova * 3 print(nova) # Podemos facilmente converter uma string para uma lista # Exemplo 1 curso = 'Programação em Python Essencial' print(curso) curso = curso.split() print(curso) # OBS: Por padrão, o split separa os elementos da lista pelo espaço entre elas. 
# Exemplo 2 curso = 'Programação,em,Python, Essencial' print(curso) curso = curso.split(',') print(curso) # Convertendo uma lista em uma string lista6 = ['Programação', 'em', 'Python', 'Essencial'] print(lista6) # Abaixo estamos falando: Pega a lista6, coloca o cifrão entre cada elemento e transforma em uma string curso = ' '.join(lista6) print(curso) curso = '$'.join(lista6) print(curso) # Podemos realmente colocar qualquer tipo de dado em uma lista, inclusive misturando esses dados lista6 = [1, 2.34, True, 'Geek', 'd', [1, 2, 3], 45345345345] print(lista6) print(type(lista6)) # Iterando sobre listas # Exemplo 1 - Utilizando for soma = 0 for elemento in lista1: print(elemento) soma = soma + elemento print(soma) # Exemplo 2 - Utlizando while carrinho = [] produto = '' while produto != 'sair': print("Adicione um produto na lista ou digite 'sair' para sair: ") produto = input() if produto != 'sair': carrinho.append(produto) for produto in carrinho: print(produto) # Utilizando variáveis em listas numeros = [1, 2, 3, 4, 5] print(numeros) num1 = 1 num2 = 2 num3 = 3 num4 = 4 num5 = 5 numeros = [num1, num2, num3, num4, num5] print(numeros) # Fazemos acessos aos elementos de forma indexada cores = ['verde', 'amarelo', 'azul', 'branco'] print(cores[0]) # verde print(cores[1]) # amarelo print(cores[2]) # azul print(cores[3]) # branco # Fazer acesso aos elementos de forma indexada inversa # Para entender melhor o índice negativo, pense na lista como um círculo, onde # o final de um elemento está ligado ao início da lista print(cores[-1]) # branco print(cores[-2]) # azul print(cores[-3]) # amarelo print(cores[-4]) # verde for cor in cores: print(cor) indice = 0 while indice < len(cores): print(cores[indice]) indice = indice + 1 cores = ['verde', 'amarelo', 'azul', 'branco'] # Gerar índice em um for for indice, cor in enumerate(cores): print(indice, cor) # Listas aceitam valores repetidos lista = [] lista.append(42) lista.append(42) lista.append(33) lista.append(33) lista.append(42) # Outros métodos não tão importantes mas também úteis # Encontrar o índice de um elemento na lista numeros = [5, 6, 7, 5, 8, 9, 10] # Em qual índice da lista está o valor 6? print(numeros.index(6)) # Em qual índice da lista está o valor 9?? 
print(numeros.index(9)) # print(numeros.index(19)) # Gera ValueError # OBS: Caso não tenha este elemento na lista, será apresentado erro ValueError # OBS: Retorna o índice do primeiro elemento encontrado print(numeros.index(5)) # Podemos fazer busca dentro de um range, ou seja, qual índice começar a buscar print(numeros.index(5, 1)) # Buscando a partir do índice 1 print(numeros.index(5, 2)) # Buscando a partir do índice 2 print(numeros.index(5, 3)) # Buscando a partir do índice 3 # print(numeros.index(5, 4)) # Buscando a partir do índice 4 # OBS: Caso não tenha este elemento na lista, será apresentado erro ValueError # Podemos fazer busca dentro de um range, início/fim print(numeros.index(8, 3, 6)) # Buscar o índice do valor 8, entre os índices 3 a 6 # Revisão do slicing # lista[inicio:fim:passo] # range(inicio:fim:passo) # Trabalhando com slice de listas com o parâmetro 'início' lista = [1, 2, 3, 4] print(lista[1:]) # Iniciando no índice 1 e pegando todos os elementos restantes # Trabalhando com slice de listas com o parâmetro 'fim' print(lista[:2]) # Começa em 0, pega até o índice 2 - 1 print(lista[:4]) # Começa em 0, pega até o índice 4 - 1 print(lista[1:3]) # Começa em 1, pega até o índice 3 - 1 # Trabalhando com slice de listas com o parâmetro 'passo' print(lista[1::2]) # Começa em 1, vai até o final, de 2 em 2 print(lista[::2]) # Começa em 0, vai até o final, de 2 em 2 # Invertendo valores em uma lista nomes = ['Geek', 'University'] nomes[0], nomes[1] = nomes[1], nomes[0] print(nomes) nomes = ['Geek', 'University'] nomes.reverse() print(nomes) # Soma*, Valor Máximo*, Valor Mínimo*, Tamanho # * Se os valores forem todos inteiros ou reais lista = [1, 2, 3, 4, 5, 6] print(sum(lista)) # Soma print(max(lista)) # Máximo Valor print(min(lista)) # Mínimo Valor print(len(lista)) # Tamanho da Lista # Transformar uma lista em tupla lista = [1, 2, 3, 4, 5, 6] print(lista) print(type(lista)) tupla = tuple(lista) print(tupla) print(type(tupla)) # Desempacotamento de listas listas = [1, 2, 3] num1, num2, num3 = lista print(num1) print(num2) print(num3) # OBS: Se tivermos um número diferente de elementos na lista ou variáveis para receber os dados, teremos ValueError # Copiando uma lista para outra (Shallow Copy e Deep Copy) # Forma 1 - Deep Copy lista = [1, 2, 3] e print(lista) nova = lista.copy() # Cópia print(nova) nova.append(4) print(lista) print(nova) # Veja que ao utilizarmos lista.copy() copiamos os dados da lista para uma nova lista, mas elas # ficaram totalmente independentes, ou seja, modificando uma lista, não afeta a outra. Isso em Python # é chamado de Deep Copy (Cópia Profunda) # Forma 2 - Shallow Copy lista = [1, 2, 3] print(lista) nova = lista # Cópia print(nova) nova.append(4) print(lista) print(nova) # Veja que utilizamos a cópia via atribuição e copiamos os dados da lista para a nova lista, mas # após realizar modificação em uma das listas, essa modificação se refletiu em ambas as listas. # Isso em Python é chamado de Shallow Copy. """
25.703812
116
0.712949
[ "MIT" ]
vdonoladev/aprendendo-programacao
Python/Programação_em_Python_Essencial/5- Coleções/listas.py
8,882
Python
Listas Listas em Python funcionam como vetores/matrizes (arrays) em outras linguagens, com a diferença de serem DINÂMICAS e também de podermos colocar QUALQUER tipo de dado. Linguagens C/Java: Arrays - Possuem tamanho e tipo de dado fixo; Ou seja, nestas linguagens, se você criar um array do tipo int e com tamanho 5, este array será SEMPRE do tipo inteiro e poderá ter SEMPRE no máximo 5 valores. Já em Python: - Dinâmico: Não possui tamanho fixo; Ou seja, podemos criar a lista e simplesmente ir adicionando elementos; - Qualquer tipo de dado: Não possuem tipo de dado fixo; Ou seja, podemos colocar qualquer tipo de dado; As listas são mutáveis! As listas em Python são representadas por colchetes: [] type([]) lista1 = [1, 99, 4, 27, 15, 22, 3, 1, 44, 42, 27] lista2 = ['G', 'e', 'e', 'k', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'y'] lista3 = [] lista4 = list(range(11)) lista5 = list('Geek University') # Podemos facilmente checar se determinado valor está contido na lista num = 18 if num in lista4: print(f'Encontrei o número {num}') else: print(f'Não encontrei o número {num}') # Podemos facilmente ordenar uma lista print(lista1) lista1.sort() print(lista1) # Podemos facilmente contar o número de ocorrências de um valor em uma lista print(lista1) print(lista1.count(1)) print(lista5) print(lista5.count('e')) # Adicionar elementos em listas # Para adicionar elementos em listas, utilizamos a função append print(lista1) lista1.append(42) print(lista1) # OBS: Com append, nós só conseguimos adicionar um (1) elemento por vez # lista1.append(12, 14, 56) # Erro lista1.append([8, 3, 1]) # Coloca a lista como elemento único (sublista) print(lista1) if [8, 3, 1] in lista1: print('Encontrei a lista') else: print('Não encontrei a lista') lista1.extend([123, 44, 67]) # Coloca cada elemento da lista como valor adicional à lista print(lista1) # Podemos inserir um novo elemento na lista informando a posição do índice # Isso não substitui o valor inicial. O mesmo será deslocado para a direita da lista. lista1.insert(2, 'Novo Valor') print(lista1) # Podemos facilmente juntar duas listas lista1 = lista1 + lista2 # lista1.extend(lista2) print(lista1) # Podemos facilmente inverter uma lista # Forma 1 lista1.reverse() lista2.reverse() print(lista1) print(lista2) # Forma 2 print(lista1[::-1]) print(lista2[::-1]) # Copiar uma lista lista6 = lista2.copy() print(lista6) # Podemos contar quantos elementos existem dentro da lista print(len(lista1)) # Podemos remover facilmente o último elemento de uma lista # O pop não somente remove o último elemento, mas também o retorna print(lista5) lista5.pop() print(lista5) # Podemos remover um elemento pelo índice # OBS: Os elementos à direita deste índice serão deslocados para a esquerda. # OBS: Se não houver elemento no índice informado, teremos o erro IndexError lista5.pop(2) print(lista5) # Podemos remover todos os elementos (Zerar a lista) print(lista5) lista5.clear() print(lista5) # Podemos facilmente repetir elementos em uma lista nova = [1, 2, 3] print(nova) nova = nova * 3 print(nova) # Podemos facilmente converter uma string para uma lista # Exemplo 1 curso = 'Programação em Python Essencial' print(curso) curso = curso.split() print(curso) # OBS: Por padrão, o split separa os elementos da lista pelo espaço entre eles. 
# Exemplo 2 curso = 'Programação,em,Python, Essencial' print(curso) curso = curso.split(',') print(curso) # Convertendo uma lista em uma string lista6 = ['Programação', 'em', 'Python', 'Essencial'] print(lista6) # Abaixo estamos falando: pega a lista6, coloca o separador indicado (espaço ou cifrão) entre cada elemento e transforma em uma string curso = ' '.join(lista6) print(curso) curso = '$'.join(lista6) print(curso) # Podemos realmente colocar qualquer tipo de dado em uma lista, inclusive misturando esses dados lista6 = [1, 2.34, True, 'Geek', 'd', [1, 2, 3], 45345345345] print(lista6) print(type(lista6)) # Iterando sobre listas # Exemplo 1 - Utilizando for soma = 0 for elemento in lista1: print(elemento) soma = soma + elemento print(soma) # Exemplo 2 - Utilizando while carrinho = [] produto = '' while produto != 'sair': print("Adicione um produto na lista ou digite 'sair' para sair: ") produto = input() if produto != 'sair': carrinho.append(produto) for produto in carrinho: print(produto) # Utilizando variáveis em listas numeros = [1, 2, 3, 4, 5] print(numeros) num1 = 1 num2 = 2 num3 = 3 num4 = 4 num5 = 5 numeros = [num1, num2, num3, num4, num5] print(numeros) # Fazemos acesso aos elementos de forma indexada cores = ['verde', 'amarelo', 'azul', 'branco'] print(cores[0]) # verde print(cores[1]) # amarelo print(cores[2]) # azul print(cores[3]) # branco # Fazemos acesso aos elementos de forma indexada inversa # Para entender melhor o índice negativo, pense na lista como um círculo, onde # o final da lista está ligado ao seu início print(cores[-1]) # branco print(cores[-2]) # azul print(cores[-3]) # amarelo print(cores[-4]) # verde for cor in cores: print(cor) indice = 0 while indice < len(cores): print(cores[indice]) indice = indice + 1 cores = ['verde', 'amarelo', 'azul', 'branco'] # Gerar índice em um for for indice, cor in enumerate(cores): print(indice, cor) # Listas aceitam valores repetidos lista = [] lista.append(42) lista.append(42) lista.append(33) lista.append(33) lista.append(42) # Outros métodos não tão importantes, mas também úteis # Encontrar o índice de um elemento na lista numeros = [5, 6, 7, 5, 8, 9, 10] # Em qual índice da lista está o valor 6? print(numeros.index(6)) # Em qual índice da lista está o valor 9? 
print(numeros.index(9)) # print(numeros.index(19)) # Gera ValueError # OBS: Caso não tenha este elemento na lista, será apresentado erro ValueError # OBS: Retorna o índice do primeiro elemento encontrado print(numeros.index(5)) # Podemos fazer busca dentro de um range, ou seja, indicar a partir de qual índice começar a buscar print(numeros.index(5, 1)) # Buscando a partir do índice 1 print(numeros.index(5, 2)) # Buscando a partir do índice 2 print(numeros.index(5, 3)) # Buscando a partir do índice 3 # print(numeros.index(5, 4)) # Buscando a partir do índice 4 # OBS: Caso não tenha este elemento na lista, será apresentado erro ValueError # Podemos fazer busca dentro de um range, início/fim print(numeros.index(8, 3, 6)) # Buscar o índice do valor 8, entre os índices 3 a 6 # Revisão do slicing # lista[inicio:fim:passo] # range(inicio, fim, passo) # Trabalhando com slice de listas com o parâmetro 'início' lista = [1, 2, 3, 4] print(lista[1:]) # Iniciando no índice 1 e pegando todos os elementos restantes # Trabalhando com slice de listas com o parâmetro 'fim' print(lista[:2]) # Começa em 0, pega até o índice 2 - 1 print(lista[:4]) # Começa em 0, pega até o índice 4 - 1 print(lista[1:3]) # Começa em 1, pega até o índice 3 - 1 # Trabalhando com slice de listas com o parâmetro 'passo' print(lista[1::2]) # Começa em 1, vai até o final, de 2 em 2 print(lista[::2]) # Começa em 0, vai até o final, de 2 em 2 # Invertendo valores em uma lista nomes = ['Geek', 'University'] nomes[0], nomes[1] = nomes[1], nomes[0] print(nomes) nomes = ['Geek', 'University'] nomes.reverse() print(nomes) # Soma*, Valor Máximo*, Valor Mínimo*, Tamanho # * Se os valores forem todos inteiros ou reais lista = [1, 2, 3, 4, 5, 6] print(sum(lista)) # Soma print(max(lista)) # Máximo Valor print(min(lista)) # Mínimo Valor print(len(lista)) # Tamanho da Lista # Transformar uma lista em tupla lista = [1, 2, 3, 4, 5, 6] print(lista) print(type(lista)) tupla = tuple(lista) print(tupla) print(type(tupla)) # Desempacotamento de listas lista = [1, 2, 3] num1, num2, num3 = lista print(num1) print(num2) print(num3) # OBS: Se tivermos um número diferente de elementos na lista e de variáveis para receber os dados, teremos ValueError # Copiando uma lista para outra (cópia real x referência) # Forma 1 - Cópia com list.copy() lista = [1, 2, 3] print(lista) nova = lista.copy() # Cópia print(nova) nova.append(4) print(lista) print(nova) # Veja que ao utilizarmos lista.copy() copiamos os dados da lista para uma nova lista e, # para uma lista simples como esta (sem sublistas), elas ficam totalmente independentes: # modificar uma não afeta a outra. # OBS: tecnicamente, list.copy() faz uma cópia rasa (Shallow Copy); para copiar também # sublistas aninhadas (Deep Copy / Cópia Profunda), utilize copy.deepcopy(), do módulo copy # Forma 2 - Atribuição (referência, não é uma cópia) lista = [1, 2, 3] print(lista) nova = lista # Não é cópia: as duas variáveis apontam para a MESMA lista print(nova) nova.append(4) print(lista) print(nova) # Veja que a atribuição não copia os dados: 'nova' e 'lista' referenciam o mesmo objeto, # e por isso a modificação feita em uma se refletiu na outra.
8,938
1.019738
import re from pygbif.gbifutils import gbif_baseurl, bool2str, requests_argset, gbif_GET def search( taxonKey=None, repatriated=None, kingdomKey=None, phylumKey=None, classKey=None, orderKey=None, familyKey=None, genusKey=None, subgenusKey=None, scientificName=None, country=None, publishingCountry=None, hasCoordinate=None, typeStatus=None, recordNumber=None, lastInterpreted=None, continent=None, geometry=None, recordedBy=None, recordedByID=None, identifiedByID=None, basisOfRecord=None, datasetKey=None, eventDate=None, catalogNumber=None, year=None, month=None, decimalLatitude=None, decimalLongitude=None, elevation=None, depth=None, institutionCode=None, collectionCode=None, hasGeospatialIssue=None, issue=None, q=None, spellCheck=None, mediatype=None, limit=300, offset=0, establishmentMeans=None, facet=None, facetMincount=None, facetMultiselect=None, timeout=60, **kwargs ): """ Search GBIF occurrences :param taxonKey: [int] A GBIF occurrence identifier :param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase. :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter. IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False`` :param repatriated: [str] Searches for records whose publishing country is different from the country where the record was recorded :param kingdomKey: [int] Kingdom classification key :param phylumKey: [int] Phylum classification key :param classKey: [int] Class classification key :param orderKey: [int] Order classification key :param familyKey: [int] Family classification key :param genusKey: [int] Genus classification key :param subgenusKey: [int] Subgenus classification key :param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search. :param datasetKey: [str] The occurrence dataset key (a uuid) :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not be unique, but should be fairly unique in combination with the institution and collection code. :param recordedBy: [str] The person who recorded the occurrence. :param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence :param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence. :param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the context of an institution. :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique. :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 :param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are: - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen. - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people. - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen. - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine. 
- ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication. - ``OBSERVATION`` An occurrence record describing an observation. - ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis. - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen. :param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param month: [int] The month of the year, starting with 1 for January. Supports range queries, smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work) :param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84. Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work) :param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84. Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work). :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. :param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger (e.g., ``5,30``, whereas ``30,5`` wouldn't work) :param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``, whereas ``30,5`` wouldn't work) :param geometry: [str] Searches for occurrences inside a polygon described in Well Known Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING, POLYGON, or MULTIPOLYGON. Example of a polygon: ``POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq. Polygons must have counter-clockwise ordering of points. :param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE`` returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes only records without spatial issues. The absence of this parameter returns any record with or without spatial issues. :param issue: [str] One or more of many possible issues with each occurrence record. See Details. Issues passed to this parameter filter results by the issue. :param hasCoordinate: [bool] Return only occurrence records with lat/long data (``True``) or all records (``False``, default). :param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus :param recordNumber: [int] Number recorded by collector of the data, different from GBIF record number. See http://rs.tdwg.org/dwc/terms/#recordNumber for more info :param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america`` (North America includes the Caribbean and reaches down and includes Panama), ``oceania``, or ``south_america`` :param fields: [str] Default (``all``) returns all fields. 
``minimal`` returns just taxon name, key, latitude, and longitude. Or specify each field you want returned by name, e.g. ``fields = ['name','latitude','elevation']``. :param mediatype: [str] Media type. Default is ``None``, so no filtering on mediatype. Options: ``None``, ``MovingImage``, ``Sound``, and ``StillImage`` :param limit: [int] Number of results to return. Default: ``300`` :param offset: [int] Record to start at. Default: ``0`` :param facet: [str] a string or list of strings of length 1 or greater :param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED, INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN :param facetMincount: [int] minimum number of records to be included in the faceting results :param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently filtered. See examples. Default: ``False`` :return: A dictionary Usage:: from pygbif import occurrences occurrences.search(taxonKey = 3329049) # Return 2 results (the default limit is 300) occurrences.search(taxonKey=3329049, limit=2) # Instead of getting a taxon key first, you can search for a name directly # However, note that using this approach (with `scientificName="..."`) # you are getting synonyms too. The results for using `scientificName` and # `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some # names they return different results occurrences.search(scientificName = 'Ursus americanus') from pygbif import species key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey'] occurrences.search(taxonKey = key) # Search by dataset key occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20) # Search by catalog number occurrences.search(catalogNumber="49366", limit=20) # occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20) # Use paging parameters (limit and offset) to page. Note the different results # for the two queries below. 
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5) occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5) # Many dataset keys # occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20) # Search by collector name res = occurrences.search(recordedBy="smith", limit=20) [ x['recordedBy'] for x in res['results'] ] # Many collector names # occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20) # recordedByID occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3) # identifiedByID occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3) # Search for many species splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa'] keys = [ species.name_suggest(x)[0]['key'] for x in splist ] out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ] [ x['results'][0]['speciesKey'] for x in out ] # Search - q parameter occurrences.search(q = "kingfisher", limit=20) ## spell check - only works with the `search` parameter ### spelled correctly - same result as above call occurrences.search(q = "kingfisher", limit=20, spellCheck = True) ### spelled incorrectly - stops with suggested spelling occurrences.search(q = "kajsdkla", limit=20, spellCheck = True) ### spelled incorrectly - stops with many suggested spellings ### and number of results for each occurrences.search(q = "helir", limit=20, spellCheck = True) # Search on latitude and longitude occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2) # Search on a bounding box ## in well known text format occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20) from pygbif import species key = species.name_suggest(q='Aesculus hippocastanum')[0]['key'] occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20) ## multipolygon wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))' occurrences.search(geometry = wkt, limit = 20) # Search on country occurrences.search(country='US', limit=20) occurrences.search(country='FR', limit=20) occurrences.search(country='DE', limit=20) # Get only occurrences with lat/long data occurrences.search(taxonKey=key, hasCoordinate=True, limit=20) # Get only occurrences that were recorded as living specimens occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20) # Get occurrences for a particular eventDate occurrences.search(taxonKey=key, eventDate="2013", limit=20) occurrences.search(taxonKey=key, year="2013", limit=20) occurrences.search(taxonKey=key, month="6", limit=20) # Get occurrences based on depth key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey'] occurrences.search(taxonKey=key, depth="5", limit=20) # Get occurrences based on elevation key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey'] occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20) # Get occurrences based on institutionCode occurrences.search(institutionCode="TLMF", limit=20) # Get occurrences based on collectionCode occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20) # Get only those occurrences with spatial issues occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20) # Search using a query string occurrences.search(q="kingfisher", 
limit=20) # Range queries ## See Details for parameters that support range queries ### this is a depth range, with lower/upper limits as a string occurrences.search(depth='50,100') ## Range search with year occurrences.search(year='1999,2000', limit=20) ## Range search with latitude occurrences.search(decimalLatitude='29.59,29.6') # Search by specimen type status ## Look for possible values of the typeStatus parameter looking at the typestatus dataset occurrences.search(typeStatus = 'allotype') # Search by specimen record number ## This is the record number of the person/group that submitted the data, not GBIF's numbers ## You can see that many different groups have record number 1, so not super helpful occurrences.search(recordNumber = 1) # Search by last time interpreted: Date the record was last modified in GBIF ## The lastInterpreted parameter accepts ISO 8601 format dates, including ## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted occurrences.search(lastInterpreted = '2014-04-01') # Search by continent ## One of africa, antarctica, asia, europe, north_america, oceania, or south_america occurrences.search(continent = 'south_america') occurrences.search(continent = 'africa') occurrences.search(continent = 'oceania') occurrences.search(continent = 'antarctica') # Search for occurrences with images occurrences.search(mediatype = 'StillImage') occurrences.search(mediatype = 'MovingImage') x = occurrences.search(mediatype = 'Sound') [z['media'] for z in x['results']] # Query based on issues occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY') occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED']) # Show all records in the Arizona State Lichen Collection that can't be matched to the GBIF # backbone properly: occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK']) # If you pass in an invalid polygon you get hopefully informative errors ### the WKT string is fine, but GBIF says bad polygon wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539, -147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625, -112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516, -82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985, -77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114, 179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227, 163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447, 127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688, 149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165, 178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))' occurrences.search(geometry = wkt) # Faceting ## return no occurrence records with limit=0 x = occurrences.search(facet = "country", limit = 0) x['facets'] ## also return occurrence records x = occurrences.search(facet = "establishmentMeans", limit = 10) x['facets'] x['results'] ## multiple facet variables x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10) x['results'] x['facets'] x['facets']['country'] x['facets']['basisOfRecord'] x['facets']['basisOfRecord']['count'] ## set a minimum facet count x = occurrences.search(facet = "country", facetMincount = 30000000, 
limit = 0) x['facets'] ## paging per each faceted variable ### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit" ### or "country" + "_facetOffset" = "country_facetOffset" x = occurrences.search( facet = ["country", "basisOfRecord", "hasCoordinate"], country_facetLimit = 3, basisOfRecord_facetLimit = 6, limit = 0 ) x['facets'] # requests package options ## There's an acceptable set of requests options (['timeout', 'cookies', 'auth', ## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass ## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds. x = occurrences.search(timeout = 1) """ url = gbif_baseurl + "occurrence/search" args = { "taxonKey": taxonKey, "repatriated": repatriated, "kingdomKey": kingdomKey, "phylumKey": phylumKey, "classKey": classKey, "orderKey": orderKey, "familyKey": familyKey, "genusKey": genusKey, "subgenusKey": subgenusKey, "scientificName": scientificName, "country": country, "publishingCountry": publishingCountry, "hasCoordinate": bool2str(hasCoordinate), "typeStatus": typeStatus, "recordNumber": recordNumber, "lastInterpreted": lastInterpreted, "continent": continent, "geometry": geometry, "recordedBy": recordedBy, "recordedByID": recordedByID, "identifiedByID": identifiedByID, "basisOfRecord": basisOfRecord, "datasetKey": datasetKey, "eventDate": eventDate, "catalogNumber": catalogNumber, "year": year, "month": month, "decimalLatitude": decimalLatitude, "decimalLongitude": decimalLongitude, "elevation": elevation, "depth": depth, "institutionCode": institutionCode, "collectionCode": collectionCode, "hasGeospatialIssue": bool2str(hasGeospatialIssue), "issue": issue, "q": q, "spellCheck": bool2str(spellCheck), "mediatype": mediatype, "limit": limit, "offset": offset, "establishmentMeans": establishmentMeans, "facetMincount": facetMincount, "facet": facet, "facetMultiselect": bool2str(facetMultiselect), } gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset} # the dict comprehension always yields a dict (possibly empty), so check truthiness rather than None if gbif_kwargs: xx = dict( zip([re.sub("_", ".", x) for x in gbif_kwargs.keys()], gbif_kwargs.values()) ) args.update(xx) kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset} # timeout is a named parameter, so it never lands in **kwargs; pass it through to honor the documented 60s default kwargs.setdefault("timeout", timeout) out = gbif_GET(url, args, **kwargs) return out
50.844282
250
0.676748
[ "MIT" ]
livatras/pygbif
pygbif/occurrences/search.py
20,897
Python
Search GBIF occurrences :param taxonKey: [int] A GBIF occurrence identifier :param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase. :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter. IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False`` :param repatriated: [str] Searches for records whose publishing country is different from the country where the record was recorded :param kingdomKey: [int] Kingdom classification key :param phylumKey: [int] Phylum classification key :param classKey: [int] Class classification key :param orderKey: [int] Order classification key :param familyKey: [int] Family classification key :param genusKey: [int] Genus classification key :param subgenusKey: [int] Subgenus classification key :param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search. :param datasetKey: [str] The occurrence dataset key (a uuid) :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not be unique, but should be fairly unique in combination with the institution and collection code. :param recordedBy: [str] The person who recorded the occurrence. :param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence :param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence. :param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the context of an institution. :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique. :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 :param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are: - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen. - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people. - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen. - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine. - ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication. - ``OBSERVATION`` An occurrence record describing an observation. - ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis. - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen. :param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param month: [int] The month of the year, starting with 1 for January. 
Supports range queries, smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work) :param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84. Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work) :param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84. Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work). :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. :param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger (e.g., ``5,30``, whereas ``30,5`` wouldn't work) :param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``, whereas ``30,5`` wouldn't work) :param geometry: [str] Searches for occurrences inside a polygon described in Well Known Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING, POLYGON, or MULTIPOLYGON. Example of a polygon: ``POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq. Polygons must have counter-clockwise ordering of points. :param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE`` returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes only records without spatial issues. The absence of this parameter returns any record with or without spatial issues. :param issue: [str] One or more of many possible issues with each occurrence record. See Details. Issues passed to this parameter filter results by the issue. :param hasCoordinate: [bool] Return only occurrence records with lat/long data (``True``) or all records (``False``, default). :param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus :param recordNumber: [int] Number recorded by collector of the data, different from GBIF record number. See http://rs.tdwg.org/dwc/terms/#recordNumber for more info :param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america`` (North America includes the Caribbean and reaches down and includes Panama), ``oceania``, or ``south_america`` :param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name, key, latitude, and longitude. Or specify each field you want returned by name, e.g. ``fields = ['name','latitude','elevation']``. :param mediatype: [str] Media type. Default is ``None``, so no filtering on mediatype. Options: ``None``, ``MovingImage``, ``Sound``, and ``StillImage`` :param limit: [int] Number of results to return. Default: ``300`` :param offset: [int] Record to start at. 
Default: ``0`` :param facet: [str] a string or list of strings of length 1 or greater :param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED, INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN :param facetMincount: [int] minimum number of records to be included in the faceting results :param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently filtered. See examples. Default: ``False`` :return: A dictionary Usage:: from pygbif import occurrences occurrences.search(taxonKey = 3329049) # Return 2 results (the default limit is 300) occurrences.search(taxonKey=3329049, limit=2) # Instead of getting a taxon key first, you can search for a name directly # However, note that using this approach (with `scientificName="..."`) # you are getting synonyms too. The results for using `scientificName` and # `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some # names they return different results occurrences.search(scientificName = 'Ursus americanus') from pygbif import species key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey'] occurrences.search(taxonKey = key) # Search by dataset key occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20) # Search by catalog number occurrences.search(catalogNumber="49366", limit=20) # occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20) # Use paging parameters (limit and offset) to page. Note the different results # for the two queries below. occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5) occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5) # Many dataset keys # occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20) # Search by collector name res = occurrences.search(recordedBy="smith", limit=20) [ x['recordedBy'] for x in res['results'] ] # Many collector names # occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20) # recordedByID occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3) # identifiedByID occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3) # Search for many species splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa'] keys = [ species.name_suggest(x)[0]['key'] for x in splist ] out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ] [ x['results'][0]['speciesKey'] for x in out ] # Search - q parameter occurrences.search(q = "kingfisher", limit=20) ## spell check - only works with the `search` parameter ### spelled correctly - same result as above call occurrences.search(q = "kingfisher", limit=20, spellCheck = True) ### spelled incorrectly - stops with suggested spelling occurrences.search(q = "kajsdkla", limit=20, spellCheck = True) ### spelled incorrectly - stops with many suggested spellings ### and number of results for each occurrences.search(q = "helir", limit=20, spellCheck = True) # Search on latitude and longitude occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2) # Search on a bounding box ## in well known text format occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20) from pygbif import species key = species.name_suggest(q='Aesculus hippocastanum')[0]['key'] occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20) ## 
multipolygon wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))' occurrences.search(geometry = wkt, limit = 20) # Search on country occurrences.search(country='US', limit=20) occurrences.search(country='FR', limit=20) occurrences.search(country='DE', limit=20) # Get only occurrences with lat/long data occurrences.search(taxonKey=key, hasCoordinate=True, limit=20) # Get only occurrences that were recorded as living specimens occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20) # Get occurrences for a particular eventDate occurrences.search(taxonKey=key, eventDate="2013", limit=20) occurrences.search(taxonKey=key, year="2013", limit=20) occurrences.search(taxonKey=key, month="6", limit=20) # Get occurrences based on depth key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey'] occurrences.search(taxonKey=key, depth="5", limit=20) # Get occurrences based on elevation key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey'] occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20) # Get occurrences based on institutionCode occurrences.search(institutionCode="TLMF", limit=20) # Get occurrences based on collectionCode occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20) # Get only those occurrences with spatial issues occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20) # Search using a query string occurrences.search(q="kingfisher", limit=20) # Range queries ## See Details for parameters that support range queries ### this is a depth range, with lower/upper limits as a string occurrences.search(depth='50,100') ## Range search with year occurrences.search(year='1999,2000', limit=20) ## Range search with latitude occurrences.search(decimalLatitude='29.59,29.6') # Search by specimen type status ## Look for possible values of the typeStatus parameter looking at the typestatus dataset occurrences.search(typeStatus = 'allotype') # Search by specimen record number ## This is the record number of the person/group that submitted the data, not GBIF's numbers ## You can see that many different groups have record number 1, so not super helpful occurrences.search(recordNumber = 1) # Search by last time interpreted: Date the record was last modified in GBIF ## The lastInterpreted parameter accepts ISO 8601 format dates, including ## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. 
Range queries are accepted for lastInterpreted occurrences.search(lastInterpreted = '2014-04-01') # Search by continent ## One of africa, antarctica, asia, europe, north_america, oceania, or south_america occurrences.search(continent = 'south_america') occurrences.search(continent = 'africa') occurrences.search(continent = 'oceania') occurrences.search(continent = 'antarctica') # Search for occurrences with images occurrences.search(mediatype = 'StillImage') occurrences.search(mediatype = 'MovingImage') x = occurrences.search(mediatype = 'Sound') [z['media'] for z in x['results']] # Query based on issues occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY') occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED']) # Show all records in the Arizona State Lichen Collection that can't be matched to the GBIF # backbone properly: occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK']) # If you pass in an invalid polygon you get hopefully informative errors ### the WKT string is fine, but GBIF says bad polygon wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539, -147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625, -112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516, -82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985, -77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114, 179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227, 163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447, 127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688, 149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165, 178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))' occurrences.search(geometry = wkt) # Faceting ## return no occurrence records with limit=0 x = occurrences.search(facet = "country", limit = 0) x['facets'] ## also return occurrence records x = occurrences.search(facet = "establishmentMeans", limit = 10) x['facets'] x['results'] ## multiple facet variables x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10) x['results'] x['facets'] x['facets']['country'] x['facets']['basisOfRecord'] x['facets']['basisOfRecord']['count'] ## set a minimum facet count x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0) x['facets'] ## paging per each faceted variable ### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit" ### or "country" + "_facetOffset" = "country_facetOffset" x = occurrences.search( facet = ["country", "basisOfRecord", "hasCoordinate"], country_facetLimit = 3, basisOfRecord_facetLimit = 6, limit = 0 ) x['facets'] # requests package options ## There's an acceptable set of requests options (['timeout', 'cookies', 'auth', ## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass ## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds. x = occurrences.search(timeout = 1)
16,767
0.802364
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft and contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .feature_client import FeatureClient from .version import VERSION __all__ = ['FeatureClient'] __version__ = VERSION
36.172414
76
0.656816
[ "Apache-2.0" ]
HydAu/AzureSDKForPython
azure-mgmt-resource/azure/mgmt/resource/features/__init__.py
1,049
Python
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft and contributors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. --------------------------------------------------------------------------
883
0.841754
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'sample' copyright = '2020, Sample Author' author = 'Sample Author' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static']
35.711538
79
0.661282
[ "MIT" ]
keathmilligan/flask-jwt-refresh
docs/conf.py
1,857
Python
Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css".
1,546
0.832526
# partesanato/__init__.py
13.5
26
0.777778
[ "MIT" ]
edgarbs1998/partesanato-server
src/partesanato/__init__.py
27
Python
partesanato/__init__.py
23
0.851852
# Copyright 2015 Ciara Kamahele-Sanfratello # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Planner is a generic interface used by Simulators to choose the next action to take class Planner: def __init__(self): pass def next_action(self, initial_state, goal_state, prev_obs): pass
36.727273
85
0.75
[ "Apache-2.0" ]
ciarakamahele/sasy
simulator/Planners/Planner.py
808
Python
Copyright 2015 Ciara Kamahele-Sanfratello Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Planner is a generic interface used by Simulators to choose the next action to take
648
0.80198

Dataset Card for "python_comment_code_ratio_08"

More Information needed
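
The rows above pair each Python source file with repository metadata, the natural-language text extracted from its comments and docstrings, and size and ratio statistics; the dataset name suggests files were kept when that ratio reached roughly 0.8. Below is a minimal usage sketch, assuming the dataset is published on the Hugging Face Hub under an id like `python_comment_code_ratio_08` with a `train` split and field names matching the preview (the id, the split, and the field names are all assumptions, not confirmed by this card):

```python
# Hypothetical usage sketch: load the dataset and sanity-check the ratio
# that apparently defines it. The dataset id and field names below are
# assumptions based on this card's row preview, not a confirmed schema.
from datasets import load_dataset

ds = load_dataset("python_comment_code_ratio_08", split="train")

row = ds[0]
print(row["repository_name"], row["path"])  # provenance of the file
print(row["lang"], row["size"])             # language tag and file size

# Recompute one plausible definition of the natural-language ratio and
# compare it with the stored value; the "08" suffix hints at a >= 0.8 filter.
recomputed = row["nl_size"] / row["size"]
print(recomputed, row["nl_ratio"], row["nl_ratio"] >= 0.8)
```

Note that one row above reports an nl_ratio of 1.019738, slightly above nl_size divided by size for that row, so the stored ratio is apparently computed over characters rather than bytes (or by some other rule); worth verifying before relying on it as a fraction.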
